Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0167-4.14.68-all-fixes.patch

Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 191041 bytes
-added up to patches-4.14.79
1 niro 3238 diff --git a/Makefile b/Makefile
2     index 4dad2d1c24ba..3da579058926 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 14
9     -SUBLEVEL = 67
10     +SUBLEVEL = 68
11     EXTRAVERSION =
12     NAME = Petit Gorille
13    
14     @@ -490,9 +490,13 @@ KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
15     endif
16    
17     RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
18     +RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
19     RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
20     +RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
21     RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
22     +RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
23     export RETPOLINE_CFLAGS
24     +export RETPOLINE_VDSO_CFLAGS
25    
26     ifeq ($(config-targets),1)
27     # ===========================================================================
28     diff --git a/arch/Kconfig b/arch/Kconfig
29     index 4e01862f58e4..40dc31fea90c 100644
30     --- a/arch/Kconfig
31     +++ b/arch/Kconfig
32     @@ -336,6 +336,9 @@ config HAVE_ARCH_JUMP_LABEL
33     config HAVE_RCU_TABLE_FREE
34     bool
35    
36     +config HAVE_RCU_TABLE_INVALIDATE
37     + bool
38     +
39     config ARCH_HAVE_NMI_SAFE_CMPXCHG
40     bool
41    
42     diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
43     index 5c8caf85c350..8ff066090680 100644
44     --- a/arch/arc/Kconfig
45     +++ b/arch/arc/Kconfig
46     @@ -45,6 +45,9 @@ config ARC
47     select HAVE_KERNEL_GZIP
48     select HAVE_KERNEL_LZMA
49    
50     +config ARCH_HAS_CACHE_LINE_SIZE
51     + def_bool y
52     +
53     config MIGHT_HAVE_PCI
54     bool
55    
56     diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
57     index 8486f328cc5d..ff7d3232764a 100644
58     --- a/arch/arc/include/asm/cache.h
59     +++ b/arch/arc/include/asm/cache.h
60     @@ -48,7 +48,9 @@
61     })
62    
63     /* Largest line length for either L1 or L2 is 128 bytes */
64     -#define ARCH_DMA_MINALIGN 128
65     +#define SMP_CACHE_BYTES 128
66     +#define cache_line_size() SMP_CACHE_BYTES
67     +#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
68    
69     extern void arc_cache_init(void);
70     extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
71     diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
72     index d5da2115d78a..03d6bb0f4e13 100644
73     --- a/arch/arc/include/asm/delay.h
74     +++ b/arch/arc/include/asm/delay.h
75     @@ -17,8 +17,11 @@
76     #ifndef __ASM_ARC_UDELAY_H
77     #define __ASM_ARC_UDELAY_H
78    
79     +#include <asm-generic/types.h>
80     #include <asm/param.h> /* HZ */
81    
82     +extern unsigned long loops_per_jiffy;
83     +
84     static inline void __delay(unsigned long loops)
85     {
86     __asm__ __volatile__(
87     diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
88     index eee924dfffa6..d14499500106 100644
89     --- a/arch/arc/mm/cache.c
90     +++ b/arch/arc/mm/cache.c
91     @@ -1035,7 +1035,7 @@ void flush_cache_mm(struct mm_struct *mm)
92     void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
93     unsigned long pfn)
94     {
95     - unsigned int paddr = pfn << PAGE_SHIFT;
96     + phys_addr_t paddr = pfn << PAGE_SHIFT;
97    
98     u_vaddr &= PAGE_MASK;
99    
100     @@ -1055,8 +1055,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
101     unsigned long u_vaddr)
102     {
103     /* TBD: do we really need to clear the kernel mapping */
104     - __flush_dcache_page(page_address(page), u_vaddr);
105     - __flush_dcache_page(page_address(page), page_address(page));
106     + __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
107     + __flush_dcache_page((phys_addr_t)page_address(page),
108     + (phys_addr_t)page_address(page));
109    
110     }
111    
112     diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
113     index 0c7d11022d0f..4f6a1673b3a6 100644
114     --- a/arch/arc/plat-eznps/include/plat/ctop.h
115     +++ b/arch/arc/plat-eznps/include/plat/ctop.h
116     @@ -21,6 +21,7 @@
117     #error "Incorrect ctop.h include"
118     #endif
119    
120     +#include <linux/types.h>
121     #include <soc/nps/common.h>
122    
123     /* core auxiliary registers */
124     @@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
125     };
126    
127     /* AUX registers definition */
128     +struct nps_host_reg_aux_dpc {
129     + union {
130     + struct {
131     + u32 ien:1, men:1, hen:1, reserved:29;
132     + };
133     + u32 value;
134     + };
135     +};
136     +
137     struct nps_host_reg_aux_udmc {
138     union {
139     struct {
140     diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c
141     index 2388de3d09ef..ed0077ef666e 100644
142     --- a/arch/arc/plat-eznps/mtm.c
143     +++ b/arch/arc/plat-eznps/mtm.c
144     @@ -15,6 +15,8 @@
145     */
146    
147     #include <linux/smp.h>
148     +#include <linux/init.h>
149     +#include <linux/kernel.h>
150     #include <linux/io.h>
151     #include <linux/log2.h>
152     #include <asm/arcregs.h>
153     @@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
154     /* Verify and set the value of the mtm hs counter */
155     static int __init set_mtm_hs_ctr(char *ctr_str)
156     {
157     - long hs_ctr;
158     + int hs_ctr;
159     int ret;
160    
161     - ret = kstrtol(ctr_str, 0, &hs_ctr);
162     + ret = kstrtoint(ctr_str, 0, &hs_ctr);
163    
164     if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
165     pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",
166     diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
167     index 52d1cd14fda4..091e9a3c2dcb 100644
168     --- a/arch/arm/probes/kprobes/core.c
169     +++ b/arch/arm/probes/kprobes/core.c
170     @@ -291,8 +291,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
171     break;
172     case KPROBE_REENTER:
173     /* A nested probe was hit in FIQ, it is a BUG */
174     - pr_warn("Unrecoverable kprobe detected at %p.\n",
175     - p->addr);
176     + pr_warn("Unrecoverable kprobe detected.\n");
177     + dump_kprobe(p);
178     /* fall through */
179     default:
180     /* impossible cases */
181     diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
182     index 1c98a87786ca..a10d7187ad2c 100644
183     --- a/arch/arm/probes/kprobes/test-core.c
184     +++ b/arch/arm/probes/kprobes/test-core.c
185     @@ -1517,7 +1517,6 @@ fail:
186     print_registers(&result_regs);
187    
188     if (mem) {
189     - pr_err("current_stack=%p\n", current_stack);
190     pr_err("expected_memory:\n");
191     print_memory(expected_memory, mem_size);
192     pr_err("result_memory:\n");
193     diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
194     index d70e409e2b0c..efac2202b16e 100644
195     --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
196     +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
197     @@ -331,7 +331,7 @@
198     reg = <0x0 0xff120000 0x0 0x100>;
199     interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
200     clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
201     - clock-names = "sclk_uart", "pclk_uart";
202     + clock-names = "baudclk", "apb_pclk";
203     dmas = <&dmac 4>, <&dmac 5>;
204     #dma-cells = <2>;
205     pinctrl-names = "default";
206     diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
207     index d849d9804011..22a5921562c7 100644
208     --- a/arch/arm64/kernel/probes/kprobes.c
209     +++ b/arch/arm64/kernel/probes/kprobes.c
210     @@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
211     break;
212     case KPROBE_HIT_SS:
213     case KPROBE_REENTER:
214     - pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
215     + pr_warn("Unrecoverable kprobe detected.\n");
216     dump_kprobe(p);
217     BUG();
218     break;
219     diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
220     index 1190d90e01e6..caa295cd5d09 100644
221     --- a/arch/arm64/mm/init.c
222     +++ b/arch/arm64/mm/init.c
223     @@ -287,7 +287,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
224     #ifdef CONFIG_HAVE_ARCH_PFN_VALID
225     int pfn_valid(unsigned long pfn)
226     {
227     - return memblock_is_map_memory(pfn << PAGE_SHIFT);
228     + phys_addr_t addr = pfn << PAGE_SHIFT;
229     +
230     + if ((addr >> PAGE_SHIFT) != pfn)
231     + return 0;
232     + return memblock_is_map_memory(addr);
233     }
234     EXPORT_SYMBOL(pfn_valid);
235     #endif
236     diff --git a/arch/mips/Makefile b/arch/mips/Makefile
237     index a96d97a806c9..5977884b008e 100644
238     --- a/arch/mips/Makefile
239     +++ b/arch/mips/Makefile
240     @@ -155,15 +155,11 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
241     cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
242     cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
243     cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
244     -cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
245     - -Wa,-mips32 -Wa,--trap
246     -cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
247     - -Wa,-mips32r2 -Wa,--trap
248     +cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
249     +cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
250     cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
251     -cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
252     - -Wa,-mips64 -Wa,--trap
253     -cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
254     - -Wa,-mips64r2 -Wa,--trap
255     +cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
256     +cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
257     cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
258     cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
259     cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
260     diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
261     index 8c9cbf13d32a..6054d49e608e 100644
262     --- a/arch/mips/bcm47xx/setup.c
263     +++ b/arch/mips/bcm47xx/setup.c
264     @@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
265     */
266     if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
267     cpu_wait = NULL;
268     -
269     - /*
270     - * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
271     - * Enable ExternalSync for sync instruction to take effect
272     - */
273     - set_c0_config7(MIPS_CONF7_ES);
274     break;
275     #endif
276     }
277     diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
278     index 60c787d943b0..a6810923b3f0 100644
279     --- a/arch/mips/include/asm/mipsregs.h
280     +++ b/arch/mips/include/asm/mipsregs.h
281     @@ -680,8 +680,6 @@
282     #define MIPS_CONF7_WII (_ULCAST_(1) << 31)
283    
284     #define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
285     -/* ExternalSync */
286     -#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
287    
288     #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
289     #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
290     @@ -2747,7 +2745,6 @@ __BUILD_SET_C0(status)
291     __BUILD_SET_C0(cause)
292     __BUILD_SET_C0(config)
293     __BUILD_SET_C0(config5)
294     -__BUILD_SET_C0(config7)
295     __BUILD_SET_C0(intcontrol)
296     __BUILD_SET_C0(intctl)
297     __BUILD_SET_C0(srsmap)
298     diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
299     index 95b8c471f572..eb1f6030ab85 100644
300     --- a/arch/mips/include/asm/processor.h
301     +++ b/arch/mips/include/asm/processor.h
302     @@ -141,7 +141,7 @@ struct mips_fpu_struct {
303    
304     #define NUM_DSP_REGS 6
305    
306     -typedef __u32 dspreg_t;
307     +typedef unsigned long dspreg_t;
308    
309     struct mips_dsp_state {
310     dspreg_t dspr[NUM_DSP_REGS];
311     @@ -388,7 +388,20 @@ unsigned long get_wchan(struct task_struct *p);
312     #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
313     #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
314    
315     +#ifdef CONFIG_CPU_LOONGSON3
316     +/*
317     + * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
318     + * tight read loop is executed, because reads take priority over writes & the
319     + * hardware (incorrectly) doesn't ensure that writes will eventually occur.
320     + *
321     + * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
322     + * flush from cpu_relax() such that any pending writes will become visible as
323     + * expected.
324     + */
325     +#define cpu_relax() smp_mb()
326     +#else
327     #define cpu_relax() barrier()
328     +#endif
329    
330     /*
331     * Return_address is a replacement for __builtin_return_address(count)
332     diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
333     index e058cd300713..efffdf2464ab 100644
334     --- a/arch/mips/kernel/ptrace.c
335     +++ b/arch/mips/kernel/ptrace.c
336     @@ -847,7 +847,7 @@ long arch_ptrace(struct task_struct *child, long request,
337     goto out;
338     }
339     dregs = __get_dsp_regs(child);
340     - tmp = (unsigned long) (dregs[addr - DSP_BASE]);
341     + tmp = dregs[addr - DSP_BASE];
342     break;
343     }
344     case DSP_CONTROL:
345     diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
346     index 89026d33a07b..6990240785f6 100644
347     --- a/arch/mips/kernel/ptrace32.c
348     +++ b/arch/mips/kernel/ptrace32.c
349     @@ -141,7 +141,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
350     goto out;
351     }
352     dregs = __get_dsp_regs(child);
353     - tmp = (unsigned long) (dregs[addr - DSP_BASE]);
354     + tmp = dregs[addr - DSP_BASE];
355     break;
356     }
357     case DSP_CONTROL:
358     diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
359     index 111ad475aa0c..4c2483f410c2 100644
360     --- a/arch/mips/lib/multi3.c
361     +++ b/arch/mips/lib/multi3.c
362     @@ -4,12 +4,12 @@
363     #include "libgcc.h"
364    
365     /*
366     - * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
367     - * specific case only we'll implement it here.
368     + * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
369     + * that specific case only we implement that intrinsic here.
370     *
371     * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
372     */
373     -#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
374     +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
375    
376     /* multiply 64-bit values, low 64-bits returned */
377     static inline long long notrace dmulu(long long a, long long b)
378     diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
379     index 254634fb3fc7..fee1e1f8c9d3 100644
380     --- a/arch/powerpc/net/bpf_jit_comp64.c
381     +++ b/arch/powerpc/net/bpf_jit_comp64.c
382     @@ -322,6 +322,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
383     u64 imm64;
384     u8 *func;
385     u32 true_cond;
386     + u32 tmp_idx;
387    
388     /*
389     * addrs[] maps a BPF bytecode address into a real offset from
390     @@ -681,11 +682,7 @@ emit_clear:
391     case BPF_STX | BPF_XADD | BPF_W:
392     /* Get EA into TMP_REG_1 */
393     PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
394     - /* error if EA is not word-aligned */
395     - PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
396     - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
397     - PPC_LI(b2p[BPF_REG_0], 0);
398     - PPC_JMP(exit_addr);
399     + tmp_idx = ctx->idx * 4;
400     /* load value from memory into TMP_REG_2 */
401     PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
402     /* add value from src_reg into this */
403     @@ -693,32 +690,16 @@ emit_clear:
404     /* store result back */
405     PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
406     /* we're done if this succeeded */
407     - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
408     - /* otherwise, let's try once more */
409     - PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
410     - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
411     - PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
412     - /* exit if the store was not successful */
413     - PPC_LI(b2p[BPF_REG_0], 0);
414     - PPC_BCC(COND_NE, exit_addr);
415     + PPC_BCC_SHORT(COND_NE, tmp_idx);
416     break;
417     /* *(u64 *)(dst + off) += src */
418     case BPF_STX | BPF_XADD | BPF_DW:
419     PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
420     - /* error if EA is not doubleword-aligned */
421     - PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
422     - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
423     - PPC_LI(b2p[BPF_REG_0], 0);
424     - PPC_JMP(exit_addr);
425     - PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
426     - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
427     - PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
428     - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
429     + tmp_idx = ctx->idx * 4;
430     PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
431     PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
432     PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
433     - PPC_LI(b2p[BPF_REG_0], 0);
434     - PPC_BCC(COND_NE, exit_addr);
435     + PPC_BCC_SHORT(COND_NE, tmp_idx);
436     break;
437    
438     /*
439     diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
440     index de11ecc99c7c..9c9970a5dfb1 100644
441     --- a/arch/s390/include/asm/qdio.h
442     +++ b/arch/s390/include/asm/qdio.h
443     @@ -262,7 +262,6 @@ struct qdio_outbuf_state {
444     void *user;
445     };
446    
447     -#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
448     #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
449    
450     #define CHSC_AC1_INITIATE_INPUTQ 0x80
451     diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
452     index 242b78c0a9ec..40f1888bc4ab 100644
453     --- a/arch/s390/mm/fault.c
454     +++ b/arch/s390/mm/fault.c
455     @@ -486,6 +486,8 @@ retry:
456     /* No reason to continue if interrupted by SIGKILL. */
457     if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
458     fault = VM_FAULT_SIGNAL;
459     + if (flags & FAULT_FLAG_RETRY_NOWAIT)
460     + goto out_up;
461     goto out;
462     }
463     if (unlikely(fault & VM_FAULT_ERROR))
464     diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
465     index 382153ff17e3..dc3cede7f2ec 100644
466     --- a/arch/s390/mm/page-states.c
467     +++ b/arch/s390/mm/page-states.c
468     @@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable)
469     list_for_each(l, &zone->free_area[order].free_list[t]) {
470     page = list_entry(l, struct page, lru);
471     if (make_stable)
472     - set_page_stable_dat(page, 0);
473     + set_page_stable_dat(page, order);
474     else
475     set_page_unused(page, order);
476     }
477     diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
478     index 45f1ea117128..6b1474fa99ab 100644
479     --- a/arch/s390/net/bpf_jit_comp.c
480     +++ b/arch/s390/net/bpf_jit_comp.c
481     @@ -518,8 +518,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
482     /* br %r1 */
483     _EMIT2(0x07f1);
484     } else {
485     - /* larl %r1,.+14 */
486     - EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
487     /* ex 0,S390_lowcore.br_r1_tampoline */
488     EMIT4_DISP(0x44000000, REG_0, REG_0,
489     offsetof(struct lowcore, br_r1_trampoline));
490     diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
491     index 06a80434cfe6..5bd374491f94 100644
492     --- a/arch/s390/numa/numa.c
493     +++ b/arch/s390/numa/numa.c
494     @@ -134,26 +134,14 @@ void __init numa_setup(void)
495     {
496     pr_info("NUMA mode: %s\n", mode->name);
497     nodes_clear(node_possible_map);
498     + /* Initially attach all possible CPUs to node 0. */
499     + cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
500     if (mode->setup)
501     mode->setup();
502     numa_setup_memory();
503     memblock_dump_all();
504     }
505    
506     -/*
507     - * numa_init_early() - Initialization initcall
508     - *
509     - * This runs when only one CPU is online and before the first
510     - * topology update is called for by the scheduler.
511     - */
512     -static int __init numa_init_early(void)
513     -{
514     - /* Attach all possible CPUs to node 0 for now. */
515     - cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
516     - return 0;
517     -}
518     -early_initcall(numa_init_early);
519     -
520     /*
521     * numa_init_late() - Initialization initcall
522     *
523     diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
524     index 0fe649c0d542..960c4a362d8c 100644
525     --- a/arch/s390/pci/pci.c
526     +++ b/arch/s390/pci/pci.c
527     @@ -420,6 +420,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
528     hwirq = 0;
529     for_each_pci_msi_entry(msi, pdev) {
530     rc = -EIO;
531     + if (hwirq >= msi_vecs)
532     + break;
533     irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
534     if (irq < 0)
535     return -ENOMEM;
536     diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
537     index 80ddc01f57ac..fcbc0c0aa087 100644
538     --- a/arch/sparc/include/asm/Kbuild
539     +++ b/arch/sparc/include/asm/Kbuild
540     @@ -14,6 +14,7 @@ generic-y += local64.h
541     generic-y += mcs_spinlock.h
542     generic-y += mm-arch-hooks.h
543     generic-y += module.h
544     +generic-y += msi.h
545     generic-y += preempt.h
546     generic-y += rwsem.h
547     generic-y += serial.h
548     diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
549     index 3b397081047a..83aaf4888999 100644
550     --- a/arch/sparc/kernel/time_64.c
551     +++ b/arch/sparc/kernel/time_64.c
552     @@ -813,7 +813,7 @@ static void __init get_tick_patch(void)
553     }
554     }
555    
556     -static void init_tick_ops(struct sparc64_tick_ops *ops)
557     +static void __init init_tick_ops(struct sparc64_tick_ops *ops)
558     {
559     unsigned long freq, quotient, tick;
560    
561     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
562     index 1c63a4b5320d..2af0af33362a 100644
563     --- a/arch/x86/Kconfig
564     +++ b/arch/x86/Kconfig
565     @@ -170,6 +170,7 @@ config X86
566     select HAVE_PERF_REGS
567     select HAVE_PERF_USER_STACK_DUMP
568     select HAVE_RCU_TABLE_FREE
569     + select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
570     select HAVE_REGS_AND_STACK_ACCESS_API
571     select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
572     select HAVE_STACK_VALIDATION if X86_64
573     diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
574     index 98018a621f6b..3a250ca2406c 100644
575     --- a/arch/x86/boot/compressed/Makefile
576     +++ b/arch/x86/boot/compressed/Makefile
577     @@ -104,9 +104,13 @@ define cmd_check_data_rel
578     done
579     endef
580    
581     +# We need to run two commands under "if_changed", so merge them into a
582     +# single invocation.
583     +quiet_cmd_check-and-link-vmlinux = LD $@
584     + cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
585     +
586     $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
587     - $(call if_changed,check_data_rel)
588     - $(call if_changed,ld)
589     + $(call if_changed,check-and-link-vmlinux)
590    
591     OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
592     $(obj)/vmlinux.bin: vmlinux FORCE
593     diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
594     index c366c0adeb40..b545bf9d2328 100644
595     --- a/arch/x86/entry/vdso/Makefile
596     +++ b/arch/x86/entry/vdso/Makefile
597     @@ -74,9 +74,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
598     CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
599     $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
600     -fno-omit-frame-pointer -foptimize-sibling-calls \
601     - -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
602     + -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
603    
604     -$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
605     +$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
606    
607     #
608     # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
609     @@ -147,11 +147,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
610     KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
611     KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
612     KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
613     +KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
614     KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
615     KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
616     KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
617     KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
618     KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
619     +KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
620     $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
621    
622     $(obj)/vdso32.so.dbg: FORCE \
623     diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
624     index 786fd875de92..8c51844694e2 100644
625     --- a/arch/x86/events/amd/ibs.c
626     +++ b/arch/x86/events/amd/ibs.c
627     @@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
628     {
629     struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
630     struct perf_event *event = pcpu->event;
631     - struct hw_perf_event *hwc = &event->hw;
632     + struct hw_perf_event *hwc;
633     struct perf_sample_data data;
634     struct perf_raw_record raw;
635     struct pt_regs regs;
636     @@ -602,6 +602,10 @@ fail:
637     return 0;
638     }
639    
640     + if (WARN_ON_ONCE(!event))
641     + goto fail;
642     +
643     + hwc = &event->hw;
644     msr = hwc->config_base;
645     buf = ibs_data.regs;
646     rdmsrl(msr, *buf);
647     diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
648     index 717c9219d00e..e5097dc85a06 100644
649     --- a/arch/x86/events/core.c
650     +++ b/arch/x86/events/core.c
651     @@ -2462,7 +2462,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
652    
653     perf_callchain_store(entry, regs->ip);
654    
655     - if (!current->mm)
656     + if (!nmi_uaccess_okay())
657     return;
658    
659     if (perf_callchain_user32(regs, entry))
660     diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
661     index c14f2a74b2be..15450a675031 100644
662     --- a/arch/x86/include/asm/irqflags.h
663     +++ b/arch/x86/include/asm/irqflags.h
664     @@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
665     return flags;
666     }
667    
668     -static inline void native_restore_fl(unsigned long flags)
669     +extern inline void native_restore_fl(unsigned long flags);
670     +extern inline void native_restore_fl(unsigned long flags)
671     {
672     asm volatile("push %0 ; popf"
673     : /* no output */
674     diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
675     index 0e856c0628b3..b12c8d70dd33 100644
676     --- a/arch/x86/include/asm/processor.h
677     +++ b/arch/x86/include/asm/processor.h
678     @@ -132,6 +132,8 @@ struct cpuinfo_x86 {
679     /* Index into per_cpu list: */
680     u16 cpu_index;
681     u32 microcode;
682     + /* Address space bits used by the cache internally */
683     + u8 x86_cache_bits;
684     } __randomize_layout;
685    
686     struct cpuid_regs {
687     @@ -180,9 +182,9 @@ extern const struct seq_operations cpuinfo_op;
688    
689     extern void cpu_detect(struct cpuinfo_x86 *c);
690    
691     -static inline unsigned long l1tf_pfn_limit(void)
692     +static inline unsigned long long l1tf_pfn_limit(void)
693     {
694     - return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
695     + return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
696     }
697    
698     extern void early_cpu_init(void);
699     diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
700     index 875ca99b82ee..5f00ecb9d251 100644
701     --- a/arch/x86/include/asm/tlbflush.h
702     +++ b/arch/x86/include/asm/tlbflush.h
703     @@ -175,8 +175,16 @@ struct tlb_state {
704     * are on. This means that it may not match current->active_mm,
705     * which will contain the previous user mm when we're in lazy TLB
706     * mode even if we've already switched back to swapper_pg_dir.
707     + *
708     + * During switch_mm_irqs_off(), loaded_mm will be set to
709     + * LOADED_MM_SWITCHING during the brief interrupts-off window
710     + * when CR3 and loaded_mm would otherwise be inconsistent. This
711     + * is for nmi_uaccess_okay()'s benefit.
712     */
713     struct mm_struct *loaded_mm;
714     +
715     +#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
716     +
717     u16 loaded_mm_asid;
718     u16 next_asid;
719     /* last user mm's ctx id */
720     @@ -246,6 +254,38 @@ struct tlb_state {
721     };
722     DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
723    
724     +/*
725     + * Blindly accessing user memory from NMI context can be dangerous
726     + * if we're in the middle of switching the current user task or
727     + * switching the loaded mm. It can also be dangerous if we
728     + * interrupted some kernel code that was temporarily using a
729     + * different mm.
730     + */
731     +static inline bool nmi_uaccess_okay(void)
732     +{
733     + struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
734     + struct mm_struct *current_mm = current->mm;
735     +
736     + VM_WARN_ON_ONCE(!loaded_mm);
737     +
738     + /*
739     + * The condition we want to check is
740     + * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
741     + * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
742     + * is supposed to be reasonably fast.
743     + *
744     + * Instead, we check the almost equivalent but somewhat conservative
745     + * condition below, and we rely on the fact that switch_mm_irqs_off()
746     + * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
747     + */
748     + if (loaded_mm != current_mm)
749     + return false;
750     +
751     + VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
752     +
753     + return true;
754     +}
755     +
756     /* Initialize cr4 shadow for this CPU. */
757     static inline void cr4_init_shadow(void)
758     {
759     diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
760     index 52250681f68c..d92ccff4e615 100644
761     --- a/arch/x86/include/asm/vgtod.h
762     +++ b/arch/x86/include/asm/vgtod.h
763     @@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
764     *
765     * If RDPID is available, use it.
766     */
767     - alternative_io ("lsl %[p],%[seg]",
768     + alternative_io ("lsl %[seg],%[p]",
769     ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
770     X86_FEATURE_RDPID,
771     [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
772     diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
773     index d07addb99b71..3e435f88621d 100644
774     --- a/arch/x86/kernel/cpu/bugs.c
775     +++ b/arch/x86/kernel/cpu/bugs.c
776     @@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
777     enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
778     EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
779    
780     +/*
781     + * These CPUs all support 44bits physical address space internally in the
782     + * cache but CPUID can report a smaller number of physical address bits.
783     + *
784     + * The L1TF mitigation uses the top most address bit for the inversion of
785     + * non present PTEs. When the installed memory reaches into the top most
786     + * address bit due to memory holes, which has been observed on machines
787     + * which report 36bits physical address bits and have 32G RAM installed,
788     + * then the mitigation range check in l1tf_select_mitigation() triggers.
789     + * This is a false positive because the mitigation is still possible due to
790     + * the fact that the cache uses 44bit internally. Use the cache bits
791     + * instead of the reported physical bits and adjust them on the affected
792     + * machines to 44bit if the reported bits are less than 44.
793     + */
794     +static void override_cache_bits(struct cpuinfo_x86 *c)
795     +{
796     + if (c->x86 != 6)
797     + return;
798     +
799     + switch (c->x86_model) {
800     + case INTEL_FAM6_NEHALEM:
801     + case INTEL_FAM6_WESTMERE:
802     + case INTEL_FAM6_SANDYBRIDGE:
803     + case INTEL_FAM6_IVYBRIDGE:
804     + case INTEL_FAM6_HASWELL_CORE:
805     + case INTEL_FAM6_HASWELL_ULT:
806     + case INTEL_FAM6_HASWELL_GT3E:
807     + case INTEL_FAM6_BROADWELL_CORE:
808     + case INTEL_FAM6_BROADWELL_GT3E:
809     + case INTEL_FAM6_SKYLAKE_MOBILE:
810     + case INTEL_FAM6_SKYLAKE_DESKTOP:
811     + case INTEL_FAM6_KABYLAKE_MOBILE:
812     + case INTEL_FAM6_KABYLAKE_DESKTOP:
813     + if (c->x86_cache_bits < 44)
814     + c->x86_cache_bits = 44;
815     + break;
816     + }
817     +}
818     +
819     static void __init l1tf_select_mitigation(void)
820     {
821     u64 half_pa;
822     @@ -659,6 +698,8 @@ static void __init l1tf_select_mitigation(void)
823     if (!boot_cpu_has_bug(X86_BUG_L1TF))
824     return;
825    
826     + override_cache_bits(&boot_cpu_data);
827     +
828     switch (l1tf_mitigation) {
829     case L1TF_MITIGATION_OFF:
830     case L1TF_MITIGATION_FLUSH_NOWARN:
831     @@ -678,14 +719,13 @@ static void __init l1tf_select_mitigation(void)
832     return;
833     #endif
834    
835     - /*
836     - * This is extremely unlikely to happen because almost all
837     - * systems have far more MAX_PA/2 than RAM can be fit into
838     - * DIMM slots.
839     - */
840     half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
841     if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
842     pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
843     + pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
844     + half_pa);
845     + pr_info("However, doing so will make a part of your RAM unusable.\n");
846     + pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
847     return;
848     }
849    
850     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
851     index dd02ee4fa8cd..7d2a7890a823 100644
852     --- a/arch/x86/kernel/cpu/common.c
853     +++ b/arch/x86/kernel/cpu/common.c
854     @@ -890,6 +890,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
855     }
856     }
857     #endif
858     + c->x86_cache_bits = c->x86_phys_bits;
859     }
860    
861     static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
862     diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
863     index 278be092b300..574dcdc092ab 100644
864     --- a/arch/x86/kernel/cpu/intel.c
865     +++ b/arch/x86/kernel/cpu/intel.c
866     @@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
867     if (cpu_has(c, X86_FEATURE_HYPERVISOR))
868     return false;
869    
870     + if (c->x86 != 6)
871     + return false;
872     +
873     for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
874     if (c->x86_model == spectre_bad_microcodes[i].model &&
875     c->x86_stepping == spectre_bad_microcodes[i].stepping)
876     diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
877     index a2d8a3908670..224de37821e4 100644
878     --- a/arch/x86/kernel/dumpstack.c
879     +++ b/arch/x86/kernel/dumpstack.c
880     @@ -17,6 +17,7 @@
881     #include <linux/bug.h>
882     #include <linux/nmi.h>
883     #include <linux/sysfs.h>
884     +#include <linux/kasan.h>
885    
886     #include <asm/cpu_entry_area.h>
887     #include <asm/stacktrace.h>
888     @@ -298,7 +299,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
889     * We're not going to return, but we might be on an IST stack or
890     * have very little stack space left. Rewind the stack and kill
891     * the task.
892     + * Before we rewind the stack, we have to tell KASAN that we're going to
893     + * reuse the task stack and that existing poisons are invalid.
894     */
895     + kasan_unpoison_task_stack(current);
896     rewind_stack_do_exit(signr);
897     }
898     NOKPROBE_SYMBOL(oops_end);
899     diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
900     index fa093b77689f..cbeecfcc66d6 100644
901     --- a/arch/x86/kernel/process_64.c
902     +++ b/arch/x86/kernel/process_64.c
903     @@ -370,6 +370,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
904     start_thread_common(regs, new_ip, new_sp,
905     __USER_CS, __USER_DS, 0);
906     }
907     +EXPORT_SYMBOL_GPL(start_thread);
908    
909     #ifdef CONFIG_COMPAT
910     void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
911     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
912     index 282bbcbf3b6a..f6bebcec60b4 100644
913     --- a/arch/x86/kvm/svm.c
914     +++ b/arch/x86/kvm/svm.c
915     @@ -5067,8 +5067,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
916    
917     clgi();
918    
919     - local_irq_enable();
920     -
921     /*
922     * If this vCPU has touched SPEC_CTRL, restore the guest's value if
923     * it's non-zero. Since vmentry is serialising on affected CPUs, there
924     @@ -5077,6 +5075,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
925     */
926     x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
927    
928     + local_irq_enable();
929     +
930     asm volatile (
931     "push %%" _ASM_BP "; \n\t"
932     "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
933     @@ -5199,12 +5199,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
934     if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
935     svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
936    
937     - x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
938     -
939     reload_tss(vcpu);
940    
941     local_irq_disable();
942    
943     + x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
944     +
945     vcpu->arch.cr2 = svm->vmcb->save.cr2;
946     vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
947     vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
948     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
949     index f015ca3997d9..8958b35f6008 100644
950     --- a/arch/x86/kvm/vmx.c
951     +++ b/arch/x86/kvm/vmx.c
952     @@ -8108,21 +8108,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
953     /* Emulate the VMPTRST instruction */
954     static int handle_vmptrst(struct kvm_vcpu *vcpu)
955     {
956     - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
957     - u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
958     - gva_t vmcs_gva;
959     + unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
960     + u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
961     + gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
962     struct x86_exception e;
963     + gva_t gva;
964    
965     if (!nested_vmx_check_permission(vcpu))
966     return 1;
967    
968     - if (get_vmx_mem_address(vcpu, exit_qualification,
969     - vmx_instruction_info, true, &vmcs_gva))
970     + if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
971     return 1;
972     /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
973     - if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
974     - (void *)&to_vmx(vcpu)->nested.current_vmptr,
975     - sizeof(u64), &e)) {
976     + if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
977     + sizeof(gpa_t), &e)) {
978     kvm_inject_page_fault(vcpu, &e);
979     return 1;
980     }
981     @@ -9171,9 +9170,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
982     * information but as all relevant affected CPUs have 32KiB L1D cache size
983     * there is no point in doing so.
984     */
985     -#define L1D_CACHE_ORDER 4
986     -static void *vmx_l1d_flush_pages;
987     -
988     static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
989     {
990     int size = PAGE_SIZE << L1D_CACHE_ORDER;
991     diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
992     index c8c6ad0d58b8..3f435d7fca5e 100644
993     --- a/arch/x86/lib/usercopy.c
994     +++ b/arch/x86/lib/usercopy.c
995     @@ -7,6 +7,8 @@
996     #include <linux/uaccess.h>
997     #include <linux/export.h>
998    
999     +#include <asm/tlbflush.h>
1000     +
1001     /*
1002     * We rely on the nested NMI work to allow atomic faults from the NMI path; the
1003     * nested NMI paths are careful to preserve CR2.
1004     @@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
1005     if (__range_not_ok(from, n, TASK_SIZE))
1006     return n;
1007    
1008     + if (!nmi_uaccess_okay())
1009     + return n;
1010     +
1011     /*
1012     * Even though this function is typically called from NMI/IRQ context
1013     * disable pagefaults so that its behaviour is consistent even when
1014     diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
1015     index 37f60dfd7e4e..94b8d90830d1 100644
1016     --- a/arch/x86/mm/init.c
1017     +++ b/arch/x86/mm/init.c
1018     @@ -892,7 +892,7 @@ unsigned long max_swapfile_size(void)
1019    
1020     if (boot_cpu_has_bug(X86_BUG_L1TF)) {
1021     /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
1022     - unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
1023     + unsigned long long l1tf_limit = l1tf_pfn_limit();
1024     /*
1025     * We encode swap offsets also with 3 bits below those for pfn
1026     * which makes the usable limit higher.
1027     @@ -900,7 +900,7 @@ unsigned long max_swapfile_size(void)
1028     #if CONFIG_PGTABLE_LEVELS > 2
1029     l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
1030     #endif
1031     - pages = min_t(unsigned long, l1tf_limit, pages);
1032     + pages = min_t(unsigned long long, l1tf_limit, pages);
1033     }
1034     return pages;
1035     }
1036     diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
1037     index 5f4805d69aab..53f1c18b15bd 100644
1038     --- a/arch/x86/mm/mmap.c
1039     +++ b/arch/x86/mm/mmap.c
1040     @@ -191,7 +191,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
1041     /* If it's real memory always allow */
1042     if (pfn_valid(pfn))
1043     return true;
1044     - if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
1045     + if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
1046     return false;
1047     return true;
1048     }
1049     diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
1050     index 0c936435ea93..83a3f4c935fc 100644
1051     --- a/arch/x86/mm/tlb.c
1052     +++ b/arch/x86/mm/tlb.c
1053     @@ -292,6 +292,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
1054    
1055     choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
1056    
1057     + /* Let nmi_uaccess_okay() know that we're changing CR3. */
1058     + this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
1059     + barrier();
1060     +
1061     if (need_flush) {
1062     this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
1063     this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
1064     @@ -322,6 +326,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
1065     if (next != &init_mm)
1066     this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
1067    
1068     + /* Make sure we write CR3 before loaded_mm. */
1069     + barrier();
1070     +
1071     this_cpu_write(cpu_tlbstate.loaded_mm, next);
1072     this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
1073     }
1074     diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
1075     index 8e2e4757adcb..5a42ae4078c2 100644
1076     --- a/drivers/base/power/clock_ops.c
1077     +++ b/drivers/base/power/clock_ops.c
1078     @@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
1079     int of_pm_clk_add_clks(struct device *dev)
1080     {
1081     struct clk **clks;
1082     - unsigned int i, count;
1083     + int i, count;
1084     int ret;
1085    
1086     if (!dev || !dev->of_node)
1087     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1088     index 5feba04ab940..5e55d03d3d01 100644
1089     --- a/drivers/block/nbd.c
1090     +++ b/drivers/block/nbd.c
1091     @@ -112,12 +112,16 @@ struct nbd_device {
1092     struct task_struct *task_setup;
1093     };
1094    
1095     +#define NBD_CMD_REQUEUED 1
1096     +
1097     struct nbd_cmd {
1098     struct nbd_device *nbd;
1099     + struct mutex lock;
1100     int index;
1101     int cookie;
1102     - struct completion send_complete;
1103     blk_status_t status;
1104     + unsigned long flags;
1105     + u32 cmd_cookie;
1106     };
1107    
1108     #if IS_ENABLED(CONFIG_DEBUG_FS)
1109     @@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
1110     return disk_to_dev(nbd->disk);
1111     }
1112    
1113     +static void nbd_requeue_cmd(struct nbd_cmd *cmd)
1114     +{
1115     + struct request *req = blk_mq_rq_from_pdu(cmd);
1116     +
1117     + if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
1118     + blk_mq_requeue_request(req, true);
1119     +}
1120     +
1121     +#define NBD_COOKIE_BITS 32
1122     +
1123     +static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
1124     +{
1125     + struct request *req = blk_mq_rq_from_pdu(cmd);
1126     + u32 tag = blk_mq_unique_tag(req);
1127     + u64 cookie = cmd->cmd_cookie;
1128     +
1129     + return (cookie << NBD_COOKIE_BITS) | tag;
1130     +}
1131     +
1132     +static u32 nbd_handle_to_tag(u64 handle)
1133     +{
1134     + return (u32)handle;
1135     +}
1136     +
1137     +static u32 nbd_handle_to_cookie(u64 handle)
1138     +{
1139     + return (u32)(handle >> NBD_COOKIE_BITS);
1140     +}
1141     +
1142     static const char *nbdcmd_to_ascii(int cmd)
1143     {
1144     switch (cmd) {
1145     @@ -306,6 +339,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
1146     }
1147     config = nbd->config;
1148    
1149     + if (!mutex_trylock(&cmd->lock))
1150     + return BLK_EH_RESET_TIMER;
1151     +
1152     if (config->num_connections > 1) {
1153     dev_err_ratelimited(nbd_to_dev(nbd),
1154     "Connection timed out, retrying\n");
1155     @@ -328,7 +364,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
1156     nbd_mark_nsock_dead(nbd, nsock, 1);
1157     mutex_unlock(&nsock->tx_lock);
1158     }
1159     - blk_mq_requeue_request(req, true);
1160     + mutex_unlock(&cmd->lock);
1161     + nbd_requeue_cmd(cmd);
1162     nbd_config_put(nbd);
1163     return BLK_EH_NOT_HANDLED;
1164     }
1165     @@ -338,6 +375,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
1166     }
1167     set_bit(NBD_TIMEDOUT, &config->runtime_flags);
1168     cmd->status = BLK_STS_IOERR;
1169     + mutex_unlock(&cmd->lock);
1170     sock_shutdown(nbd);
1171     nbd_config_put(nbd);
1172    
1173     @@ -414,9 +452,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
1174     struct iov_iter from;
1175     unsigned long size = blk_rq_bytes(req);
1176     struct bio *bio;
1177     + u64 handle;
1178     u32 type;
1179     u32 nbd_cmd_flags = 0;
1180     - u32 tag = blk_mq_unique_tag(req);
1181     int sent = nsock->sent, skip = 0;
1182    
1183     iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
1184     @@ -458,6 +496,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
1185     goto send_pages;
1186     }
1187     iov_iter_advance(&from, sent);
1188     + } else {
1189     + cmd->cmd_cookie++;
1190     }
1191     cmd->index = index;
1192     cmd->cookie = nsock->cookie;
1193     @@ -466,7 +506,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
1194     request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
1195     request.len = htonl(size);
1196     }
1197     - memcpy(request.handle, &tag, sizeof(tag));
1198     + handle = nbd_cmd_handle(cmd);
1199     + memcpy(request.handle, &handle, sizeof(handle));
1200    
1201     dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
1202     cmd, nbdcmd_to_ascii(type),
1203     @@ -484,6 +525,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
1204     nsock->pending = req;
1205     nsock->sent = sent;
1206     }
1207     + set_bit(NBD_CMD_REQUEUED, &cmd->flags);
1208     return BLK_STS_RESOURCE;
1209     }
1210     dev_err_ratelimited(disk_to_dev(nbd->disk),
1211     @@ -525,6 +567,7 @@ send_pages:
1212     */
1213     nsock->pending = req;
1214     nsock->sent = sent;
1215     + set_bit(NBD_CMD_REQUEUED, &cmd->flags);
1216     return BLK_STS_RESOURCE;
1217     }
1218     dev_err(disk_to_dev(nbd->disk),
1219     @@ -557,10 +600,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
1220     struct nbd_reply reply;
1221     struct nbd_cmd *cmd;
1222     struct request *req = NULL;
1223     + u64 handle;
1224     u16 hwq;
1225     u32 tag;
1226     struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
1227     struct iov_iter to;
1228     + int ret = 0;
1229    
1230     reply.magic = 0;
1231     iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
1232     @@ -578,8 +623,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
1233     return ERR_PTR(-EPROTO);
1234     }
1235    
1236     - memcpy(&tag, reply.handle, sizeof(u32));
1237     -
1238     + memcpy(&handle, reply.handle, sizeof(handle));
1239     + tag = nbd_handle_to_tag(handle);
1240     hwq = blk_mq_unique_tag_to_hwq(tag);
1241     if (hwq < nbd->tag_set.nr_hw_queues)
1242     req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
1243     @@ -590,11 +635,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
1244     return ERR_PTR(-ENOENT);
1245     }
1246     cmd = blk_mq_rq_to_pdu(req);
1247     +
1248     + mutex_lock(&cmd->lock);
1249     + if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
1250     + dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
1251     + req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
1252     + ret = -ENOENT;
1253     + goto out;
1254     + }
1255     + if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
1256     + dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
1257     + req);
1258     + ret = -ENOENT;
1259     + goto out;
1260     + }
1261     if (ntohl(reply.error)) {
1262     dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
1263     ntohl(reply.error));
1264     cmd->status = BLK_STS_IOERR;
1265     - return cmd;
1266     + goto out;
1267     }
1268    
1269     dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
1270     @@ -619,18 +678,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
1271     if (nbd_disconnected(config) ||
1272     config->num_connections <= 1) {
1273     cmd->status = BLK_STS_IOERR;
1274     - return cmd;
1275     + goto out;
1276     }
1277     - return ERR_PTR(-EIO);
1278     + ret = -EIO;
1279     + goto out;
1280     }
1281     dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
1282     cmd, bvec.bv_len);
1283     }
1284     - } else {
1285     - /* See the comment in nbd_queue_rq. */
1286     - wait_for_completion(&cmd->send_complete);
1287     }
1288     - return cmd;
1289     +out:
1290     + mutex_unlock(&cmd->lock);
1291     + return ret ? ERR_PTR(ret) : cmd;
1292     }
1293    
1294     static void recv_work(struct work_struct *work)
1295     @@ -793,7 +852,7 @@ again:
1296     */
1297     blk_mq_start_request(req);
1298     if (unlikely(nsock->pending && nsock->pending != req)) {
1299     - blk_mq_requeue_request(req, true);
1300     + nbd_requeue_cmd(cmd);
1301     ret = 0;
1302     goto out;
1303     }
1304     @@ -806,7 +865,7 @@ again:
1305     dev_err_ratelimited(disk_to_dev(nbd->disk),
1306     "Request send failed, requeueing\n");
1307     nbd_mark_nsock_dead(nbd, nsock, 1);
1308     - blk_mq_requeue_request(req, true);
1309     + nbd_requeue_cmd(cmd);
1310     ret = 0;
1311     }
1312     out:
1313     @@ -830,7 +889,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1314     * that the server is misbehaving (or there was an error) before we're
1315     * done sending everything over the wire.
1316     */
1317     - init_completion(&cmd->send_complete);
1318     + mutex_lock(&cmd->lock);
1319     + clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
1320    
1321     /* We can be called directly from the user space process, which means we
1322     * could possibly have signals pending so our sendmsg will fail. In
1323     @@ -842,7 +902,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1324     ret = BLK_STS_IOERR;
1325     else if (!ret)
1326     ret = BLK_STS_OK;
1327     - complete(&cmd->send_complete);
1328     + mutex_unlock(&cmd->lock);
1329    
1330     return ret;
1331     }
1332     @@ -1446,6 +1506,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1333     {
1334     struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1335     cmd->nbd = set->driver_data;
1336     + cmd->flags = 0;
1337     + mutex_init(&cmd->lock);
1338     return 0;
1339     }
1340    
1341     diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1342     index bfc566d3f31a..8cfa10ab7abc 100644
1343     --- a/drivers/cdrom/cdrom.c
1344     +++ b/drivers/cdrom/cdrom.c
1345     @@ -2542,7 +2542,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
1346     if (!CDROM_CAN(CDC_SELECT_DISC) ||
1347     (arg == CDSL_CURRENT || arg == CDSL_NONE))
1348     return cdi->ops->drive_status(cdi, CDSL_CURRENT);
1349     - if (((int)arg >= cdi->capacity))
1350     + if (arg >= cdi->capacity)
1351     return -EINVAL;
1352     return cdrom_slot_status(cdi, arg);
1353     }
1354     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1355     index dba5259def60..86b526b7d990 100644
1356     --- a/drivers/char/tpm/tpm-interface.c
1357     +++ b/drivers/char/tpm/tpm-interface.c
1358     @@ -423,7 +423,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
1359     header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
1360     header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
1361     TSS2_RESMGR_TPM_RC_LAYER);
1362     - return bufsiz;
1363     + return sizeof(*header);
1364     }
1365    
1366     if (bufsiz > TPM_BUFSIZE)
1367     diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
1368     index 6847120b61cd..62d0a69f8da0 100644
1369     --- a/drivers/clk/rockchip/clk-rk3399.c
1370     +++ b/drivers/clk/rockchip/clk-rk3399.c
1371     @@ -630,7 +630,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
1372     MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
1373     RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
1374     COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
1375     - RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
1376     + RK3399_CLKSEL_CON(31), 2, 1, MFLAGS,
1377     RK3399_CLKGATE_CON(8), 12, GFLAGS),
1378    
1379     /* uart */
1380     diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
1381     index 02ba5f2aa0e6..cd777c75291d 100644
1382     --- a/drivers/crypto/vmx/aes_ctr.c
1383     +++ b/drivers/crypto/vmx/aes_ctr.c
1384     @@ -27,21 +27,23 @@
1385     #include <asm/switch_to.h>
1386     #include <crypto/aes.h>
1387     #include <crypto/scatterwalk.h>
1388     +#include <crypto/skcipher.h>
1389     +
1390     #include "aesp8-ppc.h"
1391    
1392     struct p8_aes_ctr_ctx {
1393     - struct crypto_blkcipher *fallback;
1394     + struct crypto_skcipher *fallback;
1395     struct aes_key enc_key;
1396     };
1397    
1398     static int p8_aes_ctr_init(struct crypto_tfm *tfm)
1399     {
1400     const char *alg = crypto_tfm_alg_name(tfm);
1401     - struct crypto_blkcipher *fallback;
1402     + struct crypto_skcipher *fallback;
1403     struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
1404    
1405     - fallback =
1406     - crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
1407     + fallback = crypto_alloc_skcipher(alg, 0,
1408     + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1409     if (IS_ERR(fallback)) {
1410     printk(KERN_ERR
1411     "Failed to allocate transformation for '%s': %ld\n",
1412     @@ -49,9 +51,9 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
1413     return PTR_ERR(fallback);
1414     }
1415    
1416     - crypto_blkcipher_set_flags(
1417     + crypto_skcipher_set_flags(
1418     fallback,
1419     - crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
1420     + crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
1421     ctx->fallback = fallback;
1422    
1423     return 0;
1424     @@ -62,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
1425     struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
1426    
1427     if (ctx->fallback) {
1428     - crypto_free_blkcipher(ctx->fallback);
1429     + crypto_free_skcipher(ctx->fallback);
1430     ctx->fallback = NULL;
1431     }
1432     }
1433     @@ -81,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
1434     pagefault_enable();
1435     preempt_enable();
1436    
1437     - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
1438     + ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
1439     return ret;
1440     }
1441    
1442     @@ -115,15 +117,14 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
1443     struct blkcipher_walk walk;
1444     struct p8_aes_ctr_ctx *ctx =
1445     crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
1446     - struct blkcipher_desc fallback_desc = {
1447     - .tfm = ctx->fallback,
1448     - .info = desc->info,
1449     - .flags = desc->flags
1450     - };
1451    
1452     if (in_interrupt()) {
1453     - ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
1454     - nbytes);
1455     + SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
1456     + skcipher_request_set_tfm(req, ctx->fallback);
1457     + skcipher_request_set_callback(req, desc->flags, NULL, NULL);
1458     + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
1459     + ret = crypto_skcipher_encrypt(req);
1460     + skcipher_request_zero(req);
1461     } else {
1462     blkcipher_walk_init(&walk, dst, src, nbytes);
1463     ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
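
The aes_ctr conversion above swaps the removed blkcipher fallback for the skcipher API; the on-stack request sequence in the hunk is the stock way to drive a synchronous fallback. Condensed into one helper (hypothetical demo_ name, same calls as the patch):

#include <crypto/skcipher.h>

static int demo_fallback_encrypt(struct crypto_skcipher *fallback,
				 struct scatterlist *dst,
				 struct scatterlist *src,
				 unsigned int nbytes, void *iv, u32 flags)
{
	SKCIPHER_REQUEST_ON_STACK(req, fallback);
	int ret;

	skcipher_request_set_tfm(req, fallback);
	skcipher_request_set_callback(req, flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);	/* wipe key material off the stack */
	return ret;
}
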
1464     diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
1465     index d6f3d9ee1350..70b3c556f6cf 100644
1466     --- a/drivers/gpio/gpiolib-acpi.c
1467     +++ b/drivers/gpio/gpiolib-acpi.c
1468     @@ -25,6 +25,7 @@
1469    
1470     struct acpi_gpio_event {
1471     struct list_head node;
1472     + struct list_head initial_sync_list;
1473     acpi_handle handle;
1474     unsigned int pin;
1475     unsigned int irq;
1476     @@ -50,6 +51,9 @@ struct acpi_gpio_chip {
1477     struct list_head events;
1478     };
1479    
1480     +static LIST_HEAD(acpi_gpio_initial_sync_list);
1481     +static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
1482     +
1483     static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
1484     {
1485     if (!gc->parent)
1486     @@ -142,6 +146,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
1487     return gpiochip_get_desc(chip, offset);
1488     }
1489    
1490     +static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
1491     +{
1492     + mutex_lock(&acpi_gpio_initial_sync_list_lock);
1493     + list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
1494     + mutex_unlock(&acpi_gpio_initial_sync_list_lock);
1495     +}
1496     +
1497     +static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
1498     +{
1499     + mutex_lock(&acpi_gpio_initial_sync_list_lock);
1500     + if (!list_empty(&event->initial_sync_list))
1501     + list_del_init(&event->initial_sync_list);
1502     + mutex_unlock(&acpi_gpio_initial_sync_list_lock);
1503     +}
1504     +
1505     static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
1506     {
1507     struct acpi_gpio_event *event = data;
1508     @@ -193,7 +212,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
1509     irq_handler_t handler = NULL;
1510     struct gpio_desc *desc;
1511     unsigned long irqflags;
1512     - int ret, pin, irq;
1513     + int ret, pin, irq, value;
1514    
1515     if (!acpi_gpio_get_irq_resource(ares, &agpio))
1516     return AE_OK;
1517     @@ -228,6 +247,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
1518    
1519     gpiod_direction_input(desc);
1520    
1521     + value = gpiod_get_value(desc);
1522     +
1523     ret = gpiochip_lock_as_irq(chip, pin);
1524     if (ret) {
1525     dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
1526     @@ -269,6 +290,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
1527     event->irq = irq;
1528     event->pin = pin;
1529     event->desc = desc;
1530     + INIT_LIST_HEAD(&event->initial_sync_list);
1531    
1532     ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
1533     "ACPI:Event", event);
1534     @@ -283,6 +305,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
1535     enable_irq_wake(irq);
1536    
1537     list_add_tail(&event->node, &acpi_gpio->events);
1538     +
1539     + /*
1540     + * Make sure we trigger the initial state of the IRQ when using RISING
1541     + * or FALLING. Note we run the handlers at late_init, because the AML
1542     + * code may refer to OperationRegions from other (builtin) drivers
1543     + * which may be probed after us.
1544     + */
1545     + if (handler == acpi_gpio_irq_handler &&
1546     + (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
1547     + ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
1548     + acpi_gpio_add_to_initial_sync_list(event);
1549     +
1550     return AE_OK;
1551    
1552     fail_free_event:
1553     @@ -355,6 +389,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
1554     list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
1555     struct gpio_desc *desc;
1556    
1557     + acpi_gpio_del_from_initial_sync_list(event);
1558     +
1559     if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
1560     disable_irq_wake(event->irq);
1561    
1562     @@ -1210,3 +1246,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
1563    
1564     return con_id == NULL;
1565     }
1566     +
1567     +/* Sync the initial state of handlers after all builtin drivers have probed */
1568     +static int acpi_gpio_initial_sync(void)
1569     +{
1570     + struct acpi_gpio_event *event, *ep;
1571     +
1572     + mutex_lock(&acpi_gpio_initial_sync_list_lock);
1573     + list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
1574     + initial_sync_list) {
1575     + acpi_evaluate_object(event->handle, NULL, NULL, NULL);
1576     + list_del_init(&event->initial_sync_list);
1577     + }
1578     + mutex_unlock(&acpi_gpio_initial_sync_list_lock);
1579     +
1580     + return 0;
1581     +}
1582     +/* We must use _sync so that this runs after the first deferred_probe run */
1583     +late_initcall_sync(acpi_gpio_initial_sync);
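
The gpiolib-acpi change collects edge-triggered events whose GPIO already sits at the trigger level and replays them once from a late_initcall_sync, after builtin drivers have had their first deferred-probe pass. The list-and-replay mechanics, reduced to a sketch with hypothetical demo_* names:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(demo_sync_list);
static DEFINE_MUTEX(demo_sync_lock);

struct demo_event {
	struct list_head sync_node;
};

static int demo_initial_sync(void)
{
	struct demo_event *ev, *tmp;

	mutex_lock(&demo_sync_lock);
	list_for_each_entry_safe(ev, tmp, &demo_sync_list, sync_node) {
		/* run the handler once for the already-asserted line */
		list_del_init(&ev->sync_node);
	}
	mutex_unlock(&demo_sync_lock);
	return 0;
}
late_initcall_sync(demo_initial_sync);	/* after the first deferred-probe run */
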
1584     diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
1585     index b2431aee7887..f5091827628a 100644
1586     --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
1587     +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
1588     @@ -424,6 +424,18 @@ static void adv7511_hpd_work(struct work_struct *work)
1589     else
1590     status = connector_status_disconnected;
1591    
1592     + /*
1593     + * The bridge resets its registers on unplug. So when we get a plug
1594     + * event and we're already supposed to be powered, cycle the bridge to
1595     + * restore its state.
1596     + */
1597     + if (status == connector_status_connected &&
1598     + adv7511->connector.status == connector_status_disconnected &&
1599     + adv7511->powered) {
1600     + regcache_mark_dirty(adv7511->regmap);
1601     + adv7511_power_on(adv7511);
1602     + }
1603     +
1604     if (adv7511->connector.status != status) {
1605     adv7511->connector.status = status;
1606     drm_kms_helper_hotplug_event(adv7511->connector.dev);
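
The adv7511 fix leans on regmap's register cache: marking the cache dirty before powering back on makes the power-on path's regcache_sync() rewrite everything the bridge lost on unplug. The recovery idiom, assuming a regmap with a register cache:

#include <linux/regmap.h>

static void demo_restore_after_replug(struct regmap *map)
{
	/* forget what we believe the hardware holds ... */
	regcache_mark_dirty(map);
	/* ... so the next sync rewrites all cached registers */
	regcache_sync(map);
}
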
1607     diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
1608     index 56dd7a9a8e25..dd5312b02a8d 100644
1609     --- a/drivers/gpu/drm/imx/imx-ldb.c
1610     +++ b/drivers/gpu/drm/imx/imx-ldb.c
1611     @@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
1612     return PTR_ERR(imx_ldb->regmap);
1613     }
1614    
1615     + /* disable LDB by resetting the control register to POR default */
1616     + regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
1617     +
1618     imx_ldb->dev = dev;
1619    
1620     if (of_id)
1621     @@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
1622     if (ret || i < 0 || i > 1)
1623     return -EINVAL;
1624    
1625     + if (!of_device_is_available(child))
1626     + continue;
1627     +
1628     if (dual && i > 0) {
1629     dev_warn(dev, "dual-channel mode, ignoring second output\n");
1630     continue;
1631     }
1632    
1633     - if (!of_device_is_available(child))
1634     - continue;
1635     -
1636     channel = &imx_ldb->channel[i];
1637     channel->ldb = imx_ldb;
1638     channel->chno = i;
1639     diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
1640     index 2a75ab80527a..2c149b841cf1 100644
1641     --- a/drivers/gpu/drm/udl/udl_drv.h
1642     +++ b/drivers/gpu/drm/udl/udl_drv.h
1643     @@ -110,7 +110,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
1644     struct drm_file *file,
1645     const struct drm_mode_fb_cmd2 *mode_cmd);
1646    
1647     -int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
1648     +int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
1649     const char *front, char **urb_buf_ptr,
1650     u32 byte_offset, u32 device_byte_offset, u32 byte_width,
1651     int *ident_ptr, int *sent_ptr);
1652     diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
1653     index d5583190f3e4..8746eeeec44d 100644
1654     --- a/drivers/gpu/drm/udl/udl_fb.c
1655     +++ b/drivers/gpu/drm/udl/udl_fb.c
1656     @@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
1657     int bytes_identical = 0;
1658     struct urb *urb;
1659     int aligned_x;
1660     - int bpp = fb->base.format->cpp[0];
1661     + int log_bpp;
1662     +
1663     + BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
1664     + log_bpp = __ffs(fb->base.format->cpp[0]);
1665    
1666     if (!fb->active_16)
1667     return 0;
1668     @@ -125,12 +128,12 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
1669    
1670     for (i = y; i < y + height ; i++) {
1671     const int line_offset = fb->base.pitches[0] * i;
1672     - const int byte_offset = line_offset + (x * bpp);
1673     - const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
1674     - if (udl_render_hline(dev, bpp, &urb,
1675     + const int byte_offset = line_offset + (x << log_bpp);
1676     + const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
1677     + if (udl_render_hline(dev, log_bpp, &urb,
1678     (char *) fb->obj->vmapping,
1679     &cmd, byte_offset, dev_byte_offset,
1680     - width * bpp,
1681     + width << log_bpp,
1682     &bytes_identical, &bytes_sent))
1683     goto error;
1684     }
1685     @@ -149,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
1686     error:
1687     atomic_add(bytes_sent, &udl->bytes_sent);
1688     atomic_add(bytes_identical, &udl->bytes_identical);
1689     - atomic_add(width*height*bpp, &udl->bytes_rendered);
1690     + atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
1691     end_cycles = get_cycles();
1692     atomic_add(((unsigned int) ((end_cycles - start_cycles)
1693     >> 10)), /* Kcycles */
1694     @@ -221,7 +224,7 @@ static int udl_fb_open(struct fb_info *info, int user)
1695    
1696     struct fb_deferred_io *fbdefio;
1697    
1698     - fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
1699     + fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
1700    
1701     if (fbdefio) {
1702     fbdefio->delay = DL_DEFIO_WRITE_DELAY;
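
Throughout the udl rework, multiplications and divisions by bpp (2 or 4 bytes per pixel) become shifts by log_bpp (1 or 2); the substitution is exact because bpp is a power of two. A standalone check of the identities the hunks rely on:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int bpp = 4;				/* bytes per pixel, power of 2 */
	int log_bpp = __builtin_ctz(bpp);	/* kernel code uses __ffs() */
	int x = 123, width = 640;
	int bytes = width * bpp;

	assert((x * bpp) == (x << log_bpp));
	assert((width * bpp) == (width << log_bpp));
	assert((bytes / bpp) == (bytes >> log_bpp));
	printf("log_bpp=%d: shift identities hold\n", log_bpp);
	return 0;
}
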
1703     diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
1704     index 0328b2c7b210..f8ea3c99b523 100644
1705     --- a/drivers/gpu/drm/udl/udl_main.c
1706     +++ b/drivers/gpu/drm/udl/udl_main.c
1707     @@ -169,18 +169,13 @@ static void udl_free_urb_list(struct drm_device *dev)
1708     struct list_head *node;
1709     struct urb_node *unode;
1710     struct urb *urb;
1711     - int ret;
1712     unsigned long flags;
1713    
1714     DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
1715    
1716     /* keep waiting and freeing, until we've got 'em all */
1717     while (count--) {
1718     -
1719     - /* Getting interrupted means a leak, but ok at shutdown*/
1720     - ret = down_interruptible(&udl->urbs.limit_sem);
1721     - if (ret)
1722     - break;
1723     + down(&udl->urbs.limit_sem);
1724    
1725     spin_lock_irqsave(&udl->urbs.lock, flags);
1726    
1727     @@ -204,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev)
1728     static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
1729     {
1730     struct udl_device *udl = dev->dev_private;
1731     - int i = 0;
1732     struct urb *urb;
1733     struct urb_node *unode;
1734     char *buf;
1735     + size_t wanted_size = count * size;
1736    
1737     spin_lock_init(&udl->urbs.lock);
1738    
1739     +retry:
1740     udl->urbs.size = size;
1741     INIT_LIST_HEAD(&udl->urbs.list);
1742    
1743     - while (i < count) {
1744     + sema_init(&udl->urbs.limit_sem, 0);
1745     + udl->urbs.count = 0;
1746     + udl->urbs.available = 0;
1747     +
1748     + while (udl->urbs.count * size < wanted_size) {
1749     unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
1750     if (!unode)
1751     break;
1752     @@ -230,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
1753     }
1754     unode->urb = urb;
1755    
1756     - buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
1757     + buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
1758     &urb->transfer_dma);
1759     if (!buf) {
1760     kfree(unode);
1761     usb_free_urb(urb);
1762     + if (size > PAGE_SIZE) {
1763     + size /= 2;
1764     + udl_free_urb_list(dev);
1765     + goto retry;
1766     + }
1767     break;
1768     }
1769    
1770     @@ -245,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
1771    
1772     list_add_tail(&unode->entry, &udl->urbs.list);
1773    
1774     - i++;
1775     + up(&udl->urbs.limit_sem);
1776     + udl->urbs.count++;
1777     + udl->urbs.available++;
1778     }
1779    
1780     - sema_init(&udl->urbs.limit_sem, i);
1781     - udl->urbs.count = i;
1782     - udl->urbs.available = i;
1783     -
1784     - DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
1785     + DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
1786    
1787     - return i;
1788     + return udl->urbs.count;
1789     }
1790    
1791     struct urb *udl_get_urb(struct drm_device *dev)
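
The reworked allocator retries with half the buffer size whenever usb_alloc_coherent() fails, bottoming out at PAGE_SIZE, so under memory pressure it still reaches the wanted total transfer capacity with more, smaller buffers. Its control flow reduced to a sketch (demo_* names hypothetical; demo_alloc stands in for the coherent allocation):

#include <stdbool.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096UL

static bool demo_alloc(size_t size)
{
	/* stand-in: pretend large coherent buffers are unavailable */
	return size <= 2 * DEMO_PAGE_SIZE;
}

static size_t demo_alloc_all(size_t count, size_t size)
{
	size_t wanted = count * size;	/* fixed target, like wanted_size */
	size_t got;

retry:
	got = 0;
	while (got * size < wanted) {
		if (!demo_alloc(size)) {
			if (size > DEMO_PAGE_SIZE) {
				size /= 2;	/* free everything, retry smaller */
				goto retry;
			}
			break;
		}
		got++;
	}
	return got * size;	/* bytes actually backed by buffers */
}
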
1792     diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
1793     index b992644c17e6..f3331d33547a 100644
1794     --- a/drivers/gpu/drm/udl/udl_transfer.c
1795     +++ b/drivers/gpu/drm/udl/udl_transfer.c
1796     @@ -83,12 +83,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel)
1797     ((pixel >> 8) & 0xf800));
1798     }
1799    
1800     -static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp)
1801     +static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp)
1802     {
1803     - u16 pixel_val16 = 0;
1804     - if (bpp == 2)
1805     + u16 pixel_val16;
1806     + if (log_bpp == 1)
1807     pixel_val16 = *(const uint16_t *)pixel;
1808     - else if (bpp == 4)
1809     + else
1810     pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);
1811     return pixel_val16;
1812     }
1813     @@ -125,8 +125,9 @@ static void udl_compress_hline16(
1814     const u8 *const pixel_end,
1815     uint32_t *device_address_ptr,
1816     uint8_t **command_buffer_ptr,
1817     - const uint8_t *const cmd_buffer_end, int bpp)
1818     + const uint8_t *const cmd_buffer_end, int log_bpp)
1819     {
1820     + const int bpp = 1 << log_bpp;
1821     const u8 *pixel = *pixel_start_ptr;
1822     uint32_t dev_addr = *device_address_ptr;
1823     uint8_t *cmd = *command_buffer_ptr;
1824     @@ -153,12 +154,12 @@ static void udl_compress_hline16(
1825     raw_pixels_count_byte = cmd++; /* we'll know this later */
1826     raw_pixel_start = pixel;
1827    
1828     - cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
1829     - (unsigned long)(pixel_end - pixel) / bpp,
1830     - (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
1831     + cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL,
1832     + (unsigned long)(pixel_end - pixel) >> log_bpp,
1833     + (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp);
1834    
1835     prefetch_range((void *) pixel, cmd_pixel_end - pixel);
1836     - pixel_val16 = get_pixel_val16(pixel, bpp);
1837     + pixel_val16 = get_pixel_val16(pixel, log_bpp);
1838    
1839     while (pixel < cmd_pixel_end) {
1840     const u8 *const start = pixel;
1841     @@ -170,7 +171,7 @@ static void udl_compress_hline16(
1842     pixel += bpp;
1843    
1844     while (pixel < cmd_pixel_end) {
1845     - pixel_val16 = get_pixel_val16(pixel, bpp);
1846     + pixel_val16 = get_pixel_val16(pixel, log_bpp);
1847     if (pixel_val16 != repeating_pixel_val16)
1848     break;
1849     pixel += bpp;
1850     @@ -179,10 +180,10 @@ static void udl_compress_hline16(
1851     if (unlikely(pixel > start + bpp)) {
1852     /* go back and fill in raw pixel count */
1853     *raw_pixels_count_byte = (((start -
1854     - raw_pixel_start) / bpp) + 1) & 0xFF;
1855     + raw_pixel_start) >> log_bpp) + 1) & 0xFF;
1856    
1857     /* immediately after raw data is repeat byte */
1858     - *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
1859     + *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF;
1860    
1861     /* Then start another raw pixel span */
1862     raw_pixel_start = pixel;
1863     @@ -192,14 +193,14 @@ static void udl_compress_hline16(
1864    
1865     if (pixel > raw_pixel_start) {
1866     /* finalize last RAW span */
1867     - *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
1868     + *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF;
1869     } else {
1870     /* undo unused byte */
1871     cmd--;
1872     }
1873    
1874     - *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
1875     - dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
1876     + *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF;
1877     + dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2;
1878     }
1879    
1880     if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
1881     @@ -222,19 +223,19 @@ static void udl_compress_hline16(
1882     * (that we can only write to, slowly, and can never read), and (optionally)
1883     * our shadow copy that tracks what's been sent to that hardware buffer.
1884     */
1885     -int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
1886     +int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
1887     const char *front, char **urb_buf_ptr,
1888     u32 byte_offset, u32 device_byte_offset,
1889     u32 byte_width,
1890     int *ident_ptr, int *sent_ptr)
1891     {
1892     const u8 *line_start, *line_end, *next_pixel;
1893     - u32 base16 = 0 + (device_byte_offset / bpp) * 2;
1894     + u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
1895     struct urb *urb = *urb_ptr;
1896     u8 *cmd = *urb_buf_ptr;
1897     u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
1898    
1899     - BUG_ON(!(bpp == 2 || bpp == 4));
1900     + BUG_ON(!(log_bpp == 1 || log_bpp == 2));
1901    
1902     line_start = (u8 *) (front + byte_offset);
1903     next_pixel = line_start;
1904     @@ -244,7 +245,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
1905    
1906     udl_compress_hline16(&next_pixel,
1907     line_end, &base16,
1908     - (u8 **) &cmd, (u8 *) cmd_end, bpp);
1909     + (u8 **) &cmd, (u8 *) cmd_end, log_bpp);
1910    
1911     if (cmd >= cmd_end) {
1912     int len = cmd - (u8 *) urb->transfer_buffer;
1913     diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
1914     index 5f87764d7015..ca9941fa741b 100644
1915     --- a/drivers/hwmon/nct6775.c
1916     +++ b/drivers/hwmon/nct6775.c
1917     @@ -63,6 +63,7 @@
1918     #include <linux/bitops.h>
1919     #include <linux/dmi.h>
1920     #include <linux/io.h>
1921     +#include <linux/nospec.h>
1922     #include "lm75.h"
1923    
1924     #define USE_ALTERNATE
1925     @@ -2642,6 +2643,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
1926     return err;
1927     if (val > NUM_TEMP)
1928     return -EINVAL;
1929     + val = array_index_nospec(val, NUM_TEMP + 1);
1930     if (val && (!(data->have_temp & BIT(val - 1)) ||
1931     !data->temp_src[val - 1]))
1932     return -EINVAL;
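
The nct6775 change is Spectre-v1 hardening: the bounds check alone does not stop a mispredicted branch from speculatively indexing past the array, so array_index_nospec() also clamps the value under speculation. The idiom, with num_temp standing in for NUM_TEMP:

#include <linux/nospec.h>

static int demo_select(unsigned long val, unsigned long num_temp)
{
	if (val > num_temp)
		return -EINVAL;
	/* val is in [0, num_temp]; keep it there under speculation too */
	val = array_index_nospec(val, num_temp + 1);
	/* ... val is now safe to use as an array index ... */
	return 0;
}
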
1933     diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
1934     index b8c43535f16c..5cf670f57be7 100644
1935     --- a/drivers/i2c/busses/i2c-davinci.c
1936     +++ b/drivers/i2c/busses/i2c-davinci.c
1937     @@ -234,12 +234,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
1938     /*
1939     * It's not always possible to have 1 to 2 ratio when d=7, so fall back
1940     * to minimal possible clkh in this case.
1941     + *
1942     + * Note:
1943     + * CLKH is not allowed to be 0; in that case the I2C clock is not
1944     + * generated at all.
1945     */
1946     - if (clk >= clkl + d) {
1947     + if (clk > clkl + d) {
1948     clkh = clk - clkl - d;
1949     clkl -= d;
1950     } else {
1951     - clkh = 0;
1952     + clkh = 1;
1953     clkl = clk - (d << 1);
1954     }
1955    
1956     diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
1957     index 6f2fe63e8f5a..7b961c9c62ef 100644
1958     --- a/drivers/i2c/i2c-core-base.c
1959     +++ b/drivers/i2c/i2c-core-base.c
1960     @@ -638,7 +638,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
1961     static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
1962     unsigned int flags)
1963     {
1964     - rt_mutex_lock(&adapter->bus_lock);
1965     + rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
1966     }
1967    
1968     /**
1969     diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
1970     index 9669ca4937b8..7ba31f6bf148 100644
1971     --- a/drivers/i2c/i2c-mux.c
1972     +++ b/drivers/i2c/i2c-mux.c
1973     @@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
1974     struct i2c_mux_priv *priv = adapter->algo_data;
1975     struct i2c_adapter *parent = priv->muxc->parent;
1976    
1977     - rt_mutex_lock(&parent->mux_lock);
1978     + rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
1979     if (!(flags & I2C_LOCK_ROOT_ADAPTER))
1980     return;
1981     i2c_lock_bus(parent, flags);
1982     @@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
1983     struct i2c_mux_priv *priv = adapter->algo_data;
1984     struct i2c_adapter *parent = priv->muxc->parent;
1985    
1986     - rt_mutex_lock(&parent->mux_lock);
1987     + rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
1988     i2c_lock_bus(parent, flags);
1989     }
1990    
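
Both i2c hunks pass the adapter's depth in the mux tree to rt_mutex_lock_nested() as a lockdep subclass; without it, taking a parent's lock while holding a child's looks like recursive locking and triggers a false lockdep splat. A sketch of the annotation, using the same two helpers as the patch:

#include <linux/i2c.h>
#include <linux/rtmutex.h>

static void demo_lock_adapter(struct i2c_adapter *adapter)
{
	/*
	 * Each mux level gets its own subclass, so holding a child's
	 * bus_lock while taking the parent's is not flagged.
	 */
	rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
	/* ... transfer ... */
	rt_mutex_unlock(&adapter->bus_lock);
}
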
1991     diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
1992     index 3bdb799d3b4b..2c436376f13e 100644
1993     --- a/drivers/iommu/arm-smmu.c
1994     +++ b/drivers/iommu/arm-smmu.c
1995     @@ -2100,12 +2100,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
1996     if (err)
1997     return err;
1998    
1999     - if (smmu->version == ARM_SMMU_V2 &&
2000     - smmu->num_context_banks != smmu->num_context_irqs) {
2001     - dev_err(dev,
2002     - "found only %d context interrupt(s) but %d required\n",
2003     - smmu->num_context_irqs, smmu->num_context_banks);
2004     - return -ENODEV;
2005     + if (smmu->version == ARM_SMMU_V2) {
2006     + if (smmu->num_context_banks > smmu->num_context_irqs) {
2007     + dev_err(dev,
2008     + "found only %d context irq(s) but %d required\n",
2009     + smmu->num_context_irqs, smmu->num_context_banks);
2010     + return -ENODEV;
2011     + }
2012     +
2013     + /* Ignore superfluous interrupts */
2014     + smmu->num_context_irqs = smmu->num_context_banks;
2015     }
2016    
2017     for (i = 0; i < smmu->num_global_irqs; ++i) {
2018     diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
2019     index 22efc039f302..8d1d40dbf744 100644
2020     --- a/drivers/misc/mei/main.c
2021     +++ b/drivers/misc/mei/main.c
2022     @@ -291,7 +291,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
2023     goto out;
2024     }
2025    
2026     - *offset = 0;
2027     cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
2028     if (!cb) {
2029     rets = -ENOMEM;
2030     diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
2031     index ca3fa82316c2..d3ce904e929e 100644
2032     --- a/drivers/net/can/m_can/m_can.c
2033     +++ b/drivers/net/can/m_can/m_can.c
2034     @@ -1637,8 +1637,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
2035     priv->can.clock.freq = clk_get_rate(cclk);
2036     priv->mram_base = mram_addr;
2037    
2038     - m_can_of_parse_mram(priv, mram_config_vals);
2039     -
2040     platform_set_drvdata(pdev, dev);
2041     SET_NETDEV_DEV(dev, &pdev->dev);
2042    
2043     @@ -1649,6 +1647,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
2044     goto failed_free_dev;
2045     }
2046    
2047     + m_can_of_parse_mram(priv, mram_config_vals);
2048     +
2049     devm_can_led_init(dev);
2050    
2051     dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
2052     @@ -1698,8 +1698,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
2053    
2054     pinctrl_pm_select_default_state(dev);
2055    
2056     - m_can_init_ram(priv);
2057     -
2058     priv->can.state = CAN_STATE_ERROR_ACTIVE;
2059    
2060     if (netif_running(ndev)) {
2061     @@ -1709,6 +1707,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
2062     if (ret)
2063     return ret;
2064    
2065     + m_can_init_ram(priv);
2066     m_can_start(ndev);
2067     netif_device_attach(ndev);
2068     netif_start_queue(ndev);
2069     diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
2070     index c7427bdd3a4b..2949a381a94d 100644
2071     --- a/drivers/net/can/mscan/mpc5xxx_can.c
2072     +++ b/drivers/net/can/mscan/mpc5xxx_can.c
2073     @@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
2074     return 0;
2075     }
2076     cdm = of_iomap(np_cdm, 0);
2077     + if (!cdm) {
2078     + of_node_put(np_cdm);
2079     + dev_err(&ofdev->dev, "can't map clock node!\n");
2080     + return 0;
2081     + }
2082    
2083     if (in_8(&cdm->ipb_clk_sel) & 0x1)
2084     freq *= 2;
2085     diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
2086     index 5b7658bcf020..5c3ef9fc8207 100644
2087     --- a/drivers/net/ethernet/3com/Kconfig
2088     +++ b/drivers/net/ethernet/3com/Kconfig
2089     @@ -32,7 +32,7 @@ config EL3
2090    
2091     config 3C515
2092     tristate "3c515 ISA \"Fast EtherLink\""
2093     - depends on ISA && ISA_DMA_API
2094     + depends on ISA && ISA_DMA_API && !PPC32
2095     ---help---
2096     If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
2097     network card, say Y here.
2098     diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
2099     index d5c15e8bb3de..a8e8f4e9c1bb 100644
2100     --- a/drivers/net/ethernet/amd/Kconfig
2101     +++ b/drivers/net/ethernet/amd/Kconfig
2102     @@ -44,7 +44,7 @@ config AMD8111_ETH
2103    
2104     config LANCE
2105     tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
2106     - depends on ISA && ISA_DMA_API && !ARM
2107     + depends on ISA && ISA_DMA_API && !ARM && !PPC32
2108     ---help---
2109     If you have a network (Ethernet) card of this type, say Y here.
2110     Some LinkSys cards are of this type.
2111     @@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
2112    
2113     config NI65
2114     tristate "NI6510 support"
2115     - depends on ISA && ISA_DMA_API && !ARM
2116     + depends on ISA && ISA_DMA_API && !ARM && !PPC32
2117     ---help---
2118     If you have a network (Ethernet) card of this type, say Y here.
2119    
2120     diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2121     index 8c9986f3fc01..3615c2a06fda 100644
2122     --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2123     +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2124     @@ -1685,6 +1685,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
2125     skb = build_skb(page_address(page) + adapter->rx_page_offset,
2126     adapter->rx_frag_size);
2127     if (likely(skb)) {
2128     + skb_reserve(skb, NET_SKB_PAD);
2129     adapter->rx_page_offset += adapter->rx_frag_size;
2130     if (adapter->rx_page_offset >= PAGE_SIZE)
2131     adapter->rx_page = NULL;
2132     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2133     index 1e33abde4a3e..3fd1085a093f 100644
2134     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2135     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2136     @@ -3387,14 +3387,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
2137     DP(BNX2X_MSG_ETHTOOL,
2138     "rss re-configured, UDP 4-tupple %s\n",
2139     udp_rss_requested ? "enabled" : "disabled");
2140     - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
2141     + if (bp->state == BNX2X_STATE_OPEN)
2142     + return bnx2x_rss(bp, &bp->rss_conf_obj, false,
2143     + true);
2144     } else if ((info->flow_type == UDP_V6_FLOW) &&
2145     (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
2146     bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
2147     DP(BNX2X_MSG_ETHTOOL,
2148     "rss re-configured, UDP 4-tupple %s\n",
2149     udp_rss_requested ? "enabled" : "disabled");
2150     - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
2151     + if (bp->state == BNX2X_STATE_OPEN)
2152     + return bnx2x_rss(bp, &bp->rss_conf_obj, false,
2153     + true);
2154     }
2155     return 0;
2156    
2157     @@ -3508,7 +3512,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
2158     bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
2159     }
2160    
2161     - return bnx2x_config_rss_eth(bp, false);
2162     + if (bp->state == BNX2X_STATE_OPEN)
2163     + return bnx2x_config_rss_eth(bp, false);
2164     +
2165     + return 0;
2166     }
2167    
2168     /**
2169     diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
2170     index 5ab912937aff..ec0b545197e2 100644
2171     --- a/drivers/net/ethernet/cirrus/Kconfig
2172     +++ b/drivers/net/ethernet/cirrus/Kconfig
2173     @@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
2174     config CS89x0
2175     tristate "CS89x0 support"
2176     depends on ISA || EISA || ARM
2177     + depends on !PPC32
2178     ---help---
2179     Support for CS89x0 chipset based Ethernet cards. If you have a
2180     network (Ethernet) card of this type, say Y and read the file
2181     diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
2182     index 800edfbd36c1..2bfaf3e118b1 100644
2183     --- a/drivers/net/ethernet/cisco/enic/enic_main.c
2184     +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
2185     @@ -2007,28 +2007,42 @@ static int enic_stop(struct net_device *netdev)
2186     return 0;
2187     }
2188    
2189     +static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
2190     +{
2191     + bool running = netif_running(netdev);
2192     + int err = 0;
2193     +
2194     + ASSERT_RTNL();
2195     + if (running) {
2196     + err = enic_stop(netdev);
2197     + if (err)
2198     + return err;
2199     + }
2200     +
2201     + netdev->mtu = new_mtu;
2202     +
2203     + if (running) {
2204     + err = enic_open(netdev);
2205     + if (err)
2206     + return err;
2207     + }
2208     +
2209     + return 0;
2210     +}
2211     +
2212     static int enic_change_mtu(struct net_device *netdev, int new_mtu)
2213     {
2214     struct enic *enic = netdev_priv(netdev);
2215     - int running = netif_running(netdev);
2216    
2217     if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
2218     return -EOPNOTSUPP;
2219    
2220     - if (running)
2221     - enic_stop(netdev);
2222     -
2223     - netdev->mtu = new_mtu;
2224     -
2225     if (netdev->mtu > enic->port_mtu)
2226     netdev_warn(netdev,
2227     - "interface MTU (%d) set higher than port MTU (%d)\n",
2228     - netdev->mtu, enic->port_mtu);
2229     + "interface MTU (%d) set higher than port MTU (%d)\n",
2230     + netdev->mtu, enic->port_mtu);
2231    
2232     - if (running)
2233     - enic_open(netdev);
2234     -
2235     - return 0;
2236     + return _enic_change_mtu(netdev, new_mtu);
2237     }
2238    
2239     static void enic_change_mtu_work(struct work_struct *work)
2240     @@ -2036,47 +2050,9 @@ static void enic_change_mtu_work(struct work_struct *work)
2241     struct enic *enic = container_of(work, struct enic, change_mtu_work);
2242     struct net_device *netdev = enic->netdev;
2243     int new_mtu = vnic_dev_mtu(enic->vdev);
2244     - int err;
2245     - unsigned int i;
2246     -
2247     - new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
2248    
2249     rtnl_lock();
2250     -
2251     - /* Stop RQ */
2252     - del_timer_sync(&enic->notify_timer);
2253     -
2254     - for (i = 0; i < enic->rq_count; i++)
2255     - napi_disable(&enic->napi[i]);
2256     -
2257     - vnic_intr_mask(&enic->intr[0]);
2258     - enic_synchronize_irqs(enic);
2259     - err = vnic_rq_disable(&enic->rq[0]);
2260     - if (err) {
2261     - rtnl_unlock();
2262     - netdev_err(netdev, "Unable to disable RQ.\n");
2263     - return;
2264     - }
2265     - vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
2266     - vnic_cq_clean(&enic->cq[0]);
2267     - vnic_intr_clean(&enic->intr[0]);
2268     -
2269     - /* Fill RQ with new_mtu-sized buffers */
2270     - netdev->mtu = new_mtu;
2271     - vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
2272     - /* Need at least one buffer on ring to get going */
2273     - if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
2274     - rtnl_unlock();
2275     - netdev_err(netdev, "Unable to alloc receive buffers.\n");
2276     - return;
2277     - }
2278     -
2279     - /* Start RQ */
2280     - vnic_rq_enable(&enic->rq[0]);
2281     - napi_enable(&enic->napi[0]);
2282     - vnic_intr_unmask(&enic->intr[0]);
2283     - enic_notify_timer_start(enic);
2284     -
2285     + (void)_enic_change_mtu(netdev, new_mtu);
2286     rtnl_unlock();
2287    
2288     netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
2289     @@ -2867,7 +2843,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2290     */
2291    
2292     enic->port_mtu = enic->config.mtu;
2293     - (void)enic_change_mtu(netdev, enic->port_mtu);
2294    
2295     err = enic_set_mac_addr(netdev, enic->mac_addr);
2296     if (err) {
2297     @@ -2954,6 +2929,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2298     /* MTU range: 68 - 9000 */
2299     netdev->min_mtu = ENIC_MIN_MTU;
2300     netdev->max_mtu = ENIC_MAX_MTU;
2301     + netdev->mtu = enic->port_mtu;
2302    
2303     err = register_netdev(netdev);
2304     if (err) {
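
The enic rework funnels both the ethtool path and the firmware-driven worker through one RTNL-protected helper that stops the interface, sets the MTU and reopens it, replacing the hand-rolled RQ drain. The helper's shape as a sketch, with the driver's stop/open passed in (demo_ name hypothetical):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int demo_change_mtu(struct net_device *netdev, int new_mtu,
			   int (*stop)(struct net_device *),
			   int (*open)(struct net_device *))
{
	bool running = netif_running(netdev);
	int err;

	ASSERT_RTNL();		/* caller must hold rtnl_lock() */
	if (running) {
		err = stop(netdev);
		if (err)
			return err;
	}
	netdev->mtu = new_mtu;
	if (running)
		return open(netdev);
	return 0;
}
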
2305     diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
2306     index eb53bd93065e..a696b5b2d40e 100644
2307     --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
2308     +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
2309     @@ -981,6 +981,7 @@ static int nic_dev_init(struct pci_dev *pdev)
2310     hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
2311     nic_dev, link_status_event_handler);
2312    
2313     + SET_NETDEV_DEV(netdev, &pdev->dev);
2314     err = register_netdev(netdev);
2315     if (err) {
2316     dev_err(&pdev->dev, "Failed to register netdev\n");
2317     diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
2318     index 91fe03617106..72496060e332 100644
2319     --- a/drivers/net/ethernet/netronome/nfp/flower/main.c
2320     +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
2321     @@ -79,7 +79,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
2322     return NFP_REPR_TYPE_VF;
2323     }
2324    
2325     - return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
2326     + return __NFP_REPR_TYPE_MAX;
2327     }
2328    
2329     static struct net_device *
2330     @@ -90,6 +90,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
2331     u8 port = 0;
2332    
2333     repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
2334     + if (repr_type > NFP_REPR_TYPE_MAX)
2335     + return NULL;
2336    
2337     reprs = rcu_dereference(app->reprs[repr_type]);
2338     if (!reprs)
2339     diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2340     index c5452b445c37..83c1c4fa102b 100644
2341     --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
2342     +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2343     @@ -663,7 +663,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
2344    
2345     p_ramrod->common.update_approx_mcast_flg = 1;
2346     for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
2347     - u32 *p_bins = (u32 *)p_params->bins;
2348     + u32 *p_bins = p_params->bins;
2349    
2350     p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
2351     }
2352     @@ -1474,8 +1474,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
2353     enum spq_mode comp_mode,
2354     struct qed_spq_comp_cb *p_comp_data)
2355     {
2356     - unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
2357     struct vport_update_ramrod_data *p_ramrod = NULL;
2358     + u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
2359     struct qed_spq_entry *p_ent = NULL;
2360     struct qed_sp_init_data init_data;
2361     u8 abs_vport_id = 0;
2362     @@ -1511,26 +1511,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
2363     /* explicitly clear out the entire vector */
2364     memset(&p_ramrod->approx_mcast.bins, 0,
2365     sizeof(p_ramrod->approx_mcast.bins));
2366     - memset(bins, 0, sizeof(unsigned long) *
2367     - ETH_MULTICAST_MAC_BINS_IN_REGS);
2368     + memset(bins, 0, sizeof(bins));
2369     /* filter ADD op is explicit set op and it removes
2370     * any existing filters for the vport
2371     */
2372     if (p_filter_cmd->opcode == QED_FILTER_ADD) {
2373     for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
2374     - u32 bit;
2375     + u32 bit, nbits;
2376    
2377     bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
2378     - __set_bit(bit, bins);
2379     + nbits = sizeof(u32) * BITS_PER_BYTE;
2380     + bins[bit / nbits] |= 1 << (bit % nbits);
2381     }
2382    
2383     /* Convert to correct endianity */
2384     for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
2385     struct vport_update_ramrod_mcast *p_ramrod_bins;
2386     - u32 *p_bins = (u32 *)bins;
2387    
2388     p_ramrod_bins = &p_ramrod->approx_mcast;
2389     - p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
2390     + p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
2391     }
2392     }
2393    
2394     diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2395     index cc1f248551c9..91d383f3a661 100644
2396     --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
2397     +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2398     @@ -214,7 +214,7 @@ struct qed_sp_vport_update_params {
2399     u8 anti_spoofing_en;
2400     u8 update_accept_any_vlan_flg;
2401     u8 accept_any_vlan;
2402     - unsigned long bins[8];
2403     + u32 bins[8];
2404     struct qed_rss_params *rss_params;
2405     struct qed_filter_accept_flags accept_flags;
2406     struct qed_sge_tpa_params *sge_tpa_params;
2407     diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
2408     index 376485d99357..3c469355f5a4 100644
2409     --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
2410     +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
2411     @@ -1182,6 +1182,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
2412     break;
2413     default:
2414     p_link->speed = 0;
2415     + p_link->link_up = 0;
2416     }
2417    
2418     if (p_link->link_up && p_link->speed)
2419     @@ -1279,9 +1280,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
2420     phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
2421     phy_cfg.adv_speed = params->speed.advertised_speeds;
2422     phy_cfg.loopback_mode = params->loopback_mode;
2423     - if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
2424     - if (params->eee.enable)
2425     - phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
2426     +
2427     + /* There are MFWs that share this capability regardless of whether
2428     + * this is feasible or not. And given that at the very least adv_caps
2429     + * would be set internally by qed, we want to make sure LFA would
2430     + * still work.
2431     + */
2432     + if ((p_hwfn->mcp_info->capabilities &
2433     + FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
2434     + phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
2435     if (params->eee.tx_lpi_enable)
2436     phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
2437     if (params->eee.adv_caps & QED_EEE_1G_ADV)
2438     diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2439     index d08fe350ab6c..c6411158afd7 100644
2440     --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2441     +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2442     @@ -2826,7 +2826,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2443    
2444     p_data->update_approx_mcast_flg = 1;
2445     memcpy(p_data->bins, p_mcast_tlv->bins,
2446     - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2447     + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2448     *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2449     }
2450    
2451     diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2452     index 91b5e9f02a62..6eb85db69f9a 100644
2453     --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
2454     +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2455     @@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
2456     resp_size += sizeof(struct pfvf_def_resp_tlv);
2457    
2458     memcpy(p_mcast_tlv->bins, p_params->bins,
2459     - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2460     + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2461     }
2462    
2463     update_rx = p_params->accept_flags.update_rx_mode_config;
2464     @@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
2465     u32 bit;
2466    
2467     bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
2468     - __set_bit(bit, sp_params.bins);
2469     + sp_params.bins[bit / 32] |= 1 << (bit % 32);
2470     }
2471     }
2472    
2473     diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
2474     index 97d44dfb38ca..1e93c712fa34 100644
2475     --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
2476     +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
2477     @@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
2478     struct channel_tlv tl;
2479     u8 padding[4];
2480    
2481     - u64 bins[8];
2482     + /* There are only 256 approx bins, and in HSI they're divided into
2483     + * 32-bit values. As old VFs used to set bits in these values on their side,
2484     + * the upper half of the array is never expected to contain any data.
2485     + */
2486     + u64 bins[4];
2487     + u64 obsolete_bins[4];
2488     };
2489    
2490     struct vfpf_vport_update_accept_param_tlv {
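
The qed series turns the multicast bins from an unsigned long bitmap into u32 words because the array crosses the PF/VF wire ABI and is endian-converted per 32-bit word; __set_bit() on unsigned long lays the bits out differently on 32- and 64-bit builds. The manual bit set used instead, as a standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bins[8] = { 0 };
	unsigned int bit = 37;		/* hash bin derived from the MAC */
	unsigned int nbits = 32;	/* bits per u32 word */

	/* same layout on 32- and 64-bit hosts, word by word */
	bins[bit / nbits] |= 1U << (bit % nbits);
	printf("bins[%u] = 0x%08x\n", bit / nbits, bins[bit / nbits]);
	return 0;
}
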
2491     diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
2492     index 16c3bfbe1992..757a3b37ae8a 100644
2493     --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
2494     +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
2495     @@ -218,6 +218,7 @@ issue:
2496     ret = of_mdiobus_register(bus, np1);
2497     if (ret) {
2498     mdiobus_free(bus);
2499     + lp->mii_bus = NULL;
2500     return ret;
2501     }
2502     return 0;
2503     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2504     index 6d3811c869fd..31684f3382f6 100644
2505     --- a/drivers/net/usb/qmi_wwan.c
2506     +++ b/drivers/net/usb/qmi_wwan.c
2507     @@ -1245,7 +1245,7 @@ static const struct usb_device_id products[] = {
2508     {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
2509     {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
2510     {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
2511     - {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
2512     + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
2513     {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
2514     {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
2515     {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
2516     diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
2517     index 4698450c77d1..bb43d176eb4e 100644
2518     --- a/drivers/net/wan/lmc/lmc_main.c
2519     +++ b/drivers/net/wan/lmc/lmc_main.c
2520     @@ -1371,7 +1371,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
2521     case 0x001:
2522     printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
2523     break;
2524     - case 0x010:
2525     + case 0x002:
2526     printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
2527     break;
2528     default:
2529     diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
2530     index cb987c2ecc6b..87131f663292 100644
2531     --- a/drivers/net/wireless/broadcom/b43/leds.c
2532     +++ b/drivers/net/wireless/broadcom/b43/leds.c
2533     @@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
2534     led->wl = dev->wl;
2535     led->index = led_index;
2536     led->activelow = activelow;
2537     - strncpy(led->name, name, sizeof(led->name));
2538     + strlcpy(led->name, name, sizeof(led->name));
2539     atomic_set(&led->state, 0);
2540    
2541     led->led_dev.name = led->name;
2542     diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
2543     index fd4565389c77..bc922118b6ac 100644
2544     --- a/drivers/net/wireless/broadcom/b43legacy/leds.c
2545     +++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
2546     @@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
2547     led->dev = dev;
2548     led->index = led_index;
2549     led->activelow = activelow;
2550     - strncpy(led->name, name, sizeof(led->name));
2551     + strlcpy(led->name, name, sizeof(led->name));
2552    
2553     led->led_dev.name = led->name;
2554     led->led_dev.default_trigger = default_trigger;
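
Both LED fixes swap strncpy() for strlcpy() because strncpy() leaves the destination unterminated when the source fills it exactly, and led->name is later read as a C string. A standalone illustration (a manual terminator stands in for the kernel's strlcpy()):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[8];
	const char *src = "12345678";	/* exactly fills dst, no room for NUL */

	strncpy(dst, src, sizeof(dst));
	/* dst is NOT NUL-terminated here; printing it would run off
	 * the end. strlcpy() always terminates, equivalent to: */
	dst[sizeof(dst) - 1] = '\0';
	printf("%s\n", dst);		/* "1234567" */
	return 0;
}
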
2555     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2556     index a67d03716510..afb99876fa9e 100644
2557     --- a/drivers/nvme/host/pci.c
2558     +++ b/drivers/nvme/host/pci.c
2559     @@ -306,6 +306,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
2560     old_value = *dbbuf_db;
2561     *dbbuf_db = value;
2562    
2563     + /*
2564     + * Ensure that the doorbell is updated before reading the event
2565     + * index from memory. The controller needs to provide similar
2566     + * ordering to ensure the event index is updated before reading
2567     + * the doorbell.
2568     + */
2569     + mb();
2570     +
2571     if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
2572     return false;
2573     }
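
The nvme fix inserts a full barrier between storing the shadow doorbell and loading the event index, matching the ordering the comment asks of the controller; without it, the CPU may load a stale event index and wrongly skip ringing the real doorbell. The ordering skeleton (the real driver feeds the sampled index through nvme_dbbuf_need_event()):

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

static bool demo_update_and_check(u32 *dbbuf_db, u32 *dbbuf_ei, u32 value)
{
	u32 old_value = *dbbuf_db;

	*dbbuf_db = value;	/* publish the new doorbell value */

	/* the store above must be visible before we sample the event index */
	mb();

	/* simplified decision; see nvme_dbbuf_need_event() for the real one */
	return READ_ONCE(*dbbuf_ei) != old_value;
}
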
2574     diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
2575     index a4e9f430d452..e2cca91fd266 100644
2576     --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
2577     +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
2578     @@ -433,7 +433,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
2579     const char *name;
2580     int i, ret;
2581    
2582     - if (group > info->ngroups)
2583     + if (group >= info->ngroups)
2584     return;
2585    
2586     seq_puts(s, "\n");
2587     diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
2588     index fe98d4ac0df3..e1e7e587b45b 100644
2589     --- a/drivers/platform/x86/ideapad-laptop.c
2590     +++ b/drivers/platform/x86/ideapad-laptop.c
2591     @@ -1097,10 +1097,10 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
2592     },
2593     },
2594     {
2595     - .ident = "Lenovo Legion Y520-15IKBN",
2596     + .ident = "Lenovo Legion Y520-15IKB",
2597     .matches = {
2598     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2599     - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBN"),
2600     + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"),
2601     },
2602     },
2603     {
2604     diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
2605     index 37e523374fe0..371b5ec70087 100644
2606     --- a/drivers/power/supply/generic-adc-battery.c
2607     +++ b/drivers/power/supply/generic-adc-battery.c
2608     @@ -243,10 +243,10 @@ static int gab_probe(struct platform_device *pdev)
2609     struct power_supply_desc *psy_desc;
2610     struct power_supply_config psy_cfg = {};
2611     struct gab_platform_data *pdata = pdev->dev.platform_data;
2612     - enum power_supply_property *properties;
2613     int ret = 0;
2614     int chan;
2615     - int index = 0;
2616     + int index = ARRAY_SIZE(gab_props);
2617     + bool any = false;
2618    
2619     adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL);
2620     if (!adc_bat) {
2621     @@ -280,8 +280,6 @@ static int gab_probe(struct platform_device *pdev)
2622     }
2623    
2624     memcpy(psy_desc->properties, gab_props, sizeof(gab_props));
2625     - properties = (enum power_supply_property *)
2626     - ((char *)psy_desc->properties + sizeof(gab_props));
2627    
2628     /*
2629     * getting channel from iio and copying the battery properties
2630     @@ -295,15 +293,22 @@ static int gab_probe(struct platform_device *pdev)
2631     adc_bat->channel[chan] = NULL;
2632     } else {
2633     /* copying properties for supported channels only */
2634     - memcpy(properties + sizeof(*(psy_desc->properties)) * index,
2635     - &gab_dyn_props[chan],
2636     - sizeof(gab_dyn_props[chan]));
2637     - index++;
2638     + int index2;
2639     +
2640     + for (index2 = 0; index2 < index; index2++) {
2641     + if (psy_desc->properties[index2] ==
2642     + gab_dyn_props[chan])
2643     + break; /* already known */
2644     + }
2645     + if (index2 == index) /* really new */
2646     + psy_desc->properties[index++] =
2647     + gab_dyn_props[chan];
2648     + any = true;
2649     }
2650     }
2651    
2652     /* none of the channels are supported so let's bail out */
2653     - if (index == 0) {
2654     + if (!any) {
2655     ret = -ENODEV;
2656     goto second_mem_fail;
2657     }
2658     @@ -314,7 +319,7 @@ static int gab_probe(struct platform_device *pdev)
2659     * as some channels may not be supported by the device. So
2660     * we need to take care of that.
2661     */
2662     - psy_desc->num_properties = ARRAY_SIZE(gab_props) + index;
2663     + psy_desc->num_properties = index;
2664    
2665     adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg);
2666     if (IS_ERR(adc_bat->psy)) {
2667     diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
2668     index 8941e7caaf4d..c7afdbded26b 100644
2669     --- a/drivers/s390/cio/qdio_main.c
2670     +++ b/drivers/s390/cio/qdio_main.c
2671     @@ -641,21 +641,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
2672     unsigned long phys_aob = 0;
2673    
2674     if (!q->use_cq)
2675     - goto out;
2676     + return 0;
2677    
2678     if (!q->aobs[bufnr]) {
2679     struct qaob *aob = qdio_allocate_aob();
2680     q->aobs[bufnr] = aob;
2681     }
2682     if (q->aobs[bufnr]) {
2683     - q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
2684     q->sbal_state[bufnr].aob = q->aobs[bufnr];
2685     q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
2686     phys_aob = virt_to_phys(q->aobs[bufnr]);
2687     WARN_ON_ONCE(phys_aob & 0xFF);
2688     }
2689    
2690     -out:
2691     + q->sbal_state[bufnr].flags = 0;
2692     return phys_aob;
2693     }
2694    
2695     diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
2696     index fff6f1851dc1..03019e07abb9 100644
2697     --- a/drivers/scsi/fcoe/fcoe_ctlr.c
2698     +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
2699     @@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
2700     case ELS_LOGO:
2701     if (fip->mode == FIP_MODE_VN2VN) {
2702     if (fip->state != FIP_ST_VNMP_UP)
2703     - return -EINVAL;
2704     + goto drop;
2705     if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
2706     - return -EINVAL;
2707     + goto drop;
2708     } else {
2709     if (fip->state != FIP_ST_ENABLED)
2710     return 0;
2711     @@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
2712     fip->send(fip, skb);
2713     return -EINPROGRESS;
2714     drop:
2715     - kfree_skb(skb);
2716     LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
2717     op, ntoh24(fh->fh_d_id));
2718     + kfree_skb(skb);
2719     return -EINVAL;
2720     }
2721     EXPORT_SYMBOL(fcoe_ctlr_els_send);
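
The second hunk above is a use-after-free fix in miniature: fh points into the skb's payload, so the debug print must run before kfree_skb(), not after. The ordering rule, shown in isolation (hedged fragment, not the driver code):

    /* Wrong: fh points into skb->data, which kfree_skb() releases. */
    kfree_skb(skb);
    pr_debug("drop op %u d_id %x\n", op, ntoh24(fh->fh_d_id)); /* UAF */

    /* Right: consume every field first, free last. */
    pr_debug("drop op %u d_id %x\n", op, ntoh24(fh->fh_d_id));
    kfree_skb(skb);
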
2722     diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
2723     index 31d31aad3de1..89b1f1af2fd4 100644
2724     --- a/drivers/scsi/libfc/fc_rport.c
2725     +++ b/drivers/scsi/libfc/fc_rport.c
2726     @@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
2727     FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
2728     fc_rport_state(rdata));
2729    
2730     + rdata->flags &= ~FC_RP_STARTED;
2731     fc_rport_enter_delete(rdata, RPORT_EV_STOP);
2732     mutex_unlock(&rdata->rp_mutex);
2733     kref_put(&rdata->kref, fc_rport_destroy);
2734     diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
2735     index bddbe2da5283..cf8a15e54d83 100644
2736     --- a/drivers/scsi/libiscsi.c
2737     +++ b/drivers/scsi/libiscsi.c
2738     @@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
2739     */
2740     if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
2741     iscsi_conn_printk(KERN_INFO, conn,
2742     - "task [op %x/%x itt "
2743     + "task [op %x itt "
2744     "0x%x/0x%x] "
2745     "rejected.\n",
2746     - task->hdr->opcode, opcode,
2747     - task->itt, task->hdr_itt);
2748     + opcode, task->itt,
2749     + task->hdr_itt);
2750     return -EACCES;
2751     }
2752     /*
2753     @@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
2754     */
2755     if (conn->session->fast_abort) {
2756     iscsi_conn_printk(KERN_INFO, conn,
2757     - "task [op %x/%x itt "
2758     + "task [op %x itt "
2759     "0x%x/0x%x] fast abort.\n",
2760     - task->hdr->opcode, opcode,
2761     - task->itt, task->hdr_itt);
2762     + opcode, task->itt,
2763     + task->hdr_itt);
2764     return -EACCES;
2765     }
2766     break;
2767     diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
2768     index d3940c5d079d..63dd9bc21ff2 100644
2769     --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
2770     +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
2771     @@ -1936,12 +1936,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2772     pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
2773     __func__, ioc->name);
2774     rc = -EFAULT;
2775     - goto out;
2776     + goto job_done;
2777     }
2778    
2779     rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
2780     if (rc)
2781     - goto out;
2782     + goto job_done;
2783    
2784     if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
2785     pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
2786     @@ -2066,6 +2066,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2787     out:
2788     ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
2789     mutex_unlock(&ioc->transport_cmds.mutex);
2790     +job_done:
2791     bsg_job_done(job, rc, reslen);
2792     }
2793    
2794     diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
2795     index 40406c162d0d..8ce12ffcbb7a 100644
2796     --- a/drivers/scsi/scsi_sysfs.c
2797     +++ b/drivers/scsi/scsi_sysfs.c
2798     @@ -721,8 +721,24 @@ static ssize_t
2799     sdev_store_delete(struct device *dev, struct device_attribute *attr,
2800     const char *buf, size_t count)
2801     {
2802     - if (device_remove_file_self(dev, attr))
2803     - scsi_remove_device(to_scsi_device(dev));
2804     + struct kernfs_node *kn;
2805     +
2806     + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
2807     + WARN_ON_ONCE(!kn);
2808     + /*
2809     + * Concurrent writes into the "delete" sysfs attribute may trigger
2810     + * concurrent calls to device_remove_file() and scsi_remove_device().
2811     + * device_remove_file() handles concurrent removal calls by
2812     + * serializing these and by ignoring the second and later removal
2813     + * attempts. Concurrent calls of scsi_remove_device() are
2814     + * serialized. The second and later calls of scsi_remove_device() are
2815     + * ignored because the first call of that function changes the device
2816     + * state into SDEV_DEL.
2817     + */
2818     + device_remove_file(dev, attr);
2819     + scsi_remove_device(to_scsi_device(dev));
2820     + if (kn)
2821     + sysfs_unbreak_active_protection(kn);
2822     return count;
2823     };
2824     static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
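
The comment in the hunk explains the concurrency story; structurally, any self-deleting sysfs attribute follows the same shape. A sketch of the idiom (my_remove_device() is a hypothetical stand-in for the subsystem's real teardown):

    static ssize_t delete_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
    {
            struct kernfs_node *kn;

            /* Drop the active reference so removing our own attribute
             * cannot deadlock against this store() callback. */
            kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
            device_remove_file(dev, attr);  /* safe under concurrency */
            my_remove_device(dev);          /* hypothetical teardown hook */
            if (kn)
                    sysfs_unbreak_active_protection(kn);
            return count;
    }
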
2825     diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
2826     index 777e5f1e52d1..0cd947f78b5b 100644
2827     --- a/drivers/scsi/vmw_pvscsi.c
2828     +++ b/drivers/scsi/vmw_pvscsi.c
2829     @@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
2830     (btstat == BTSTAT_SUCCESS ||
2831     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
2832     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
2833     - cmd->result = (DID_OK << 16) | sdstat;
2834     - if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
2835     - cmd->result |= (DRIVER_SENSE << 24);
2836     + if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
2837     + cmd->result = (DID_RESET << 16);
2838     + } else {
2839     + cmd->result = (DID_OK << 16) | sdstat;
2840     + if (sdstat == SAM_STAT_CHECK_CONDITION &&
2841     + cmd->sense_buffer)
2842     + cmd->result |= (DRIVER_SENSE << 24);
2843     + }
2844     } else
2845     switch (btstat) {
2846     case BTSTAT_SUCCESS:
2847     diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
2848     index 9e2f0421a01e..0bf6643cca07 100644
2849     --- a/drivers/staging/media/omap4iss/iss_video.c
2850     +++ b/drivers/staging/media/omap4iss/iss_video.c
2851     @@ -11,7 +11,6 @@
2852     * (at your option) any later version.
2853     */
2854    
2855     -#include <asm/cacheflush.h>
2856     #include <linux/clk.h>
2857     #include <linux/mm.h>
2858     #include <linux/pagemap.h>
2859     @@ -24,6 +23,8 @@
2860     #include <media/v4l2-ioctl.h>
2861     #include <media/v4l2-mc.h>
2862    
2863     +#include <asm/cacheflush.h>
2864     +
2865     #include "iss_video.h"
2866     #include "iss.h"
2867    
2868     diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
2869     index 514986b57c2d..25eb3891e34b 100644
2870     --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
2871     +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
2872     @@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
2873     struct iscsi_param *param;
2874     u32 mrdsl, mbl;
2875     u32 max_npdu, max_iso_npdu;
2876     + u32 max_iso_payload;
2877    
2878     if (conn->login->leading_connection) {
2879     param = iscsi_find_param_from_key(MAXBURSTLENGTH,
2880     @@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
2881     mrdsl = conn_ops->MaxRecvDataSegmentLength;
2882     max_npdu = mbl / mrdsl;
2883    
2884     - max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD /
2885     - (ISCSI_HDR_LEN + mrdsl +
2886     + max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
2887     +
2888     + max_iso_npdu = max_iso_payload /
2889     + (ISCSI_HDR_LEN + mrdsl +
2890     cxgbit_digest_len[csk->submode]);
2891    
2892     csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
2893     @@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
2894     if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
2895     conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
2896    
2897     + if (cxgbit_set_digest(csk))
2898     + return -1;
2899     +
2900     if (conn->login->leading_connection) {
2901     param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
2902     conn->param_list);
2903     @@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
2904     if (is_t5(cdev->lldi.adapter_type))
2905     goto enable_ddp;
2906     else
2907     - goto enable_digest;
2908     + return 0;
2909     }
2910    
2911     if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
2912     @@ -781,10 +787,6 @@ enable_ddp:
2913     }
2914     }
2915    
2916     -enable_digest:
2917     - if (cxgbit_set_digest(csk))
2918     - return -1;
2919     -
2920     return 0;
2921     }
2922    
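
The sizing change above first trims the ISO payload budget down to a whole number of TCP segments (rounddown() to the emss) before dividing by the per-PDU cost. With purely illustrative numbers, not taken from the driver:

    u32 budget = 65535, emss = 1448;                /* assumed values */
    u32 max_iso_payload = rounddown(budget, emss);  /* 45 * 1448 = 65160 */
    u32 per_pdu = 48 + 8192 + 8;    /* hdr + mrdsl + digests, illustrative */
    u32 max_iso_npdu = max_iso_payload / per_pdu;   /* 65160 / 8248 = 7 */
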
2923     diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
2924     index dc13afbd4c88..98e27da34f3c 100644
2925     --- a/drivers/target/iscsi/iscsi_target_login.c
2926     +++ b/drivers/target/iscsi/iscsi_target_login.c
2927     @@ -345,8 +345,7 @@ static int iscsi_login_zero_tsih_s1(
2928     pr_err("idr_alloc() for sess_idr failed\n");
2929     iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
2930     ISCSI_LOGIN_STATUS_NO_RESOURCES);
2931     - kfree(sess);
2932     - return -ENOMEM;
2933     + goto free_sess;
2934     }
2935    
2936     sess->creation_time = get_jiffies_64();
2937     @@ -362,20 +361,28 @@ static int iscsi_login_zero_tsih_s1(
2938     ISCSI_LOGIN_STATUS_NO_RESOURCES);
2939     pr_err("Unable to allocate memory for"
2940     " struct iscsi_sess_ops.\n");
2941     - kfree(sess);
2942     - return -ENOMEM;
2943     + goto remove_idr;
2944     }
2945    
2946     sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
2947     if (IS_ERR(sess->se_sess)) {
2948     iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
2949     ISCSI_LOGIN_STATUS_NO_RESOURCES);
2950     - kfree(sess->sess_ops);
2951     - kfree(sess);
2952     - return -ENOMEM;
2953     + goto free_ops;
2954     }
2955    
2956     return 0;
2957     +
2958     +free_ops:
2959     + kfree(sess->sess_ops);
2960     +remove_idr:
2961     + spin_lock_bh(&sess_idr_lock);
2962     + idr_remove(&sess_idr, sess->session_index);
2963     + spin_unlock_bh(&sess_idr_lock);
2964     +free_sess:
2965     + kfree(sess);
2966     + conn->sess = NULL;
2967     + return -ENOMEM;
2968     }
2969    
2970     static int iscsi_login_zero_tsih_s2(
2971     @@ -1162,13 +1169,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
2972     ISCSI_LOGIN_STATUS_INIT_ERR);
2973     if (!zero_tsih || !conn->sess)
2974     goto old_sess_out;
2975     - if (conn->sess->se_sess)
2976     - transport_free_session(conn->sess->se_sess);
2977     - if (conn->sess->session_index != 0) {
2978     - spin_lock_bh(&sess_idr_lock);
2979     - idr_remove(&sess_idr, conn->sess->session_index);
2980     - spin_unlock_bh(&sess_idr_lock);
2981     - }
2982     +
2983     + transport_free_session(conn->sess->se_sess);
2984     +
2985     + spin_lock_bh(&sess_idr_lock);
2986     + idr_remove(&sess_idr, conn->sess->session_index);
2987     + spin_unlock_bh(&sess_idr_lock);
2988     +
2989     kfree(conn->sess->sess_ops);
2990     kfree(conn->sess);
2991     conn->sess = NULL;
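
The hunk above converts three copy-pasted error paths into the kernel's canonical goto-unwind ladder: each later failure jumps to a label that undoes everything set up so far, in reverse order. Skeleton of the idiom (placeholder step/teardown names):

    static int setup_all(void)
    {
            if (alloc_a())
                    return -ENOMEM;
            if (alloc_b())
                    goto free_a;
            if (alloc_c())
                    goto free_b;
            return 0;

    free_b:
            release_b();
    free_a:
            release_a();
            return -ENOMEM;
    }
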
2992     diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
2993     index 97cb2dfd6369..d063f0401f84 100644
2994     --- a/drivers/usb/gadget/function/f_uac2.c
2995     +++ b/drivers/usb/gadget/function/f_uac2.c
2996     @@ -442,14 +442,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
2997     };
2998    
2999     struct cntrl_cur_lay3 {
3000     - __u32 dCUR;
3001     + __le32 dCUR;
3002     };
3003    
3004     struct cntrl_range_lay3 {
3005     - __u16 wNumSubRanges;
3006     - __u32 dMIN;
3007     - __u32 dMAX;
3008     - __u32 dRES;
3009     + __le16 wNumSubRanges;
3010     + __le32 dMIN;
3011     + __le32 dMAX;
3012     + __le32 dRES;
3013     } __packed;
3014    
3015     static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
3016     @@ -563,13 +563,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
3017     agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
3018     if (!agdev->out_ep) {
3019     dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
3020     - return ret;
3021     + return -ENODEV;
3022     }
3023    
3024     agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
3025     if (!agdev->in_ep) {
3026     dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
3027     - return ret;
3028     + return -ENODEV;
3029     }
3030    
3031     agdev->in_ep_maxpsize = max_t(u16,
3032     @@ -707,9 +707,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
3033     memset(&c, 0, sizeof(struct cntrl_cur_lay3));
3034    
3035     if (entity_id == USB_IN_CLK_ID)
3036     - c.dCUR = p_srate;
3037     + c.dCUR = cpu_to_le32(p_srate);
3038     else if (entity_id == USB_OUT_CLK_ID)
3039     - c.dCUR = c_srate;
3040     + c.dCUR = cpu_to_le32(c_srate);
3041    
3042     value = min_t(unsigned, w_length, sizeof c);
3043     memcpy(req->buf, &c, value);
3044     @@ -746,15 +746,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
3045    
3046     if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
3047     if (entity_id == USB_IN_CLK_ID)
3048     - r.dMIN = p_srate;
3049     + r.dMIN = cpu_to_le32(p_srate);
3050     else if (entity_id == USB_OUT_CLK_ID)
3051     - r.dMIN = c_srate;
3052     + r.dMIN = cpu_to_le32(c_srate);
3053     else
3054     return -EOPNOTSUPP;
3055    
3056     r.dMAX = r.dMIN;
3057     r.dRES = 0;
3058     - r.wNumSubRanges = 1;
3059     + r.wNumSubRanges = cpu_to_le16(1);
3060    
3061     value = min_t(unsigned, w_length, sizeof r);
3062     memcpy(req->buf, &r, value);
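
USB control payloads are little-endian on the wire, so the struct fields above become __le32/__le16 and every store goes through cpu_to_le32()/cpu_to_le16(); on a big-endian host a plain assignment would transmit byte-swapped values. The shape of the fix, reduced to a sketch:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct ctl_cur {
            __le32 dCUR;                    /* wire format, not host order */
    } __packed;

    static void fill_cur(struct ctl_cur *c, u32 rate_hz)
    {
            c->dCUR = cpu_to_le32(rate_hz); /* never a raw assignment */
    }
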
3063     diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
3064     index 3971bbab88bd..d3a639297e06 100644
3065     --- a/drivers/usb/gadget/function/u_audio.c
3066     +++ b/drivers/usb/gadget/function/u_audio.c
3067     @@ -41,9 +41,6 @@ struct uac_req {
3068     struct uac_rtd_params {
3069     struct snd_uac_chip *uac; /* parent chip */
3070     bool ep_enabled; /* if the ep is enabled */
3071     - /* Size of the ring buffer */
3072     - size_t dma_bytes;
3073     - unsigned char *dma_area;
3074    
3075     struct snd_pcm_substream *ss;
3076    
3077     @@ -52,8 +49,6 @@ struct uac_rtd_params {
3078    
3079     void *rbuf;
3080    
3081     - size_t period_size;
3082     -
3083     unsigned max_psize; /* MaxPacketSize of endpoint */
3084     struct uac_req *ureq;
3085    
3086     @@ -93,12 +88,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
3087     static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
3088     {
3089     unsigned pending;
3090     - unsigned long flags;
3091     + unsigned long flags, flags2;
3092     unsigned int hw_ptr;
3093     - bool update_alsa = false;
3094     int status = req->status;
3095     struct uac_req *ur = req->context;
3096     struct snd_pcm_substream *substream;
3097     + struct snd_pcm_runtime *runtime;
3098     struct uac_rtd_params *prm = ur->pp;
3099     struct snd_uac_chip *uac = prm->uac;
3100    
3101     @@ -120,6 +115,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
3102     if (!substream)
3103     goto exit;
3104    
3105     + snd_pcm_stream_lock_irqsave(substream, flags2);
3106     +
3107     + runtime = substream->runtime;
3108     + if (!runtime || !snd_pcm_running(substream)) {
3109     + snd_pcm_stream_unlock_irqrestore(substream, flags2);
3110     + goto exit;
3111     + }
3112     +
3113     spin_lock_irqsave(&prm->lock, flags);
3114    
3115     if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3116     @@ -146,43 +149,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
3117     req->actual = req->length;
3118     }
3119    
3120     - pending = prm->hw_ptr % prm->period_size;
3121     - pending += req->actual;
3122     - if (pending >= prm->period_size)
3123     - update_alsa = true;
3124     -
3125     hw_ptr = prm->hw_ptr;
3126     - prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
3127    
3128     spin_unlock_irqrestore(&prm->lock, flags);
3129    
3130     /* Pack USB load in ALSA ring buffer */
3131     - pending = prm->dma_bytes - hw_ptr;
3132     + pending = runtime->dma_bytes - hw_ptr;
3133    
3134     if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3135     if (unlikely(pending < req->actual)) {
3136     - memcpy(req->buf, prm->dma_area + hw_ptr, pending);
3137     - memcpy(req->buf + pending, prm->dma_area,
3138     + memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
3139     + memcpy(req->buf + pending, runtime->dma_area,
3140     req->actual - pending);
3141     } else {
3142     - memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
3143     + memcpy(req->buf, runtime->dma_area + hw_ptr,
3144     + req->actual);
3145     }
3146     } else {
3147     if (unlikely(pending < req->actual)) {
3148     - memcpy(prm->dma_area + hw_ptr, req->buf, pending);
3149     - memcpy(prm->dma_area, req->buf + pending,
3150     + memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
3151     + memcpy(runtime->dma_area, req->buf + pending,
3152     req->actual - pending);
3153     } else {
3154     - memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
3155     + memcpy(runtime->dma_area + hw_ptr, req->buf,
3156     + req->actual);
3157     }
3158     }
3159    
3160     + spin_lock_irqsave(&prm->lock, flags);
3161     + /* update hw_ptr after data is copied to memory */
3162     + prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
3163     + hw_ptr = prm->hw_ptr;
3164     + spin_unlock_irqrestore(&prm->lock, flags);
3165     + snd_pcm_stream_unlock_irqrestore(substream, flags2);
3166     +
3167     + if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
3168     + snd_pcm_period_elapsed(substream);
3169     +
3170     exit:
3171     if (usb_ep_queue(ep, req, GFP_ATOMIC))
3172     dev_err(uac->card->dev, "%d Error!\n", __LINE__);
3173     -
3174     - if (update_alsa)
3175     - snd_pcm_period_elapsed(substream);
3176     }
3177    
3178     static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
3179     @@ -245,40 +251,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
3180     static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
3181     struct snd_pcm_hw_params *hw_params)
3182     {
3183     - struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
3184     - struct uac_rtd_params *prm;
3185     - int err;
3186     -
3187     - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3188     - prm = &uac->p_prm;
3189     - else
3190     - prm = &uac->c_prm;
3191     -
3192     - err = snd_pcm_lib_malloc_pages(substream,
3193     + return snd_pcm_lib_malloc_pages(substream,
3194     params_buffer_bytes(hw_params));
3195     - if (err >= 0) {
3196     - prm->dma_bytes = substream->runtime->dma_bytes;
3197     - prm->dma_area = substream->runtime->dma_area;
3198     - prm->period_size = params_period_bytes(hw_params);
3199     - }
3200     -
3201     - return err;
3202     }
3203    
3204     static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
3205     {
3206     - struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
3207     - struct uac_rtd_params *prm;
3208     -
3209     - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3210     - prm = &uac->p_prm;
3211     - else
3212     - prm = &uac->c_prm;
3213     -
3214     - prm->dma_area = NULL;
3215     - prm->dma_bytes = 0;
3216     - prm->period_size = 0;
3217     -
3218     return snd_pcm_lib_free_pages(substream);
3219     }
3220    
3221     @@ -604,15 +582,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
3222     if (err < 0)
3223     goto snd_fail;
3224    
3225     - strcpy(pcm->name, pcm_name);
3226     + strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
3227     pcm->private_data = uac;
3228     uac->pcm = pcm;
3229    
3230     snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
3231     snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
3232    
3233     - strcpy(card->driver, card_name);
3234     - strcpy(card->shortname, card_name);
3235     + strlcpy(card->driver, card_name, sizeof(card->driver));
3236     + strlcpy(card->shortname, card_name, sizeof(card->shortname));
3237     sprintf(card->longname, "%s %i", card_name, card->dev->id);
3238    
3239     snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
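
The completion handler now copies straight into ALSA's own ring buffer (runtime->dma_area / runtime->dma_bytes), splitting each copy at the wrap point instead of mirroring the buffer in private fields. The wrap logic on its own (a generic sketch, not the driver code):

    #include <linux/string.h>

    /* Copy len bytes out of a ring of 'size' bytes starting at 'pos',
     * splitting the copy when it would run past the end. */
    static void ring_copy_out(void *dst, const void *ring, size_t size,
                              size_t pos, size_t len)
    {
            size_t tail = size - pos;       /* bytes before the wrap */

            if (len > tail) {
                    memcpy(dst, ring + pos, tail);
                    memcpy(dst + tail, ring, len - tail);
            } else {
                    memcpy(dst, ring + pos, len);
            }
    }
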
3240     diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
3241     index 118ad70f1af0..84b227ede082 100644
3242     --- a/drivers/usb/gadget/udc/r8a66597-udc.c
3243     +++ b/drivers/usb/gadget/udc/r8a66597-udc.c
3244     @@ -835,11 +835,11 @@ static void init_controller(struct r8a66597 *r8a66597)
3245    
3246     r8a66597_bset(r8a66597, XCKE, SYSCFG0);
3247    
3248     - msleep(3);
3249     + mdelay(3);
3250    
3251     r8a66597_bset(r8a66597, PLLC, SYSCFG0);
3252    
3253     - msleep(1);
3254     + mdelay(1);
3255    
3256     r8a66597_bset(r8a66597, SCKE, SYSCFG0);
3257    
3258     @@ -1193,7 +1193,7 @@ __acquires(r8a66597->lock)
3259     r8a66597->ep0_req->length = 2;
3260     /* AV: what happens if we get called again before that gets through? */
3261     spin_unlock(&r8a66597->lock);
3262     - r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
3263     + r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
3264     spin_lock(&r8a66597->lock);
3265     }
3266    
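
Both hunks above are the same class of fix: this code can run in atomic context (e.g. with a spinlock held), where sleeping is forbidden, so msleep() becomes a busy-waiting mdelay() and the allocation flag becomes GFP_ATOMIC. The rule as a fragment (lock and flags variables assumed):

    spin_lock_irqsave(&dev->lock, flags);
    mdelay(3);                               /* busy-waits; msleep() sleeps */
    req = kmalloc(sizeof(*req), GFP_ATOMIC); /* GFP_KERNEL may sleep */
    spin_unlock_irqrestore(&dev->lock, flags);
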
3267     diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
3268     index cf8f40ae6e01..9b4354a00ca7 100644
3269     --- a/drivers/usb/phy/phy-fsl-usb.c
3270     +++ b/drivers/usb/phy/phy-fsl-usb.c
3271     @@ -874,6 +874,7 @@ int usb_otg_start(struct platform_device *pdev)
3272     if (pdata->init && pdata->init(pdev) != 0)
3273     return -EINVAL;
3274    
3275     +#ifdef CONFIG_PPC32
3276     if (pdata->big_endian_mmio) {
3277     _fsl_readl = _fsl_readl_be;
3278     _fsl_writel = _fsl_writel_be;
3279     @@ -881,6 +882,7 @@ int usb_otg_start(struct platform_device *pdev)
3280     _fsl_readl = _fsl_readl_le;
3281     _fsl_writel = _fsl_writel_le;
3282     }
3283     +#endif
3284    
3285     /* request irq */
3286     p_otg->irq = platform_get_irq(pdev, 0);
3287     @@ -971,7 +973,7 @@ int usb_otg_start(struct platform_device *pdev)
3288     /*
3289     * state file in sysfs
3290     */
3291     -static int show_fsl_usb2_otg_state(struct device *dev,
3292     +static ssize_t show_fsl_usb2_otg_state(struct device *dev,
3293     struct device_attribute *attr, char *buf)
3294     {
3295     struct otg_fsm *fsm = &fsl_otg_dev->fsm;
3296     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3297     index b475d1ebbbbf..5cf1bbe9754c 100644
3298     --- a/fs/btrfs/disk-io.c
3299     +++ b/fs/btrfs/disk-io.c
3300     @@ -1098,8 +1098,9 @@ static int btree_writepages(struct address_space *mapping,
3301    
3302     fs_info = BTRFS_I(mapping->host)->root->fs_info;
3303     /* this is a bit racy, but that's ok */
3304     - ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3305     - BTRFS_DIRTY_METADATA_THRESH);
3306     + ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3307     + BTRFS_DIRTY_METADATA_THRESH,
3308     + fs_info->dirty_metadata_batch);
3309     if (ret < 0)
3310     return 0;
3311     }
3312     @@ -4030,8 +4031,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
3313     if (flush_delayed)
3314     btrfs_balance_delayed_items(fs_info);
3315    
3316     - ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3317     - BTRFS_DIRTY_METADATA_THRESH);
3318     + ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3319     + BTRFS_DIRTY_METADATA_THRESH,
3320     + fs_info->dirty_metadata_batch);
3321     if (ret > 0) {
3322     balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
3323     }
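
percpu_counter_compare() is only exact to within the counter's batching slack, and btrfs adds to this counter with a custom batch; __percpu_counter_compare() takes that batch explicitly so the comparison tolerance matches what the writers used. Usage shape (fragment, illustrative reaction):

    /* ret is <0, ==0 or >0 relative to the threshold, accurate to
     * within num_online_cpus() * batch. */
    ret = __percpu_counter_compare(&dirty_bytes, threshold, batch);
    if (ret > 0)
            throttle_writers();     /* hypothetical response */
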
3324     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3325     index 53487102081d..bbabe37c2e8c 100644
3326     --- a/fs/btrfs/extent-tree.c
3327     +++ b/fs/btrfs/extent-tree.c
3328     @@ -4407,7 +4407,7 @@ commit_trans:
3329     data_sinfo->flags, bytes, 1);
3330     spin_unlock(&data_sinfo->lock);
3331    
3332     - return ret;
3333     + return 0;
3334     }
3335    
3336     int btrfs_check_data_free_space(struct inode *inode,
3337     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3338     index 28a58f40f3a4..e8bfafa25a71 100644
3339     --- a/fs/btrfs/inode.c
3340     +++ b/fs/btrfs/inode.c
3341     @@ -6152,32 +6152,6 @@ err:
3342     return ret;
3343     }
3344    
3345     -int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
3346     -{
3347     - struct btrfs_root *root = BTRFS_I(inode)->root;
3348     - struct btrfs_trans_handle *trans;
3349     - int ret = 0;
3350     - bool nolock = false;
3351     -
3352     - if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
3353     - return 0;
3354     -
3355     - if (btrfs_fs_closing(root->fs_info) &&
3356     - btrfs_is_free_space_inode(BTRFS_I(inode)))
3357     - nolock = true;
3358     -
3359     - if (wbc->sync_mode == WB_SYNC_ALL) {
3360     - if (nolock)
3361     - trans = btrfs_join_transaction_nolock(root);
3362     - else
3363     - trans = btrfs_join_transaction(root);
3364     - if (IS_ERR(trans))
3365     - return PTR_ERR(trans);
3366     - ret = btrfs_commit_transaction(trans);
3367     - }
3368     - return ret;
3369     -}
3370     -
3371     /*
3372     * This is somewhat expensive, updating the tree every time the
3373     * inode changes. But, it is most likely to find the inode in cache.
3374     diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
3375     index 8e3ce81d3f44..fe960d5e8913 100644
3376     --- a/fs/btrfs/super.c
3377     +++ b/fs/btrfs/super.c
3378     @@ -2271,7 +2271,6 @@ static const struct super_operations btrfs_super_ops = {
3379     .sync_fs = btrfs_sync_fs,
3380     .show_options = btrfs_show_options,
3381     .show_devname = btrfs_show_devname,
3382     - .write_inode = btrfs_write_inode,
3383     .alloc_inode = btrfs_alloc_inode,
3384     .destroy_inode = btrfs_destroy_inode,
3385     .statfs = btrfs_statfs,
3386     diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
3387     index 3978b324cbca..5f2f67d220fa 100644
3388     --- a/fs/cachefiles/namei.c
3389     +++ b/fs/cachefiles/namei.c
3390     @@ -195,7 +195,6 @@ wait_for_old_object:
3391     pr_err("\n");
3392     pr_err("Error: Unexpected object collision\n");
3393     cachefiles_printk_object(object, xobject);
3394     - BUG();
3395     }
3396     atomic_inc(&xobject->usage);
3397     write_unlock(&cache->active_lock);
3398     diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
3399     index 18d7aa61ef0f..199eb396a1bb 100644
3400     --- a/fs/cachefiles/rdwr.c
3401     +++ b/fs/cachefiles/rdwr.c
3402     @@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
3403     struct cachefiles_one_read *monitor =
3404     container_of(wait, struct cachefiles_one_read, monitor);
3405     struct cachefiles_object *object;
3406     + struct fscache_retrieval *op = monitor->op;
3407     struct wait_bit_key *key = _key;
3408     struct page *page = wait->private;
3409    
3410     @@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
3411     list_del(&wait->entry);
3412    
3413     /* move onto the action list and queue for FS-Cache thread pool */
3414     - ASSERT(monitor->op);
3415     + ASSERT(op);
3416    
3417     - object = container_of(monitor->op->op.object,
3418     - struct cachefiles_object, fscache);
3419     + /* We need to temporarily bump the usage count as we don't own a ref
3420     + * here otherwise cachefiles_read_copier() may free the op between the
3421     + * monitor being enqueued on the op->to_do list and the op getting
3422     + * enqueued on the work queue.
3423     + */
3424     + fscache_get_retrieval(op);
3425    
3426     + object = container_of(op->op.object, struct cachefiles_object, fscache);
3427     spin_lock(&object->work_lock);
3428     - list_add_tail(&monitor->op_link, &monitor->op->to_do);
3429     + list_add_tail(&monitor->op_link, &op->to_do);
3430     spin_unlock(&object->work_lock);
3431    
3432     - fscache_enqueue_retrieval(monitor->op);
3433     + fscache_enqueue_retrieval(op);
3434     + fscache_put_retrieval(op);
3435     return 0;
3436     }
3437    
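
The added get/put pair above is the standard bridge reference: hold an extra ref across the enqueue so a concurrent consumer cannot free the object between it being listed and the work actually running. Generic form with kref (illustrative names, not the fscache API):

    #include <linux/kref.h>

    kref_get(&op->ref);             /* bridge ref across the enqueue */
    spin_lock(&obj->work_lock);
    list_add_tail(&item->link, &op->to_do);
    spin_unlock(&obj->work_lock);
    enqueue_op(op);                 /* consumer may free op once it runs */
    kref_put(&op->ref, op_release); /* drop the bridge ref */
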
3438     diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
3439     index cbb9534b89b4..53c9c49f0fbb 100644
3440     --- a/fs/cifs/cifs_debug.c
3441     +++ b/fs/cifs/cifs_debug.c
3442     @@ -123,25 +123,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
3443     seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
3444     seq_printf(m, "Features:");
3445     #ifdef CONFIG_CIFS_DFS_UPCALL
3446     - seq_printf(m, " dfs");
3447     + seq_printf(m, " DFS");
3448     #endif
3449     #ifdef CONFIG_CIFS_FSCACHE
3450     - seq_printf(m, " fscache");
3451     + seq_printf(m, ",FSCACHE");
3452     +#endif
3453     +#ifdef CONFIG_CIFS_SMB_DIRECT
3454     + seq_printf(m, ",SMB_DIRECT");
3455     +#endif
3456     +#ifdef CONFIG_CIFS_STATS2
3457     + seq_printf(m, ",STATS2");
3458     +#elif defined(CONFIG_CIFS_STATS)
3459     + seq_printf(m, ",STATS");
3460     +#endif
3461     +#ifdef CONFIG_CIFS_DEBUG2
3462     + seq_printf(m, ",DEBUG2");
3463     +#elif defined(CONFIG_CIFS_DEBUG)
3464     + seq_printf(m, ",DEBUG");
3465     +#endif
3466     +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3467     + seq_printf(m, ",ALLOW_INSECURE_LEGACY");
3468     #endif
3469     #ifdef CONFIG_CIFS_WEAK_PW_HASH
3470     - seq_printf(m, " lanman");
3471     + seq_printf(m, ",WEAK_PW_HASH");
3472     #endif
3473     #ifdef CONFIG_CIFS_POSIX
3474     - seq_printf(m, " posix");
3475     + seq_printf(m, ",CIFS_POSIX");
3476     #endif
3477     #ifdef CONFIG_CIFS_UPCALL
3478     - seq_printf(m, " spnego");
3479     + seq_printf(m, ",UPCALL(SPNEGO)");
3480     #endif
3481     #ifdef CONFIG_CIFS_XATTR
3482     - seq_printf(m, " xattr");
3483     + seq_printf(m, ",XATTR");
3484     #endif
3485     #ifdef CONFIG_CIFS_ACL
3486     - seq_printf(m, " acl");
3487     + seq_printf(m, ",ACL");
3488     #endif
3489     seq_putc(m, '\n');
3490     seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
3491     diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
3492     index 490c5fc9e69c..44a7b2dea688 100644
3493     --- a/fs/cifs/cifsfs.c
3494     +++ b/fs/cifs/cifsfs.c
3495     @@ -197,14 +197,16 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
3496    
3497     xid = get_xid();
3498    
3499     - /*
3500     - * PATH_MAX may be too long - it would presumably be total path,
3501     - * but note that some servers (includinng Samba 3) have a shorter
3502     - * maximum path.
3503     - *
3504     - * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
3505     - */
3506     - buf->f_namelen = PATH_MAX;
3507     + if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
3508     + buf->f_namelen =
3509     + le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
3510     + else
3511     + buf->f_namelen = PATH_MAX;
3512     +
3513     + buf->f_fsid.val[0] = tcon->vol_serial_number;
3514     + /* using part of the create time for more randomness, see man statfs */
3515     + buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
3516     +
3517     buf->f_files = 0; /* undefined */
3518     buf->f_ffree = 0; /* unlimited */
3519    
3520     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3521     index 0c7b7e2a0919..caf9cf91b825 100644
3522     --- a/fs/cifs/inode.c
3523     +++ b/fs/cifs/inode.c
3524     @@ -1122,6 +1122,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
3525     if (!server->ops->set_file_info)
3526     return -ENOSYS;
3527    
3528     + info_buf.Pad = 0;
3529     +
3530     if (attrs->ia_valid & ATTR_ATIME) {
3531     set_time = true;
3532     info_buf.LastAccessTime =
3533     diff --git a/fs/cifs/link.c b/fs/cifs/link.c
3534     index 889a840172eb..9451a7f6893d 100644
3535     --- a/fs/cifs/link.c
3536     +++ b/fs/cifs/link.c
3537     @@ -396,7 +396,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
3538     struct cifs_io_parms io_parms;
3539     int buf_type = CIFS_NO_BUFFER;
3540     __le16 *utf16_path;
3541     - __u8 oplock = SMB2_OPLOCK_LEVEL_II;
3542     + __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3543     struct smb2_file_all_info *pfile_info = NULL;
3544    
3545     oparms.tcon = tcon;
3546     @@ -458,7 +458,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
3547     struct cifs_io_parms io_parms;
3548     int create_options = CREATE_NOT_DIR;
3549     __le16 *utf16_path;
3550     - __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
3551     + __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3552     struct kvec iov[2];
3553    
3554     if (backup_cred(cifs_sb))
3555     diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
3556     index 8b0502cd39af..aa23c00367ec 100644
3557     --- a/fs/cifs/sess.c
3558     +++ b/fs/cifs/sess.c
3559     @@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
3560     goto setup_ntlmv2_ret;
3561     }
3562     *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
3563     + if (!*pbuffer) {
3564     + rc = -ENOMEM;
3565     + cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
3566     + *buflen = 0;
3567     + goto setup_ntlmv2_ret;
3568     + }
3569     sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
3570    
3571     memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
3572     diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
3573     index 1238cd3552f9..0267d8cbc996 100644
3574     --- a/fs/cifs/smb2inode.c
3575     +++ b/fs/cifs/smb2inode.c
3576     @@ -267,7 +267,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
3577     int rc;
3578    
3579     if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
3580     - (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
3581     + (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
3582     (buf->Attributes == 0))
3583     return 0; /* would be a no op, no sense sending this */
3584    
3585     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3586     index 83267ac3a3f0..e9f246fe9d80 100644
3587     --- a/fs/cifs/smb2ops.c
3588     +++ b/fs/cifs/smb2ops.c
3589     @@ -332,6 +332,8 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
3590     FS_ATTRIBUTE_INFORMATION);
3591     SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
3592     FS_DEVICE_INFORMATION);
3593     + SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
3594     + FS_VOLUME_INFORMATION);
3595     SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
3596     FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
3597     SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3598     @@ -1129,6 +1131,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
3599    
3600     }
3601    
3602     +/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
3603     +#define GMT_TOKEN_SIZE 50
3604     +
3605     +/*
3606     + * Input buffer contains (empty) struct smb_snapshot array with size filled in
3607     + * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
3608     + */
3609     static int
3610     smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
3611     struct cifsFileInfo *cfile, void __user *ioc_buf)
3612     @@ -1158,14 +1167,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
3613     kfree(retbuf);
3614     return rc;
3615     }
3616     - if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
3617     - rc = -ERANGE;
3618     - kfree(retbuf);
3619     - return rc;
3620     - }
3621    
3622     - if (ret_data_len > snapshot_in.snapshot_array_size)
3623     - ret_data_len = snapshot_in.snapshot_array_size;
3624     + /*
3625     + * Check for min size, ie not large enough to fit even one GMT
3626     + * token (snapshot). On the first ioctl some users may pass in
3627     + * smaller size (or zero) to simply get the size of the array
3628     + * so the user space caller can allocate sufficient memory
3629     + * and retry the ioctl again with larger array size sufficient
3630     + * to hold all of the snapshot GMT tokens on the second try.
3631     + */
3632     + if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
3633     + ret_data_len = sizeof(struct smb_snapshot_array);
3634     +
3635     + /*
3636     + * We return struct SRV_SNAPSHOT_ARRAY, followed by
3637     + * the snapshot array (of 50 byte GMT tokens) each
3638     + * representing an available previous version of the data
3639     + */
3640     + if (ret_data_len > (snapshot_in.snapshot_array_size +
3641     + sizeof(struct smb_snapshot_array)))
3642     + ret_data_len = snapshot_in.snapshot_array_size +
3643     + sizeof(struct smb_snapshot_array);
3644    
3645     if (copy_to_user(ioc_buf, retbuf, ret_data_len))
3646     rc = -EFAULT;
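
The rewritten bounds logic supports a two-pass protocol: a first ioctl with a too-small (even zero) array gets back just the header carrying the required size, and the caller retries with a buffer that big. Userspace side, sketched with placeholder names (SNAP_IOCTL and struct snap_hdr are not the real cifs identifiers):

    struct snap_hdr hdr = { .snapshot_array_size = 0 };

    ioctl(fd, SNAP_IOCTL, &hdr);            /* pass 1: learn needed size */
    buf = malloc(sizeof(hdr) + hdr.snapshot_array_size);
    ((struct snap_hdr *)buf)->snapshot_array_size = hdr.snapshot_array_size;
    ioctl(fd, SNAP_IOCTL, buf);             /* pass 2: get the GMT tokens */
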
3647     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3648     index 71b81980787f..e317e9a400c1 100644
3649     --- a/fs/cifs/smb2pdu.c
3650     +++ b/fs/cifs/smb2pdu.c
3651     @@ -3455,6 +3455,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
3652     } else if (level == FS_SECTOR_SIZE_INFORMATION) {
3653     max_len = sizeof(struct smb3_fs_ss_info);
3654     min_len = sizeof(struct smb3_fs_ss_info);
3655     + } else if (level == FS_VOLUME_INFORMATION) {
3656     + max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
3657     + min_len = sizeof(struct smb3_fs_vol_info);
3658     } else {
3659     cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
3660     return -EINVAL;
3661     @@ -3495,6 +3498,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
3662     tcon->ss_flags = le32_to_cpu(ss_info->Flags);
3663     tcon->perf_sector_size =
3664     le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
3665     + } else if (level == FS_VOLUME_INFORMATION) {
3666     + struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
3667     + (offset + (char *)rsp);
3668     + tcon->vol_serial_number = vol_info->VolumeSerialNumber;
3669     + tcon->vol_create_time = vol_info->VolumeCreationTime;
3670     }
3671    
3672     qfsattr_exit:
3673     diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
3674     index c2ec934be968..e52454059725 100644
3675     --- a/fs/cifs/smb2pdu.h
3676     +++ b/fs/cifs/smb2pdu.h
3677     @@ -1108,6 +1108,17 @@ struct smb3_fs_ss_info {
3678     __le32 ByteOffsetForPartitionAlignment;
3679     } __packed;
3680    
3681     +/* volume info struct - see MS-FSCC 2.5.9 */
3682     +#define MAX_VOL_LABEL_LEN 32
3683     +struct smb3_fs_vol_info {
3684     + __le64 VolumeCreationTime;
3685     + __u32 VolumeSerialNumber;
3686     + __le32 VolumeLabelLength; /* includes trailing null */
3687     + __u8 SupportsObjects; /* True if eg like NTFS, supports objects */
3688     + __u8 Reserved;
3689     + __u8 VolumeLabel[0]; /* variable len */
3690     +} __packed;
3691     +
3692     /* partial list of QUERY INFO levels */
3693     #define FILE_DIRECTORY_INFORMATION 1
3694     #define FILE_FULL_DIRECTORY_INFORMATION 2
3695     diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
3696     index 27b9a76a0dfa..638ad4743477 100644
3697     --- a/fs/ext4/mmp.c
3698     +++ b/fs/ext4/mmp.c
3699     @@ -186,11 +186,8 @@ static int kmmpd(void *data)
3700     goto exit_thread;
3701     }
3702    
3703     - if (sb_rdonly(sb)) {
3704     - ext4_warning(sb, "kmmpd being stopped since filesystem "
3705     - "has been remounted as readonly.");
3706     - goto exit_thread;
3707     - }
3708     + if (sb_rdonly(sb))
3709     + break;
3710    
3711     diff = jiffies - last_update_time;
3712     if (diff < mmp_update_interval * HZ)
3713     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3714     index 6747861f9b70..1db39e12e02b 100644
3715     --- a/fs/ext4/namei.c
3716     +++ b/fs/ext4/namei.c
3717     @@ -1397,6 +1397,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
3718     goto cleanup_and_exit;
3719     dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
3720     "falling back\n"));
3721     + ret = NULL;
3722     }
3723     nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
3724     if (!nblocks) {
3725     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3726     index f30d2bf40471..b4fb085261fd 100644
3727     --- a/fs/ext4/super.c
3728     +++ b/fs/ext4/super.c
3729     @@ -5163,6 +5163,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3730    
3731     if (sbi->s_journal)
3732     ext4_mark_recovery_complete(sb, es);
3733     + if (sbi->s_mmp_tsk)
3734     + kthread_stop(sbi->s_mmp_tsk);
3735     } else {
3736     /* Make sure we can mount this feature set readwrite */
3737     if (ext4_has_feature_readonly(sb) ||
3738     diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
3739     index e21afd52e7d7..bdfc2a2de8f2 100644
3740     --- a/fs/ext4/sysfs.c
3741     +++ b/fs/ext4/sysfs.c
3742     @@ -278,8 +278,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
3743     case attr_pointer_ui:
3744     if (!ptr)
3745     return 0;
3746     - return snprintf(buf, PAGE_SIZE, "%u\n",
3747     - *((unsigned int *) ptr));
3748     + if (a->attr_ptr == ptr_ext4_super_block_offset)
3749     + return snprintf(buf, PAGE_SIZE, "%u\n",
3750     + le32_to_cpup(ptr));
3751     + else
3752     + return snprintf(buf, PAGE_SIZE, "%u\n",
3753     + *((unsigned int *) ptr));
3754     case attr_pointer_atomic:
3755     if (!ptr)
3756     return 0;
3757     @@ -312,7 +316,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
3758     ret = kstrtoul(skip_spaces(buf), 0, &t);
3759     if (ret)
3760     return ret;
3761     - *((unsigned int *) ptr) = t;
3762     + if (a->attr_ptr == ptr_ext4_super_block_offset)
3763     + *((__le32 *) ptr) = cpu_to_le32(t);
3764     + else
3765     + *((unsigned int *) ptr) = t;
3766     return len;
3767     case attr_inode_readahead:
3768     return inode_readahead_blks_store(a, sbi, buf, len);
3769     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
3770     index c7c8c16ccd93..9bc50eef6127 100644
3771     --- a/fs/ext4/xattr.c
3772     +++ b/fs/ext4/xattr.c
3773     @@ -189,6 +189,8 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
3774     struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
3775     if ((void *)next >= end)
3776     return -EFSCORRUPTED;
3777     + if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
3778     + return -EFSCORRUPTED;
3779     e = next;
3780     }
3781    
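
The added check works because strnlen(name, len) equals len only when no NUL occurs in the first len bytes; a smaller result means the on-disk e_name_len overstates the string, which is treated as corruption. For instance:

    /* strnlen("ab\0cd", 5) == 2, so an entry claiming e_name_len == 5
     * for that name fails the check below and is rejected. */
    if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
            return -EFSCORRUPTED;
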
3782     diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
3783     index de67745e1cd7..77946d6f617d 100644
3784     --- a/fs/fscache/operation.c
3785     +++ b/fs/fscache/operation.c
3786     @@ -66,7 +66,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
3787     ASSERT(op->processor != NULL);
3788     ASSERT(fscache_object_is_available(op->object));
3789     ASSERTCMP(atomic_read(&op->usage), >, 0);
3790     - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
3791     + ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
3792     + op->state, ==, FSCACHE_OP_ST_CANCELLED);
3793    
3794     fscache_stat(&fscache_n_op_enqueue);
3795     switch (op->flags & FSCACHE_OP_TYPE) {
3796     @@ -481,7 +482,8 @@ void fscache_put_operation(struct fscache_operation *op)
3797     struct fscache_cache *cache;
3798    
3799     _enter("{OBJ%x OP%x,%d}",
3800     - op->object->debug_id, op->debug_id, atomic_read(&op->usage));
3801     + op->object ? op->object->debug_id : 0,
3802     + op->debug_id, atomic_read(&op->usage));
3803    
3804     ASSERTCMP(atomic_read(&op->usage), >, 0);
3805    
3806     diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3807     index 261fd13a75c6..ee8105af4001 100644
3808     --- a/fs/fuse/dev.c
3809     +++ b/fs/fuse/dev.c
3810     @@ -131,6 +131,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
3811     return !fc->initialized || (for_background && fc->blocked);
3812     }
3813    
3814     +static void fuse_drop_waiting(struct fuse_conn *fc)
3815     +{
3816     + if (fc->connected) {
3817     + atomic_dec(&fc->num_waiting);
3818     + } else if (atomic_dec_and_test(&fc->num_waiting)) {
3819     + /* wake up aborters */
3820     + wake_up_all(&fc->blocked_waitq);
3821     + }
3822     +}
3823     +
3824     static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
3825     bool for_background)
3826     {
3827     @@ -171,7 +181,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
3828     return req;
3829    
3830     out:
3831     - atomic_dec(&fc->num_waiting);
3832     + fuse_drop_waiting(fc);
3833     return ERR_PTR(err);
3834     }
3835    
3836     @@ -278,7 +288,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
3837    
3838     if (test_bit(FR_WAITING, &req->flags)) {
3839     __clear_bit(FR_WAITING, &req->flags);
3840     - atomic_dec(&fc->num_waiting);
3841     + fuse_drop_waiting(fc);
3842     }
3843    
3844     if (req->stolen_file)
3845     @@ -364,7 +374,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
3846     struct fuse_iqueue *fiq = &fc->iq;
3847    
3848     if (test_and_set_bit(FR_FINISHED, &req->flags))
3849     - return;
3850     + goto put_request;
3851    
3852     spin_lock(&fiq->waitq.lock);
3853     list_del_init(&req->intr_entry);
3854     @@ -393,6 +403,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
3855     wake_up(&req->waitq);
3856     if (req->end)
3857     req->end(fc, req);
3858     +put_request:
3859     fuse_put_request(fc, req);
3860     }
3861    
3862     @@ -1941,11 +1952,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
3863     if (!fud)
3864     return -EPERM;
3865    
3866     + pipe_lock(pipe);
3867     +
3868     bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
3869     - if (!bufs)
3870     + if (!bufs) {
3871     + pipe_unlock(pipe);
3872     return -ENOMEM;
3873     + }
3874    
3875     - pipe_lock(pipe);
3876     nbuf = 0;
3877     rem = 0;
3878     for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
3879     @@ -2100,6 +2114,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
3880     set_bit(FR_ABORTED, &req->flags);
3881     if (!test_bit(FR_LOCKED, &req->flags)) {
3882     set_bit(FR_PRIVATE, &req->flags);
3883     + __fuse_get_request(req);
3884     list_move(&req->list, &to_end1);
3885     }
3886     spin_unlock(&req->waitq.lock);
3887     @@ -2126,7 +2141,6 @@ void fuse_abort_conn(struct fuse_conn *fc)
3888    
3889     while (!list_empty(&to_end1)) {
3890     req = list_first_entry(&to_end1, struct fuse_req, list);
3891     - __fuse_get_request(req);
3892     list_del_init(&req->list);
3893     request_end(fc, req);
3894     }
3895     @@ -2137,6 +2151,11 @@ void fuse_abort_conn(struct fuse_conn *fc)
3896     }
3897     EXPORT_SYMBOL_GPL(fuse_abort_conn);
3898    
3899     +void fuse_wait_aborted(struct fuse_conn *fc)
3900     +{
3901     + wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
3902     +}
3903     +
3904     int fuse_dev_release(struct inode *inode, struct file *file)
3905     {
3906     struct fuse_dev *fud = fuse_get_dev(file);
3907     @@ -2144,9 +2163,15 @@ int fuse_dev_release(struct inode *inode, struct file *file)
3908     if (fud) {
3909     struct fuse_conn *fc = fud->fc;
3910     struct fuse_pqueue *fpq = &fud->pq;
3911     + LIST_HEAD(to_end);
3912    
3913     + spin_lock(&fpq->lock);
3914     WARN_ON(!list_empty(&fpq->io));
3915     - end_requests(fc, &fpq->processing);
3916     + list_splice_init(&fpq->processing, &to_end);
3917     + spin_unlock(&fpq->lock);
3918     +
3919     + end_requests(fc, &to_end);
3920     +
3921     /* Are we the last open device? */
3922     if (atomic_dec_and_test(&fc->dev_count)) {
3923     WARN_ON(fc->iq.fasync != NULL);
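
fuse_drop_waiting() and fuse_wait_aborted() above form a last-one-out gate: every departing request decrements num_waiting, the final decrement wakes the waiter, and kill-sb blocks until the count reaches zero. Reduced to its skeleton (fuse additionally skips the wakeup while still connected; struct conn is a placeholder):

    static void drop_waiting(struct conn *c)
    {
            if (atomic_dec_and_test(&c->num_waiting))
                    wake_up_all(&c->waitq); /* last one out opens the gate */
    }

    static void wait_all_gone(struct conn *c)
    {
            wait_event(c->waitq, atomic_read(&c->num_waiting) == 0);
    }
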
3924     diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
3925     index 7a980b4462d9..29868c35c19a 100644
3926     --- a/fs/fuse/dir.c
3927     +++ b/fs/fuse/dir.c
3928     @@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
3929     struct inode *inode;
3930     struct dentry *newent;
3931     bool outarg_valid = true;
3932     + bool locked;
3933    
3934     - fuse_lock_inode(dir);
3935     + locked = fuse_lock_inode(dir);
3936     err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
3937     &outarg, &inode);
3938     - fuse_unlock_inode(dir);
3939     + fuse_unlock_inode(dir, locked);
3940     if (err == -ENOENT) {
3941     outarg_valid = false;
3942     err = 0;
3943     @@ -1332,6 +1333,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
3944     struct fuse_conn *fc = get_fuse_conn(inode);
3945     struct fuse_req *req;
3946     u64 attr_version = 0;
3947     + bool locked;
3948    
3949     if (is_bad_inode(inode))
3950     return -EIO;
3951     @@ -1359,9 +1361,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
3952     fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
3953     FUSE_READDIR);
3954     }
3955     - fuse_lock_inode(inode);
3956     + locked = fuse_lock_inode(inode);
3957     fuse_request_send(fc, req);
3958     - fuse_unlock_inode(inode);
3959     + fuse_unlock_inode(inode, locked);
3960     nbytes = req->out.args[0].size;
3961     err = req->out.h.error;
3962     fuse_put_request(fc, req);
3963     diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3964     index cb7dff5c45d7..fb4738ef162f 100644
3965     --- a/fs/fuse/file.c
3966     +++ b/fs/fuse/file.c
3967     @@ -866,6 +866,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
3968     }
3969    
3970     if (WARN_ON(req->num_pages >= req->max_pages)) {
3971     + unlock_page(page);
3972     fuse_put_request(fc, req);
3973     return -EIO;
3974     }
3975     diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
3976     index d5773ca67ad2..e105640153ce 100644
3977     --- a/fs/fuse/fuse_i.h
3978     +++ b/fs/fuse/fuse_i.h
3979     @@ -852,6 +852,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
3980    
3981     /* Abort all requests */
3982     void fuse_abort_conn(struct fuse_conn *fc);
3983     +void fuse_wait_aborted(struct fuse_conn *fc);
3984    
3985     /**
3986     * Invalidate inode attributes
3987     @@ -964,8 +965,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
3988    
3989     void fuse_set_initialized(struct fuse_conn *fc);
3990    
3991     -void fuse_unlock_inode(struct inode *inode);
3992     -void fuse_lock_inode(struct inode *inode);
3993     +void fuse_unlock_inode(struct inode *inode, bool locked);
3994     +bool fuse_lock_inode(struct inode *inode);
3995    
3996     int fuse_setxattr(struct inode *inode, const char *name, const void *value,
3997     size_t size, int flags);
3998     diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
3999     index a13ecefa9cd1..ffb61787d77a 100644
4000     --- a/fs/fuse/inode.c
4001     +++ b/fs/fuse/inode.c
4002     @@ -357,15 +357,21 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
4003     return 0;
4004     }
4005    
4006     -void fuse_lock_inode(struct inode *inode)
4007     +bool fuse_lock_inode(struct inode *inode)
4008     {
4009     - if (!get_fuse_conn(inode)->parallel_dirops)
4010     + bool locked = false;
4011     +
4012     + if (!get_fuse_conn(inode)->parallel_dirops) {
4013     mutex_lock(&get_fuse_inode(inode)->mutex);
4014     + locked = true;
4015     + }
4016     +
4017     + return locked;
4018     }
4019    
4020     -void fuse_unlock_inode(struct inode *inode)
4021     +void fuse_unlock_inode(struct inode *inode, bool locked)
4022     {
4023     - if (!get_fuse_conn(inode)->parallel_dirops)
4024     + if (locked)
4025     mutex_unlock(&get_fuse_inode(inode)->mutex);
4026     }
4027    
4028     @@ -391,9 +397,6 @@ static void fuse_put_super(struct super_block *sb)
4029     {
4030     struct fuse_conn *fc = get_fuse_conn_super(sb);
4031    
4032     - fuse_send_destroy(fc);
4033     -
4034     - fuse_abort_conn(fc);
4035     mutex_lock(&fuse_mutex);
4036     list_del(&fc->entry);
4037     fuse_ctl_remove_conn(fc);
4038     @@ -1190,16 +1193,25 @@ static struct dentry *fuse_mount(struct file_system_type *fs_type,
4039     return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
4040     }
4041    
4042     -static void fuse_kill_sb_anon(struct super_block *sb)
4043     +static void fuse_sb_destroy(struct super_block *sb)
4044     {
4045     struct fuse_conn *fc = get_fuse_conn_super(sb);
4046    
4047     if (fc) {
4048     + fuse_send_destroy(fc);
4049     +
4050     + fuse_abort_conn(fc);
4051     + fuse_wait_aborted(fc);
4052     +
4053     down_write(&fc->killsb);
4054     fc->sb = NULL;
4055     up_write(&fc->killsb);
4056     }
4057     +}
4058    
4059     +static void fuse_kill_sb_anon(struct super_block *sb)
4060     +{
4061     + fuse_sb_destroy(sb);
4062     kill_anon_super(sb);
4063     }
4064    
4065     @@ -1222,14 +1234,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
4066    
4067     static void fuse_kill_sb_blk(struct super_block *sb)
4068     {
4069     - struct fuse_conn *fc = get_fuse_conn_super(sb);
4070     -
4071     - if (fc) {
4072     - down_write(&fc->killsb);
4073     - fc->sb = NULL;
4074     - up_write(&fc->killsb);
4075     - }
4076     -
4077     + fuse_sb_destroy(sb);
4078     kill_block_super(sb);
4079     }
4080    
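
fuse_lock_inode() now reports whether it really took the mutex and fuse_unlock_inode() releases only in that case, so callers need no knowledge of the parallel_dirops setting. The idiom in isolation (illustrative context struct):

    static bool maybe_lock(struct dir_ctx *ctx)
    {
            if (!ctx->parallel_ok) {
                    mutex_lock(&ctx->mutex);
                    return true;
            }
            return false;           /* caller passes this token back */
    }

    static void maybe_unlock(struct dir_ctx *ctx, bool locked)
    {
            if (locked)
                    mutex_unlock(&ctx->mutex);
    }
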
4081     diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
4082     index fcff2e0487fe..f1c1430ae721 100644
4083     --- a/fs/squashfs/file.c
4084     +++ b/fs/squashfs/file.c
4085     @@ -374,13 +374,29 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
4086     return squashfs_block_size(size);
4087     }
4088    
4089     +void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
4090     +{
4091     + int copied;
4092     + void *pageaddr;
4093     +
4094     + pageaddr = kmap_atomic(page);
4095     + copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
4096     + memset(pageaddr + copied, 0, PAGE_SIZE - copied);
4097     + kunmap_atomic(pageaddr);
4098     +
4099     + flush_dcache_page(page);
4100     + if (copied == avail)
4101     + SetPageUptodate(page);
4102     + else
4103     + SetPageError(page);
4104     +}
4105     +
4106     /* Copy data into page cache */
4107     void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
4108     int bytes, int offset)
4109     {
4110     struct inode *inode = page->mapping->host;
4111     struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
4112     - void *pageaddr;
4113     int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
4114     int start_index = page->index & ~mask, end_index = start_index | mask;
4115    
4116     @@ -406,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
4117     if (PageUptodate(push_page))
4118     goto skip_page;
4119    
4120     - pageaddr = kmap_atomic(push_page);
4121     - squashfs_copy_data(pageaddr, buffer, offset, avail);
4122     - memset(pageaddr + avail, 0, PAGE_SIZE - avail);
4123     - kunmap_atomic(pageaddr);
4124     - flush_dcache_page(push_page);
4125     - SetPageUptodate(push_page);
4126     + squashfs_fill_page(push_page, buffer, offset, avail);
4127     skip_page:
4128     unlock_page(push_page);
4129     if (i != page->index)
4130     @@ -420,10 +431,9 @@ skip_page:
4131     }
4132    
4133     /* Read datablock stored packed inside a fragment (tail-end packed block) */
4134     -static int squashfs_readpage_fragment(struct page *page)
4135     +static int squashfs_readpage_fragment(struct page *page, int expected)
4136     {
4137     struct inode *inode = page->mapping->host;
4138     - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
4139     struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
4140     squashfs_i(inode)->fragment_block,
4141     squashfs_i(inode)->fragment_size);
4142     @@ -434,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page)
4143     squashfs_i(inode)->fragment_block,
4144     squashfs_i(inode)->fragment_size);
4145     else
4146     - squashfs_copy_cache(page, buffer, i_size_read(inode) &
4147     - (msblk->block_size - 1),
4148     + squashfs_copy_cache(page, buffer, expected,
4149     squashfs_i(inode)->fragment_offset);
4150    
4151     squashfs_cache_put(buffer);
4152     return res;
4153     }
4154    
4155     -static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
4156     +static int squashfs_readpage_sparse(struct page *page, int expected)
4157     {
4158     - struct inode *inode = page->mapping->host;
4159     - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
4160     - int bytes = index == file_end ?
4161     - (i_size_read(inode) & (msblk->block_size - 1)) :
4162     - msblk->block_size;
4163     -
4164     - squashfs_copy_cache(page, NULL, bytes, 0);
4165     + squashfs_copy_cache(page, NULL, expected, 0);
4166     return 0;
4167     }
4168    
4169     @@ -460,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page)
4170     struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
4171     int index = page->index >> (msblk->block_log - PAGE_SHIFT);
4172     int file_end = i_size_read(inode) >> msblk->block_log;
4173     + int expected = index == file_end ?
4174     + (i_size_read(inode) & (msblk->block_size - 1)) :
4175     + msblk->block_size;
4176     int res;
4177     void *pageaddr;
4178    
4179     @@ -478,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page)
4180     goto error_out;
4181    
4182     if (bsize == 0)
4183     - res = squashfs_readpage_sparse(page, index, file_end);
4184     + res = squashfs_readpage_sparse(page, expected);
4185     else
4186     - res = squashfs_readpage_block(page, block, bsize);
4187     + res = squashfs_readpage_block(page, block, bsize, expected);
4188     } else
4189     - res = squashfs_readpage_fragment(page);
4190     + res = squashfs_readpage_fragment(page, expected);
4191    
4192     if (!res)
4193     return 0;
4194     diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
4195     index f2310d2a2019..a9ba8d96776a 100644
4196     --- a/fs/squashfs/file_cache.c
4197     +++ b/fs/squashfs/file_cache.c
4198     @@ -20,7 +20,7 @@
4199     #include "squashfs.h"
4200    
4201     /* Read separately compressed datablock and memcopy into page cache */
4202     -int squashfs_readpage_block(struct page *page, u64 block, int bsize)
4203     +int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
4204     {
4205     struct inode *i = page->mapping->host;
4206     struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
4207     @@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize)
4208     ERROR("Unable to read page, block %llx, size %x\n", block,
4209     bsize);
4210     else
4211     - squashfs_copy_cache(page, buffer, buffer->length, 0);
4212     + squashfs_copy_cache(page, buffer, expected, 0);
4213    
4214     squashfs_cache_put(buffer);
4215     return res;
4216     diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
4217     index cb485d8e0e91..80db1b86a27c 100644
4218     --- a/fs/squashfs/file_direct.c
4219     +++ b/fs/squashfs/file_direct.c
4220     @@ -21,10 +21,11 @@
4221     #include "page_actor.h"
4222    
4223     static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
4224     - int pages, struct page **page);
4225     + int pages, struct page **page, int bytes);
4226    
4227     /* Read separately compressed datablock directly into page cache */
4228     -int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
4229     +int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
4230     + int expected)
4231    
4232     {
4233     struct inode *inode = target_page->mapping->host;
4234     @@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
4235     * using an intermediate buffer.
4236     */
4237     res = squashfs_read_cache(target_page, block, bsize, pages,
4238     - page);
4239     + page, expected);
4240     if (res < 0)
4241     goto mark_errored;
4242    
4243     @@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
4244     if (res < 0)
4245     goto mark_errored;
4246    
4247     + if (res != expected) {
4248     + res = -EIO;
4249     + goto mark_errored;
4250     + }
4251     +
4252     /* Last page may have trailing bytes not filled */
4253     bytes = res % PAGE_SIZE;
4254     if (bytes) {
4255     @@ -138,13 +144,12 @@ out:
4256    
4257    
4258     static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
4259     - int pages, struct page **page)
4260     + int pages, struct page **page, int bytes)
4261     {
4262     struct inode *i = target_page->mapping->host;
4263     struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
4264     block, bsize);
4265     - int bytes = buffer->length, res = buffer->error, n, offset = 0;
4266     - void *pageaddr;
4267     + int res = buffer->error, n, offset = 0;
4268    
4269     if (res) {
4270     ERROR("Unable to read page, block %llx, size %x\n", block,
4271     @@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
4272     if (page[n] == NULL)
4273     continue;
4274    
4275     - pageaddr = kmap_atomic(page[n]);
4276     - squashfs_copy_data(pageaddr, buffer, offset, avail);
4277     - memset(pageaddr + avail, 0, PAGE_SIZE - avail);
4278     - kunmap_atomic(pageaddr);
4279     - flush_dcache_page(page[n]);
4280     - SetPageUptodate(page[n]);
4281     + squashfs_fill_page(page[n], buffer, offset, avail);
4282     unlock_page(page[n]);
4283     if (page[n] != target_page)
4284     put_page(page[n]);
4285     diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
4286     index 887d6d270080..f89f8a74c6ce 100644
4287     --- a/fs/squashfs/squashfs.h
4288     +++ b/fs/squashfs/squashfs.h
4289     @@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
4290     u64, u64, unsigned int);
4291    
4292     /* file.c */
4293     +void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
4294     void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
4295     int);
4296    
4297     /* file_xxx.c */
4298     -extern int squashfs_readpage_block(struct page *, u64, int);
4299     +extern int squashfs_readpage_block(struct page *, u64, int, int);
4300    
4301     /* id.c */
4302     extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
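
The squashfs hunks above replace trust in the on-disk block length with a computed byte count: every datablock of a file holds block_size bytes except the tail block, which is bounded by the inode size. A standalone sketch of that arithmetic, with expected_bytes() as a hypothetical helper name, not a squashfs function:

    #include <stdio.h>

    /* Sketch of the `expected` computation squashfs_readpage() gains above:
     * full blocks hold (1 << block_log) bytes, the tail block holds the
     * remainder of i_size. Illustration only. */
    static int expected_bytes(long long i_size, int block_log, int index)
    {
        int block_size = 1 << block_log;
        long long file_end = i_size >> block_log;   /* index of tail block */

        return index == file_end ? (int)(i_size & (block_size - 1))
                                 : block_size;
    }

    int main(void)
    {
        /* 300000-byte file, 128 KiB blocks: blocks 0-1 full, block 2 short */
        printf("%d %d %d\n", expected_bytes(300000, 17, 0),
               expected_bytes(300000, 17, 1), expected_bytes(300000, 17, 2));
        return 0;
    }

A corrupted block that decompresses to more than this expected size now fails with -EIO in file_direct.c instead of silently overfilling the page cache.
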
4303     diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
4304     index 39c75a86c67f..666986b95c5d 100644
4305     --- a/fs/sysfs/file.c
4306     +++ b/fs/sysfs/file.c
4307     @@ -407,6 +407,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
4308     }
4309     EXPORT_SYMBOL_GPL(sysfs_chmod_file);
4310    
4311     +/**
4312     + * sysfs_break_active_protection - break "active" protection
4313     + * @kobj: The kernel object @attr is associated with.
4314     + * @attr: The attribute to break the "active" protection for.
4315     + *
4316     + * With sysfs, just like kernfs, deletion of an attribute is postponed until
4317     + * all active .show() and .store() callbacks have finished unless this function
4318     + * is called. Hence this function is useful in methods that implement self
4319     + * deletion.
4320     + */
4321     +struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
4322     + const struct attribute *attr)
4323     +{
4324     + struct kernfs_node *kn;
4325     +
4326     + kobject_get(kobj);
4327     + kn = kernfs_find_and_get(kobj->sd, attr->name);
4328     + if (kn)
4329     + kernfs_break_active_protection(kn);
4330     + return kn;
4331     +}
4332     +EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
4333     +
4334     +/**
4335     + * sysfs_unbreak_active_protection - restore "active" protection
4336     + * @kn: Pointer returned by sysfs_break_active_protection().
4337     + *
4338     + * Undo the effects of sysfs_break_active_protection(). Since this function
4339     + * calls kernfs_put() on the kernfs node that corresponds to the 'attr'
4340     + * argument passed to sysfs_break_active_protection() that attribute may have
4341     + * been removed between the sysfs_break_active_protection() and
4342     + * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after
4343     + * this function has returned.
4344     + */
4345     +void sysfs_unbreak_active_protection(struct kernfs_node *kn)
4346     +{
4347     + struct kobject *kobj = kn->parent->priv;
4348     +
4349     + kernfs_unbreak_active_protection(kn);
4350     + kernfs_put(kn);
4351     + kobject_put(kobj);
4352     +}
4353     +EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
4354     +
4355     /**
4356     * sysfs_remove_file_ns - remove an object attribute with a custom ns tag
4357     * @kobj: object we're acting for
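
A hedged sketch of the self-deletion pattern these helpers enable: a store() callback that removes its own kobject. Without breaking the active protection first, the removal would wait for this very callback to finish and deadlock. my_object_remove() is hypothetical, not part of the patch:

    static ssize_t delete_store(struct kobject *kobj, struct kobj_attribute *attr,
                                const char *buf, size_t count)
    {
        struct kernfs_node *kn;

        kn = sysfs_break_active_protection(kobj, &attr->attr);
        if (WARN_ON_ONCE(!kn))
            return -ENODEV;

        my_object_remove(kobj);   /* hypothetical: also deletes this attribute */

        sysfs_unbreak_active_protection(kn);
        return count;
    }

Note the comment in sysfs_unbreak_active_protection() above: after it returns, kn may already be gone, so the callback must not touch it again.
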
4358     diff --git a/include/linux/printk.h b/include/linux/printk.h
4359     index 335926039adc..6106befed756 100644
4360     --- a/include/linux/printk.h
4361     +++ b/include/linux/printk.h
4362     @@ -150,9 +150,13 @@ void early_printk(const char *s, ...) { }
4363     #ifdef CONFIG_PRINTK_NMI
4364     extern void printk_nmi_enter(void);
4365     extern void printk_nmi_exit(void);
4366     +extern void printk_nmi_direct_enter(void);
4367     +extern void printk_nmi_direct_exit(void);
4368     #else
4369     static inline void printk_nmi_enter(void) { }
4370     static inline void printk_nmi_exit(void) { }
4371     +static inline void printk_nmi_direct_enter(void) { }
4372     +static inline void printk_nmi_direct_exit(void) { }
4373     #endif /* PRINTK_NMI */
4374    
4375     #ifdef CONFIG_PRINTK
4376     diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
4377     index 1b92a28dd672..6fd615a0eea9 100644
4378     --- a/include/linux/rtmutex.h
4379     +++ b/include/linux/rtmutex.h
4380     @@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
4381     extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
4382     extern void rt_mutex_destroy(struct rt_mutex *lock);
4383    
4384     +#ifdef CONFIG_DEBUG_LOCK_ALLOC
4385     +extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
4386     +#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
4387     +#else
4388     extern void rt_mutex_lock(struct rt_mutex *lock);
4389     +#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
4390     +#endif
4391     +
4392     extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
4393     extern int rt_mutex_timed_lock(struct rt_mutex *lock,
4394     struct hrtimer_sleeper *timeout);
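
Usage-wise the new interface mirrors mutex_lock_nested(): the subclass only annotates lockdep. A minimal sketch, assuming two rt_mutexes of the same lock class taken in a fixed order (lock_two_rt_mutexes() is a hypothetical helper):

    static void lock_two_rt_mutexes(struct rt_mutex *a, struct rt_mutex *b)
    {
        rt_mutex_lock(a);                              /* subclass 0 */
        rt_mutex_lock_nested(b, SINGLE_DEPTH_NESTING); /* subclass 1 */
    }

On kernels without CONFIG_DEBUG_LOCK_ALLOC the #else branch above collapses rt_mutex_lock_nested() back to rt_mutex_lock(), so the annotation costs nothing.
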
4395     diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
4396     index 40839c02d28c..cca19bb200bd 100644
4397     --- a/include/linux/sysfs.h
4398     +++ b/include/linux/sysfs.h
4399     @@ -239,6 +239,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
4400     const struct attribute **attr);
4401     int __must_check sysfs_chmod_file(struct kobject *kobj,
4402     const struct attribute *attr, umode_t mode);
4403     +struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
4404     + const struct attribute *attr);
4405     +void sysfs_unbreak_active_protection(struct kernfs_node *kn);
4406     void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
4407     const void *ns);
4408     bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
4409     @@ -352,6 +355,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
4410     return 0;
4411     }
4412    
4413     +static inline struct kernfs_node *
4414     +sysfs_break_active_protection(struct kobject *kobj,
4415     + const struct attribute *attr)
4416     +{
4417     + return NULL;
4418     +}
4419     +
4420     +static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
4421     +{
4422     +}
4423     +
4424     static inline void sysfs_remove_file_ns(struct kobject *kobj,
4425     const struct attribute *attr,
4426     const void *ns)
4427     diff --git a/ipc/sem.c b/ipc/sem.c
4428     index b2698ebdcb31..d6dd2dc9ddad 100644
4429     --- a/ipc/sem.c
4430     +++ b/ipc/sem.c
4431     @@ -2041,7 +2041,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
4432     }
4433    
4434     do {
4435     - queue.status = -EINTR;
4436     + WRITE_ONCE(queue.status, -EINTR);
4437     queue.sleeper = current;
4438    
4439     __set_current_state(TASK_INTERRUPTIBLE);
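
The ipc/sem.c change is about store tearing: queue.status is read locklessly by the waker, so the plain assignment must become a single, untorn store. A generic sketch of the pairing, with illustrative names rather than sem.c internals:

    struct waiter {
        int status;
    };

    static void publish_status(struct waiter *w, int error)
    {
        WRITE_ONCE(w->status, error);   /* compiler must emit one plain store */
    }

    static int peek_status(struct waiter *w)
    {
        return READ_ONCE(w->status);    /* and one plain load on the reader */
    }

Neither macro adds a memory barrier; they only forbid the compiler from splitting, fusing, or re-materializing the access.
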
4440     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4441     index a66e838640ea..5c90765d37e7 100644
4442     --- a/kernel/kprobes.c
4443     +++ b/kernel/kprobes.c
4444     @@ -2531,7 +2531,7 @@ static int __init debugfs_kprobe_init(void)
4445     if (!dir)
4446     return -ENOMEM;
4447    
4448     - file = debugfs_create_file("list", 0444, dir, NULL,
4449     + file = debugfs_create_file("list", 0400, dir, NULL,
4450     &debugfs_kprobes_operations);
4451     if (!file)
4452     goto error;
4453     @@ -2541,7 +2541,7 @@ static int __init debugfs_kprobe_init(void)
4454     if (!file)
4455     goto error;
4456    
4457     - file = debugfs_create_file("blacklist", 0444, dir, NULL,
4458     + file = debugfs_create_file("blacklist", 0400, dir, NULL,
4459     &debugfs_kprobe_blacklist_ops);
4460     if (!file)
4461     goto error;
4462     diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
4463     index 65cc0cb984e6..4ad35718f123 100644
4464     --- a/kernel/locking/rtmutex.c
4465     +++ b/kernel/locking/rtmutex.c
4466     @@ -1466,6 +1466,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
4467     rt_mutex_postunlock(&wake_q);
4468     }
4469    
4470     +static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
4471     +{
4472     + might_sleep();
4473     +
4474     + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
4475     + rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
4476     +}
4477     +
4478     +#ifdef CONFIG_DEBUG_LOCK_ALLOC
4479     +/**
4480     + * rt_mutex_lock_nested - lock a rt_mutex
4481     + *
4482     + * @lock: the rt_mutex to be locked
4483     + * @subclass: the lockdep subclass
4484     + */
4485     +void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
4486     +{
4487     + __rt_mutex_lock(lock, subclass);
4488     +}
4489     +EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
4490     +#endif
4491     +
4492     +#ifndef CONFIG_DEBUG_LOCK_ALLOC
4493     /**
4494     * rt_mutex_lock - lock a rt_mutex
4495     *
4496     @@ -1473,12 +1496,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
4497     */
4498     void __sched rt_mutex_lock(struct rt_mutex *lock)
4499     {
4500     - might_sleep();
4501     -
4502     - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
4503     - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
4504     + __rt_mutex_lock(lock, 0);
4505     }
4506     EXPORT_SYMBOL_GPL(rt_mutex_lock);
4507     +#endif
4508    
4509     /**
4510     * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
4511     diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
4512     index 2a7d04049af4..0f1898820cba 100644
4513     --- a/kernel/printk/internal.h
4514     +++ b/kernel/printk/internal.h
4515     @@ -19,11 +19,16 @@
4516     #ifdef CONFIG_PRINTK
4517    
4518     #define PRINTK_SAFE_CONTEXT_MASK 0x3fffffff
4519     -#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
4520     +#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x40000000
4521     #define PRINTK_NMI_CONTEXT_MASK 0x80000000
4522    
4523     extern raw_spinlock_t logbuf_lock;
4524    
4525     +__printf(5, 0)
4526     +int vprintk_store(int facility, int level,
4527     + const char *dict, size_t dictlen,
4528     + const char *fmt, va_list args);
4529     +
4530     __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
4531     __printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
4532     __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
4533     @@ -54,6 +59,8 @@ void __printk_safe_exit(void);
4534     local_irq_enable(); \
4535     } while (0)
4536    
4537     +void defer_console_output(void);
4538     +
4539     #else
4540    
4541     __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
4542     diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
4543     index 512f7c2baedd..f0223a7d9ed1 100644
4544     --- a/kernel/printk/printk.c
4545     +++ b/kernel/printk/printk.c
4546     @@ -1680,28 +1680,16 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c
4547     return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
4548     }
4549    
4550     -asmlinkage int vprintk_emit(int facility, int level,
4551     - const char *dict, size_t dictlen,
4552     - const char *fmt, va_list args)
4553     +/* Must be called under logbuf_lock. */
4554     +int vprintk_store(int facility, int level,
4555     + const char *dict, size_t dictlen,
4556     + const char *fmt, va_list args)
4557     {
4558     static char textbuf[LOG_LINE_MAX];
4559     char *text = textbuf;
4560     size_t text_len;
4561     enum log_flags lflags = 0;
4562     - unsigned long flags;
4563     - int printed_len;
4564     - bool in_sched = false;
4565     -
4566     - if (level == LOGLEVEL_SCHED) {
4567     - level = LOGLEVEL_DEFAULT;
4568     - in_sched = true;
4569     - }
4570     -
4571     - boot_delay_msec(level);
4572     - printk_delay();
4573    
4574     - /* This stops the holder of console_sem just where we want him */
4575     - logbuf_lock_irqsave(flags);
4576     /*
4577     * The printf needs to come first; we need the syslog
4578     * prefix which might be passed-in as a parameter.
4579     @@ -1742,8 +1730,29 @@ asmlinkage int vprintk_emit(int facility, int level,
4580     if (dict)
4581     lflags |= LOG_PREFIX|LOG_NEWLINE;
4582    
4583     - printed_len = log_output(facility, level, lflags, dict, dictlen, text, text_len);
4584     + return log_output(facility, level, lflags,
4585     + dict, dictlen, text, text_len);
4586     +}
4587    
4588     +asmlinkage int vprintk_emit(int facility, int level,
4589     + const char *dict, size_t dictlen,
4590     + const char *fmt, va_list args)
4591     +{
4592     + int printed_len;
4593     + bool in_sched = false;
4594     + unsigned long flags;
4595     +
4596     + if (level == LOGLEVEL_SCHED) {
4597     + level = LOGLEVEL_DEFAULT;
4598     + in_sched = true;
4599     + }
4600     +
4601     + boot_delay_msec(level);
4602     + printk_delay();
4603     +
4604     + /* This stops the holder of console_sem just where we want him */
4605     + logbuf_lock_irqsave(flags);
4606     + printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
4607     logbuf_unlock_irqrestore(flags);
4608    
4609     /* If called from the scheduler, we can not call up(). */
4610     @@ -2714,16 +2723,20 @@ void wake_up_klogd(void)
4611     preempt_enable();
4612     }
4613    
4614     -int vprintk_deferred(const char *fmt, va_list args)
4615     +void defer_console_output(void)
4616     {
4617     - int r;
4618     -
4619     - r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
4620     -
4621     preempt_disable();
4622     __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
4623     irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
4624     preempt_enable();
4625     +}
4626     +
4627     +int vprintk_deferred(const char *fmt, va_list args)
4628     +{
4629     + int r;
4630     +
4631     + r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
4632     + defer_console_output();
4633    
4634     return r;
4635     }
4636     diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
4637     index 64825b2df3a5..d482fd61ac67 100644
4638     --- a/kernel/printk/printk_safe.c
4639     +++ b/kernel/printk/printk_safe.c
4640     @@ -311,24 +311,33 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
4641    
4642     void printk_nmi_enter(void)
4643     {
4644     - /*
4645     - * The size of the extra per-CPU buffer is limited. Use it only when
4646     - * the main one is locked. If this CPU is not in the safe context,
4647     - * the lock must be taken on another CPU and we could wait for it.
4648     - */
4649     - if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) &&
4650     - raw_spin_is_locked(&logbuf_lock)) {
4651     - this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
4652     - } else {
4653     - this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK);
4654     - }
4655     + this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
4656     }
4657    
4658     void printk_nmi_exit(void)
4659     {
4660     - this_cpu_and(printk_context,
4661     - ~(PRINTK_NMI_CONTEXT_MASK |
4662     - PRINTK_NMI_DEFERRED_CONTEXT_MASK));
4663     + this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
4664     +}
4665     +
4666     +/*
4667     + * Marks a code that might produce many messages in NMI context
4668     + * and the risk of losing them is more critical than eventual
4669     + * reordering.
4670     + *
4671     + * It has effect only when called in NMI context. Then printk()
4672     + * will try to store the messages into the main logbuf directly
4673     + * and use the per-CPU buffers only as a fallback when the lock
4674     + * is not available.
4675     + */
4676     +void printk_nmi_direct_enter(void)
4677     +{
4678     + if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
4679     + this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
4680     +}
4681     +
4682     +void printk_nmi_direct_exit(void)
4683     +{
4684     + this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
4685     }
4686    
4687     #else
4688     @@ -366,6 +375,20 @@ void __printk_safe_exit(void)
4689    
4690     __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
4691     {
4692     + /*
4693     + * Try to use the main logbuf even in NMI. But avoid calling console
4694     + * drivers that might have their own locks.
4695     + */
4696     + if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
4697     + raw_spin_trylock(&logbuf_lock)) {
4698     + int len;
4699     +
4700     + len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
4701     + raw_spin_unlock(&logbuf_lock);
4702     + defer_console_output();
4703     + return len;
4704     + }
4705     +
4706     /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
4707     if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
4708     return vprintk_nmi(fmt, args);
4709     @@ -374,13 +397,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
4710     if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
4711     return vprintk_safe(fmt, args);
4712    
4713     - /*
4714     - * Use the main logbuf when logbuf_lock is available in NMI.
4715     - * But avoid calling console drivers that might have their own locks.
4716     - */
4717     - if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
4718     - return vprintk_deferred(fmt, args);
4719     -
4720     /* No obstacles. */
4721     return vprintk_default(fmt, args);
4722     }
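
The intended calling pattern for the new direct mode, mirroring the ftrace_dump() hunk further down: bracket a potentially large message flood so that, when it runs in NMI context, printk() stores straight into the main log buffer and only the console output is deferred. A sketch:

    static void dump_lots_of_state(void)
    {
        unsigned long flags;

        local_irq_save(flags);
        printk_nmi_direct_enter();  /* no effect unless called in NMI context */

        /* ... emit many printk() lines ... */

        printk_nmi_direct_exit();
        local_irq_restore(flags);
    }

If logbuf_lock happens to be held elsewhere, vprintk_func() above falls back to the small per-CPU buffer, so the trade is possible reordering instead of lost messages.
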
4723     diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
4724     index bba2217652ff..cb9a5b8532fa 100644
4725     --- a/kernel/sched/rt.c
4726     +++ b/kernel/sched/rt.c
4727     @@ -837,6 +837,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
4728     * can be time-consuming. Try to avoid it when possible.
4729     */
4730     raw_spin_lock(&rt_rq->rt_runtime_lock);
4731     + if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
4732     + rt_rq->rt_runtime = rt_b->rt_runtime;
4733     skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
4734     raw_spin_unlock(&rt_rq->rt_runtime_lock);
4735     if (skip)
4736     diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
4737     index e190d1ef3a23..067cb83f37ea 100644
4738     --- a/kernel/stop_machine.c
4739     +++ b/kernel/stop_machine.c
4740     @@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
4741     unsigned long flags;
4742     bool enabled;
4743    
4744     + preempt_disable();
4745     raw_spin_lock_irqsave(&stopper->lock, flags);
4746     enabled = stopper->enabled;
4747     if (enabled)
4748     @@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
4749     raw_spin_unlock_irqrestore(&stopper->lock, flags);
4750    
4751     wake_up_q(&wakeq);
4752     + preempt_enable();
4753    
4754     return enabled;
4755     }
4756     @@ -236,13 +238,24 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
4757     struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
4758     DEFINE_WAKE_Q(wakeq);
4759     int err;
4760     +
4761     retry:
4762     + /*
4763     + * The waking up of stopper threads has to happen in the same
4764     + * scheduling context as the queueing. Otherwise, there is a
4765     + * possibility of one of the above stoppers being woken up by another
4766     + * CPU, and preempting us. This will cause us to not wake up the other
4767     + * stopper forever.
4768     + */
4769     + preempt_disable();
4770     raw_spin_lock_irq(&stopper1->lock);
4771     raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
4772    
4773     - err = -ENOENT;
4774     - if (!stopper1->enabled || !stopper2->enabled)
4775     + if (!stopper1->enabled || !stopper2->enabled) {
4776     + err = -ENOENT;
4777     goto unlock;
4778     + }
4779     +
4780     /*
4781     * Ensure that if we race with __stop_cpus() the stoppers won't get
4782     * queued up in reverse order leading to system deadlock.
4783     @@ -253,36 +266,30 @@ retry:
4784     * It can be falsely true but it is safe to spin until it is cleared,
4785     * queue_stop_cpus_work() does everything under preempt_disable().
4786     */
4787     - err = -EDEADLK;
4788     - if (unlikely(stop_cpus_in_progress))
4789     - goto unlock;
4790     + if (unlikely(stop_cpus_in_progress)) {
4791     + err = -EDEADLK;
4792     + goto unlock;
4793     + }
4794    
4795     err = 0;
4796     __cpu_stop_queue_work(stopper1, work1, &wakeq);
4797     __cpu_stop_queue_work(stopper2, work2, &wakeq);
4798     - /*
4799     - * The waking up of stopper threads has to happen
4800     - * in the same scheduling context as the queueing.
4801     - * Otherwise, there is a possibility of one of the
4802     - * above stoppers being woken up by another CPU,
4803     - * and preempting us. This will cause us to n ot
4804     - * wake up the other stopper forever.
4805     - */
4806     - preempt_disable();
4807     +
4808     unlock:
4809     raw_spin_unlock(&stopper2->lock);
4810     raw_spin_unlock_irq(&stopper1->lock);
4811    
4812     if (unlikely(err == -EDEADLK)) {
4813     + preempt_enable();
4814     +
4815     while (stop_cpus_in_progress)
4816     cpu_relax();
4817     +
4818     goto retry;
4819     }
4820    
4821     - if (!err) {
4822     - wake_up_q(&wakeq);
4823     - preempt_enable();
4824     - }
4825     + wake_up_q(&wakeq);
4826     + preempt_enable();
4827    
4828     return err;
4829     }
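
Condensed, the fixed cpu_stop_queue_two_works() now has this shape: the queueing and the wake-up share one preemption-disabled region, so a freshly woken stopper can no longer preempt the caller between waking the first and the second stopper. A fragment restating the flow, not compilable on its own:

    preempt_disable();
    raw_spin_lock_irq(&stopper1->lock);
    raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
    __cpu_stop_queue_work(stopper1, work1, &wakeq);
    __cpu_stop_queue_work(stopper2, work2, &wakeq);
    raw_spin_unlock(&stopper2->lock);
    raw_spin_unlock_irq(&stopper1->lock);
    wake_up_q(&wakeq);      /* still inside the preempt-off section */
    preempt_enable();

The single-CPU cpu_stop_queue_work() gets the same treatment in the first two hunks of this file.
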
4830     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4831     index fbc75c84076e..b7302c37c064 100644
4832     --- a/kernel/trace/trace.c
4833     +++ b/kernel/trace/trace.c
4834     @@ -8187,6 +8187,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4835     tracing_off();
4836    
4837     local_irq_save(flags);
4838     + printk_nmi_direct_enter();
4839    
4840     /* Simulate the iterator */
4841     trace_init_global_iter(&iter);
4842     @@ -8266,7 +8267,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4843     for_each_tracing_cpu(cpu) {
4844     atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
4845     }
4846     - atomic_dec(&dump_running);
4847     + atomic_dec(&dump_running);
4848     + printk_nmi_direct_exit();
4849     local_irq_restore(flags);
4850     }
4851     EXPORT_SYMBOL_GPL(ftrace_dump);
4852     diff --git a/kernel/watchdog.c b/kernel/watchdog.c
4853     index c8e06703e44c..087994b23f8b 100644
4854     --- a/kernel/watchdog.c
4855     +++ b/kernel/watchdog.c
4856     @@ -265,7 +265,7 @@ static void __touch_watchdog(void)
4857     * entering idle state. This should only be used for scheduler events.
4858     * Use touch_softlockup_watchdog() for everything else.
4859     */
4860     -void touch_softlockup_watchdog_sched(void)
4861     +notrace void touch_softlockup_watchdog_sched(void)
4862     {
4863     /*
4864     * Preemption can be enabled. It doesn't matter which CPU's timestamp
4865     @@ -274,7 +274,7 @@ void touch_softlockup_watchdog_sched(void)
4866     raw_cpu_write(watchdog_touch_ts, 0);
4867     }
4868    
4869     -void touch_softlockup_watchdog(void)
4870     +notrace void touch_softlockup_watchdog(void)
4871     {
4872     touch_softlockup_watchdog_sched();
4873     wq_watchdog_touch(raw_smp_processor_id());
4874     diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
4875     index e449a23e9d59..4ece6028007a 100644
4876     --- a/kernel/watchdog_hld.c
4877     +++ b/kernel/watchdog_hld.c
4878     @@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
4879     static unsigned long hardlockup_allcpu_dumped;
4880     static atomic_t watchdog_cpus = ATOMIC_INIT(0);
4881    
4882     -void arch_touch_nmi_watchdog(void)
4883     +notrace void arch_touch_nmi_watchdog(void)
4884     {
4885     /*
4886     * Using __raw here because some code paths have
4887     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4888     index d8a7f8939c81..08bc551976b2 100644
4889     --- a/kernel/workqueue.c
4890     +++ b/kernel/workqueue.c
4891     @@ -5484,7 +5484,7 @@ static void wq_watchdog_timer_fn(unsigned long data)
4892     mod_timer(&wq_watchdog_timer, jiffies + thresh);
4893     }
4894    
4895     -void wq_watchdog_touch(int cpu)
4896     +notrace void wq_watchdog_touch(int cpu)
4897     {
4898     if (cpu >= 0)
4899     per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
4900     diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
4901     index 46e4c749e4eb..70b1f9d830cd 100644
4902     --- a/lib/nmi_backtrace.c
4903     +++ b/lib/nmi_backtrace.c
4904     @@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
4905    
4906     bool nmi_cpu_backtrace(struct pt_regs *regs)
4907     {
4908     - static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
4909     int cpu = smp_processor_id();
4910    
4911     if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
4912     - arch_spin_lock(&lock);
4913     if (regs && cpu_in_idle(instruction_pointer(regs))) {
4914     pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
4915     cpu, instruction_pointer(regs));
4916     @@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
4917     else
4918     dump_stack();
4919     }
4920     - arch_spin_unlock(&lock);
4921     cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
4922     return true;
4923     }
4924     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4925     index db69d938e9ed..6a9a7e1066ef 100644
4926     --- a/mm/memcontrol.c
4927     +++ b/mm/memcontrol.c
4928     @@ -4110,6 +4110,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
4929    
4930     static DEFINE_IDR(mem_cgroup_idr);
4931    
4932     +static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4933     +{
4934     + if (memcg->id.id > 0) {
4935     + idr_remove(&mem_cgroup_idr, memcg->id.id);
4936     + memcg->id.id = 0;
4937     + }
4938     +}
4939     +
4940     static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4941     {
4942     VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
4943     @@ -4120,8 +4128,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4944     {
4945     VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4946     if (atomic_sub_and_test(n, &memcg->id.ref)) {
4947     - idr_remove(&mem_cgroup_idr, memcg->id.id);
4948     - memcg->id.id = 0;
4949     + mem_cgroup_id_remove(memcg);
4950    
4951     /* Memcg ID pins CSS */
4952     css_put(&memcg->css);
4953     @@ -4258,8 +4265,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
4954     idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4955     return memcg;
4956     fail:
4957     - if (memcg->id.id > 0)
4958     - idr_remove(&mem_cgroup_idr, memcg->id.id);
4959     + mem_cgroup_id_remove(memcg);
4960     __mem_cgroup_free(memcg);
4961     return NULL;
4962     }
4963     @@ -4318,6 +4324,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4964    
4965     return &memcg->css;
4966     fail:
4967     + mem_cgroup_id_remove(memcg);
4968     mem_cgroup_free(memcg);
4969     return ERR_PTR(-ENOMEM);
4970     }
4971     diff --git a/mm/memory.c b/mm/memory.c
4972     index 5539b1975091..c9657f013a4d 100644
4973     --- a/mm/memory.c
4974     +++ b/mm/memory.c
4975     @@ -246,9 +246,6 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
4976    
4977     tlb_flush(tlb);
4978     mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
4979     -#ifdef CONFIG_HAVE_RCU_TABLE_FREE
4980     - tlb_table_flush(tlb);
4981     -#endif
4982     __tlb_reset_range(tlb);
4983     }
4984    
4985     @@ -256,6 +253,9 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
4986     {
4987     struct mmu_gather_batch *batch;
4988    
4989     +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
4990     + tlb_table_flush(tlb);
4991     +#endif
4992     for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
4993     free_pages_and_swap_cache(batch->pages, batch->nr);
4994     batch->nr = 0;
4995     @@ -331,6 +331,21 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
4996     * See the comment near struct mmu_table_batch.
4997     */
4998    
4999     +/*
5000     + * If we want tlb_remove_table() to imply TLB invalidates.
5001     + */
5002     +static inline void tlb_table_invalidate(struct mmu_gather *tlb)
5003     +{
5004     +#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
5005     + /*
5006     + * Invalidate page-table caches used by hardware walkers. Then we still
5007     + * need to RCU-sched wait while freeing the pages because software
5008     + * walkers can still be in-flight.
5009     + */
5010     + tlb_flush_mmu_tlbonly(tlb);
5011     +#endif
5012     +}
5013     +
5014     static void tlb_remove_table_smp_sync(void *arg)
5015     {
5016     /* Simply deliver the interrupt */
5017     @@ -367,6 +382,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
5018     struct mmu_table_batch **batch = &tlb->batch;
5019    
5020     if (*batch) {
5021     + tlb_table_invalidate(tlb);
5022     call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
5023     *batch = NULL;
5024     }
5025     @@ -388,11 +404,13 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
5026     if (*batch == NULL) {
5027     *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
5028     if (*batch == NULL) {
5029     + tlb_table_invalidate(tlb);
5030     tlb_remove_table_one(table);
5031     return;
5032     }
5033     (*batch)->nr = 0;
5034     }
5035     +
5036     (*batch)->tables[(*batch)->nr++] = table;
5037     if ((*batch)->nr == MAX_TABLE_BATCH)
5038     tlb_table_flush(tlb);
5039     @@ -1417,11 +1435,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
5040     do {
5041     next = pmd_addr_end(addr, end);
5042     if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
5043     - if (next - addr != HPAGE_PMD_SIZE) {
5044     - VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
5045     - !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
5046     + if (next - addr != HPAGE_PMD_SIZE)
5047     __split_huge_pmd(vma, pmd, addr, false, NULL);
5048     - } else if (zap_huge_pmd(tlb, vma, pmd, addr))
5049     + else if (zap_huge_pmd(tlb, vma, pmd, addr))
5050     goto next;
5051     /* fall through */
5052     }
5053     @@ -4350,6 +4366,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5054     return -EINVAL;
5055    
5056     maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5057     + if (!maddr)
5058     + return -ENOMEM;
5059     +
5060     if (write)
5061     memcpy_toio(maddr + offset, buf, len);
5062     else
5063     diff --git a/mm/zswap.c b/mm/zswap.c
5064     index 597008a44f70..ebb0bc88c5f7 100644
5065     --- a/mm/zswap.c
5066     +++ b/mm/zswap.c
5067     @@ -989,6 +989,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
5068     ret = -ENOMEM;
5069     goto reject;
5070     }
5071     +
5072     + /* A second zswap_is_full() check after
5073     + * zswap_shrink() to make sure it's now
5074     + * under the max_pool_percent
5075     + */
5076     + if (zswap_is_full()) {
5077     + ret = -ENOMEM;
5078     + goto reject;
5079     + }
5080     }
5081    
5082     /* allocate entry */
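
The zswap hunk guards against a race: zswap_shrink() writes back only one entry, and other CPUs can refill the pool meanwhile, so the limit must be tested again before accepting the store. A self-contained toy model of that recheck (pool_used, pool_limit and the helpers are illustrative stand-ins, not zswap symbols):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int pool_used = 10, pool_limit = 8;

    static bool pool_full(void)   { return pool_used >= pool_limit; }
    static void pool_shrink(void) { pool_used--; }  /* evicts one entry */

    static int try_store(void)
    {
        if (pool_full()) {
            pool_shrink();
            if (pool_full())
                return -ENOMEM;     /* still over the limit: reject */
        }
        pool_used++;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", try_store());    /* -12: one eviction was not enough */
        return 0;
    }
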
5083     diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
5084     index 2d38b6e34203..98b62a7990aa 100644
5085     --- a/net/caif/caif_dev.c
5086     +++ b/net/caif/caif_dev.c
5087     @@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
5088     caifd = caif_get(skb->dev);
5089    
5090     WARN_ON(caifd == NULL);
5091     - if (caifd == NULL)
5092     + if (!caifd) {
5093     + rcu_read_unlock();
5094     return;
5095     + }
5096    
5097     caifd_hold(caifd);
5098     rcu_read_unlock();
5099     diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
5100     index 1307731ddfe4..832d69649cb6 100644
5101     --- a/net/core/lwt_bpf.c
5102     +++ b/net/core/lwt_bpf.c
5103     @@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
5104     if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
5105     return -EINVAL;
5106    
5107     - prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL);
5108     + prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
5109     if (!prog->name)
5110     return -ENOMEM;
5111    
5112     diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
5113     index 89910e2c10f4..f112fef79216 100644
5114     --- a/net/ipv6/esp6.c
5115     +++ b/net/ipv6/esp6.c
5116     @@ -651,8 +651,10 @@ skip_cow:
5117    
5118     sg_init_table(sg, nfrags);
5119     ret = skb_to_sgvec(skb, sg, 0, skb->len);
5120     - if (unlikely(ret < 0))
5121     + if (unlikely(ret < 0)) {
5122     + kfree(tmp);
5123     goto out;
5124     + }
5125    
5126     skb->ip_summed = CHECKSUM_NONE;
5127    
5128     diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
5129     index 0e0ab90a4334..b9e638cc955f 100644
5130     --- a/net/ipv6/ip6_vti.c
5131     +++ b/net/ipv6/ip6_vti.c
5132     @@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
5133     goto tx_err_dst_release;
5134     }
5135    
5136     - skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
5137     - skb_dst_set(skb, dst);
5138     - skb->dev = skb_dst(skb)->dev;
5139     -
5140     mtu = dst_mtu(dst);
5141     if (!skb->ignore_df && skb->len > mtu) {
5142     skb_dst_update_pmtu(skb, mtu);
5143     @@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
5144     htonl(mtu));
5145     }
5146    
5147     - return -EMSGSIZE;
5148     + err = -EMSGSIZE;
5149     + goto tx_err_dst_release;
5150     }
5151    
5152     + skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
5153     + skb_dst_set(skb, dst);
5154     + skb->dev = skb_dst(skb)->dev;
5155     +
5156     err = dst_output(t->net, skb->sk, skb);
5157     if (net_xmit_eval(err) == 0) {
5158     struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
5159     diff --git a/net/mac80211/util.c b/net/mac80211/util.c
5160     index 6aef6793d052..81f120466c38 100644
5161     --- a/net/mac80211/util.c
5162     +++ b/net/mac80211/util.c
5163     @@ -2068,7 +2068,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
5164     if (!sta->uploaded)
5165     continue;
5166    
5167     - if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
5168     + if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
5169     + sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
5170     continue;
5171    
5172     for (state = IEEE80211_STA_NOTEXIST;
5173     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5174     index 9a945024a0b6..742aacb317e5 100644
5175     --- a/net/netfilter/nf_tables_api.c
5176     +++ b/net/netfilter/nf_tables_api.c
5177     @@ -1480,7 +1480,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
5178     struct nft_base_chain *basechain;
5179     struct nft_stats *stats = NULL;
5180     struct nft_chain_hook hook;
5181     - const struct nlattr *name;
5182     struct nf_hook_ops *ops;
5183     struct nft_trans *trans;
5184     int err, i;
5185     @@ -1531,12 +1530,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
5186     return PTR_ERR(stats);
5187     }
5188    
5189     + err = -ENOMEM;
5190     trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
5191     sizeof(struct nft_trans_chain));
5192     - if (trans == NULL) {
5193     - free_percpu(stats);
5194     - return -ENOMEM;
5195     - }
5196     + if (trans == NULL)
5197     + goto err;
5198    
5199     nft_trans_chain_stats(trans) = stats;
5200     nft_trans_chain_update(trans) = true;
5201     @@ -1546,19 +1544,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
5202     else
5203     nft_trans_chain_policy(trans) = -1;
5204    
5205     - name = nla[NFTA_CHAIN_NAME];
5206     - if (nla[NFTA_CHAIN_HANDLE] && name) {
5207     - nft_trans_chain_name(trans) =
5208     - nla_strdup(name, GFP_KERNEL);
5209     - if (!nft_trans_chain_name(trans)) {
5210     - kfree(trans);
5211     - free_percpu(stats);
5212     - return -ENOMEM;
5213     + if (nla[NFTA_CHAIN_HANDLE] &&
5214     + nla[NFTA_CHAIN_NAME]) {
5215     + struct nft_trans *tmp;
5216     + char *name;
5217     +
5218     + err = -ENOMEM;
5219     + name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
5220     + if (!name)
5221     + goto err;
5222     +
5223     + err = -EEXIST;
5224     + list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) {
5225     + if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
5226     + tmp->ctx.table == table &&
5227     + nft_trans_chain_update(tmp) &&
5228     + nft_trans_chain_name(tmp) &&
5229     + strcmp(name, nft_trans_chain_name(tmp)) == 0) {
5230     + kfree(name);
5231     + goto err;
5232     + }
5233     }
5234     +
5235     + nft_trans_chain_name(trans) = name;
5236     }
5237     list_add_tail(&trans->list, &ctx->net->nft.commit_list);
5238    
5239     return 0;
5240     +err:
5241     + free_percpu(stats);
5242     + kfree(trans);
5243     + return err;
5244     }
5245    
5246     static int nf_tables_newchain(struct net *net, struct sock *nlsk,
5247     @@ -5043,6 +5059,9 @@ static void nf_tables_commit_release(struct nft_trans *trans)
5248     case NFT_MSG_DELTABLE:
5249     nf_tables_table_destroy(&trans->ctx);
5250     break;
5251     + case NFT_MSG_NEWCHAIN:
5252     + kfree(nft_trans_chain_name(trans));
5253     + break;
5254     case NFT_MSG_DELCHAIN:
5255     nf_tables_chain_destroy(trans->ctx.chain);
5256     break;
5257     @@ -5100,13 +5119,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
5258     nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
5259     break;
5260     case NFT_MSG_NEWCHAIN:
5261     - if (nft_trans_chain_update(trans))
5262     + if (nft_trans_chain_update(trans)) {
5263     nft_chain_commit_update(trans);
5264     - else
5265     + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
5266     + /* trans destroyed after rcu grace period */
5267     + } else {
5268     nft_clear(net, trans->ctx.chain);
5269     -
5270     - nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
5271     - nft_trans_destroy(trans);
5272     + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
5273     + nft_trans_destroy(trans);
5274     + }
5275     break;
5276     case NFT_MSG_DELCHAIN:
5277     list_del_rcu(&trans->ctx.chain->list);
5278     @@ -5246,7 +5267,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
5279     case NFT_MSG_NEWCHAIN:
5280     if (nft_trans_chain_update(trans)) {
5281     free_percpu(nft_trans_chain_stats(trans));
5282     -
5283     + kfree(nft_trans_chain_name(trans));
5284     nft_trans_destroy(trans);
5285     } else {
5286     trans->ctx.table->use--;
5287     diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
5288     index 9c0d5a7ce5f9..33aa2ac3a62e 100644
5289     --- a/net/netfilter/nft_set_hash.c
5290     +++ b/net/netfilter/nft_set_hash.c
5291     @@ -359,6 +359,7 @@ static void nft_rhash_destroy(const struct nft_set *set)
5292     struct nft_rhash *priv = nft_set_priv(set);
5293    
5294     cancel_delayed_work_sync(&priv->gc_work);
5295     + rcu_barrier();
5296     rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
5297     (void *)set);
5298     }
5299     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5300     index 4cd351b74e48..753f3e73c498 100644
5301     --- a/net/wireless/nl80211.c
5302     +++ b/net/wireless/nl80211.c
5303     @@ -4186,6 +4186,7 @@ static int parse_station_flags(struct genl_info *info,
5304     params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
5305     BIT(NL80211_STA_FLAG_MFP) |
5306     BIT(NL80211_STA_FLAG_AUTHORIZED);
5307     + break;
5308     default:
5309     return -EINVAL;
5310     }
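
The one-line nl80211 fix is a plain missing-break bug: the mesh-point case fell through into default and returned -EINVAL for a valid interface type. A minimal runnable illustration of the bug class, with made-up values:

    #include <stdio.h>

    static int classify(int type)
    {
        switch (type) {
        case 1:             /* a valid type: set its flags ... */
            break;          /* the line the patch adds */
        default:
            return -22;     /* -EINVAL */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", classify(1));    /* 0 with the break, -22 without */
        return 0;
    }
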
5311     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
5312     index 9c57d6a5816c..a6c0027cadb5 100644
5313     --- a/net/xfrm/xfrm_policy.c
5314     +++ b/net/xfrm/xfrm_policy.c
5315     @@ -2285,6 +2285,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
5316     if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
5317     return make_blackhole(net, dst_orig->ops->family, dst_orig);
5318    
5319     + if (IS_ERR(dst))
5320     + dst_release(dst_orig);
5321     +
5322     return dst;
5323     }
5324     EXPORT_SYMBOL(xfrm_lookup_route);
5325     diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
5326     index dde40f995ac0..5554d28a32eb 100644
5327     --- a/net/xfrm/xfrm_user.c
5328     +++ b/net/xfrm/xfrm_user.c
5329     @@ -1021,10 +1021,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
5330     {
5331     struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
5332    
5333     - if (nlsk)
5334     - return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
5335     - else
5336     - return -1;
5337     + if (!nlsk) {
5338     + kfree_skb(skb);
5339     + return -EPIPE;
5340     + }
5341     +
5342     + return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
5343     }
5344    
5345     static inline size_t xfrm_spdinfo_msgsize(void)
5346     diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
5347     index ffd1dfaa1cc1..f46750053377 100644
5348     --- a/scripts/gcc-plugins/gcc-common.h
5349     +++ b/scripts/gcc-plugins/gcc-common.h
5350     @@ -97,6 +97,10 @@
5351     #include "predict.h"
5352     #include "ipa-utils.h"
5353    
5354     +#if BUILDING_GCC_VERSION >= 8000
5355     +#include "stringpool.h"
5356     +#endif
5357     +
5358     #if BUILDING_GCC_VERSION >= 4009
5359     #include "attribs.h"
5360     #include "varasm.h"
5361     diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
5362     index 65264960910d..cbe1d6c4b1a5 100644
5363     --- a/scripts/gcc-plugins/latent_entropy_plugin.c
5364     +++ b/scripts/gcc-plugins/latent_entropy_plugin.c
5365     @@ -255,21 +255,14 @@ static tree handle_latent_entropy_attribute(tree *node, tree name,
5366     return NULL_TREE;
5367     }
5368    
5369     -static struct attribute_spec latent_entropy_attr = {
5370     - .name = "latent_entropy",
5371     - .min_length = 0,
5372     - .max_length = 0,
5373     - .decl_required = true,
5374     - .type_required = false,
5375     - .function_type_required = false,
5376     - .handler = handle_latent_entropy_attribute,
5377     -#if BUILDING_GCC_VERSION >= 4007
5378     - .affects_type_identity = false
5379     -#endif
5380     -};
5381     +static struct attribute_spec latent_entropy_attr = { };
5382    
5383     static void register_attributes(void *event_data __unused, void *data __unused)
5384     {
5385     + latent_entropy_attr.name = "latent_entropy";
5386     + latent_entropy_attr.decl_required = true;
5387     + latent_entropy_attr.handler = handle_latent_entropy_attribute;
5388     +
5389     register_attribute(&latent_entropy_attr);
5390     }
5391    
5392     diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
5393     index 0073af326449..c4a345c3715b 100644
5394     --- a/scripts/gcc-plugins/randomize_layout_plugin.c
5395     +++ b/scripts/gcc-plugins/randomize_layout_plugin.c
5396     @@ -580,68 +580,35 @@ static void finish_type(void *event_data, void *data)
5397     return;
5398     }
5399    
5400     -static struct attribute_spec randomize_layout_attr = {
5401     - .name = "randomize_layout",
5402     - // related to args
5403     - .min_length = 0,
5404     - .max_length = 0,
5405     - .decl_required = false,
5406     - // need type declaration
5407     - .type_required = true,
5408     - .function_type_required = false,
5409     - .handler = handle_randomize_layout_attr,
5410     -#if BUILDING_GCC_VERSION >= 4007
5411     - .affects_type_identity = true
5412     -#endif
5413     -};
5414     +static struct attribute_spec randomize_layout_attr = { };
5415     +static struct attribute_spec no_randomize_layout_attr = { };
5416     +static struct attribute_spec randomize_considered_attr = { };
5417     +static struct attribute_spec randomize_performed_attr = { };
5418    
5419     -static struct attribute_spec no_randomize_layout_attr = {
5420     - .name = "no_randomize_layout",
5421     - // related to args
5422     - .min_length = 0,
5423     - .max_length = 0,
5424     - .decl_required = false,
5425     - // need type declaration
5426     - .type_required = true,
5427     - .function_type_required = false,
5428     - .handler = handle_randomize_layout_attr,
5429     +static void register_attributes(void *event_data, void *data)
5430     +{
5431     + randomize_layout_attr.name = "randomize_layout";
5432     + randomize_layout_attr.type_required = true;
5433     + randomize_layout_attr.handler = handle_randomize_layout_attr;
5434     #if BUILDING_GCC_VERSION >= 4007
5435     - .affects_type_identity = true
5436     + randomize_layout_attr.affects_type_identity = true;
5437     #endif
5438     -};
5439    
5440     -static struct attribute_spec randomize_considered_attr = {
5441     - .name = "randomize_considered",
5442     - // related to args
5443     - .min_length = 0,
5444     - .max_length = 0,
5445     - .decl_required = false,
5446     - // need type declaration
5447     - .type_required = true,
5448     - .function_type_required = false,
5449     - .handler = handle_randomize_considered_attr,
5450     + no_randomize_layout_attr.name = "no_randomize_layout";
5451     + no_randomize_layout_attr.type_required = true;
5452     + no_randomize_layout_attr.handler = handle_randomize_layout_attr;
5453     #if BUILDING_GCC_VERSION >= 4007
5454     - .affects_type_identity = false
5455     + no_randomize_layout_attr.affects_type_identity = true;
5456     #endif
5457     -};
5458    
5459     -static struct attribute_spec randomize_performed_attr = {
5460     - .name = "randomize_performed",
5461     - // related to args
5462     - .min_length = 0,
5463     - .max_length = 0,
5464     - .decl_required = false,
5465     - // need type declaration
5466     - .type_required = true,
5467     - .function_type_required = false,
5468     - .handler = handle_randomize_performed_attr,
5469     -#if BUILDING_GCC_VERSION >= 4007
5470     - .affects_type_identity = false
5471     -#endif
5472     -};
5473     + randomize_considered_attr.name = "randomize_considered";
5474     + randomize_considered_attr.type_required = true;
5475     + randomize_considered_attr.handler = handle_randomize_considered_attr;
5476     +
5477     + randomize_performed_attr.name = "randomize_performed";
5478     + randomize_performed_attr.type_required = true;
5479     + randomize_performed_attr.handler = handle_randomize_performed_attr;
5480    
5481     -static void register_attributes(void *event_data, void *data)
5482     -{
5483     register_attribute(&randomize_layout_attr);
5484     register_attribute(&no_randomize_layout_attr);
5485     register_attribute(&randomize_considered_attr);
5486     diff --git a/scripts/gcc-plugins/structleak_plugin.c b/scripts/gcc-plugins/structleak_plugin.c
5487     index 3f8dd4868178..10292f791e99 100644
5488     --- a/scripts/gcc-plugins/structleak_plugin.c
5489     +++ b/scripts/gcc-plugins/structleak_plugin.c
5490     @@ -57,21 +57,16 @@ static tree handle_user_attribute(tree *node, tree name, tree args, int flags, b
5491     return NULL_TREE;
5492     }
5493    
5494     -static struct attribute_spec user_attr = {
5495     - .name = "user",
5496     - .min_length = 0,
5497     - .max_length = 0,
5498     - .decl_required = false,
5499     - .type_required = false,
5500     - .function_type_required = false,
5501     - .handler = handle_user_attribute,
5502     -#if BUILDING_GCC_VERSION >= 4007
5503     - .affects_type_identity = true
5504     -#endif
5505     -};
5506     +static struct attribute_spec user_attr = { };
5507    
5508     static void register_attributes(void *event_data, void *data)
5509     {
5510     + user_attr.name = "user";
5511     + user_attr.handler = handle_user_attribute;
5512     +#if BUILDING_GCC_VERSION >= 4007
5513     + user_attr.affects_type_identity = true;
5514     +#endif
5515     +
5516     register_attribute(&user_attr);
5517     }
5518    
5519     diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
5520     index 77e7dcf969d0..d70fcd4a1adf 100644
5521     --- a/sound/soc/sirf/sirf-usp.c
5522     +++ b/sound/soc/sirf/sirf-usp.c
5523     @@ -370,10 +370,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev)
5524     platform_set_drvdata(pdev, usp);
5525    
5526     mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5527     - base = devm_ioremap(&pdev->dev, mem_res->start,
5528     - resource_size(mem_res));
5529     - if (base == NULL)
5530     - return -ENOMEM;
5531     + base = devm_ioremap_resource(&pdev->dev, mem_res);
5532     + if (IS_ERR(base))
5533     + return PTR_ERR(base);
5534     usp->regmap = devm_regmap_init_mmio(&pdev->dev, base,
5535     &sirf_usp_regmap_config);
5536     if (IS_ERR(usp->regmap))
5537     diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
5538     index 3d0dab8282ad..6fc85199ac73 100644
5539     --- a/sound/soc/soc-pcm.c
5540     +++ b/sound/soc/soc-pcm.c
5541     @@ -1607,6 +1607,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream)
5542     int i;
5543    
5544     for (i = 0; i < be->num_codecs; i++) {
5545     + /*
5546     + * Skip CODECs which don't support the current stream
5547     + * type. See soc_pcm_init_runtime_hw() for more details
5548     + */
5549     + if (!snd_soc_dai_stream_valid(be->codec_dais[i],
5550     + stream))
5551     + continue;
5552     +
5553     codec_dai_drv = be->codec_dais[i]->driver;
5554     if (stream == SNDRV_PCM_STREAM_PLAYBACK)
5555     codec_stream = &codec_dai_drv->playback;
5556     diff --git a/sound/soc/zte/zx-tdm.c b/sound/soc/zte/zx-tdm.c
5557     index dc955272f58b..389272eeba9a 100644
5558     --- a/sound/soc/zte/zx-tdm.c
5559     +++ b/sound/soc/zte/zx-tdm.c
5560     @@ -144,8 +144,8 @@ static void zx_tdm_rx_dma_en(struct zx_tdm_info *tdm, bool on)
5561     #define ZX_TDM_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000)
5562    
5563     #define ZX_TDM_FMTBIT \
5564     - (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_MU_LAW | \
5565     - SNDRV_PCM_FORMAT_A_LAW)
5566     + (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_MU_LAW | \
5567     + SNDRV_PCM_FMTBIT_A_LAW)
5568    
5569     static int zx_tdm_dai_probe(struct snd_soc_dai *dai)
5570     {
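
The zx-tdm fix above hinges on the difference between SNDRV_PCM_FORMAT_* (an
index) and SNDRV_PCM_FMTBIT_* (1ULL shifted left by that index): ORing an
index into a bitmask silently selects an unrelated format. A small
demonstration; the two macros below mirror the kernel's definitions but are
redefined locally so the example compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define PCM_FORMAT_S16_LE 2                             /* enum value */
#define PCM_FMTBIT_S16_LE (1ULL << PCM_FORMAT_S16_LE)   /* mask bit */

int main(void)
{
        uint64_t mask = PCM_FMTBIT_S16_LE;

        /* wrong: ORing the enum value 2 sets bit 1, a different format */
        uint64_t buggy = mask | PCM_FORMAT_S16_LE;

        printf("correct mask 0x%llx, buggy mask 0x%llx\n",
               (unsigned long long)mask, (unsigned long long)buggy);
        return 0;
}
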
5571     diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
5572     index bd9c6b31a504..1512086c8cb8 100644
5573     --- a/tools/power/x86/turbostat/turbostat.c
5574     +++ b/tools/power/x86/turbostat/turbostat.c
5575     @@ -1038,9 +1038,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
5576     if (!printed || !summary_only)
5577     print_header("\t");
5578    
5579     - if (topo.num_cpus > 1)
5580     - format_counters(&average.threads, &average.cores,
5581     - &average.packages);
5582     + format_counters(&average.threads, &average.cores, &average.packages);
5583    
5584     printed = 1;
5585    
5586     @@ -4031,7 +4029,9 @@ void process_cpuid()
5587     family = (fms >> 8) & 0xf;
5588     model = (fms >> 4) & 0xf;
5589     stepping = fms & 0xf;
5590     - if (family == 6 || family == 0xf)
5591     + if (family == 0xf)
5592     + family += (fms >> 20) & 0xff;
5593     + if (family >= 6)
5594     model += ((fms >> 16) & 0xf) << 4;
5595    
5596     if (!quiet) {
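
The turbostat fix above follows the CPUID convention that the extended-family
field is added only when the base family is 0xf, while the extended-model
field applies once the (possibly extended) family is 6 or greater. A worked
userspace example on a literal CPUID.1.EAX value (0x000806ea, a Kaby Lake
Refresh part):

#include <stdio.h>

int main(void)
{
        unsigned int fms = 0x000806ea;
        unsigned int family, model, stepping;

        family = (fms >> 8) & 0xf;
        model = (fms >> 4) & 0xf;
        stepping = fms & 0xf;
        if (family == 0xf)                      /* extended family */
                family += (fms >> 20) & 0xff;
        if (family >= 6)                        /* extended model */
                model += ((fms >> 16) & 0xf) << 4;

        /* prints: family 0x6 model 0x8e stepping 0xa */
        printf("family 0x%x model 0x%x stepping 0x%x\n",
               family, model, stepping);
        return 0;
}
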
5597     diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
5598     new file mode 100644
5599     index 000000000000..3b1f45e13a2e
5600     --- /dev/null
5601     +++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
5602     @@ -0,0 +1,28 @@
5603     +#!/bin/sh
5604     +# description: Snapshot and tracing setting
5605     +# flags: instance
5606     +
5607     +[ ! -f snapshot ] && exit_unsupported
5608     +
5609     +echo "Set tracing off"
5610     +echo 0 > tracing_on
5611     +
5612     +echo "Allocate and take a snapshot"
5613     +echo 1 > snapshot
5614     +
5615     +# Since the trace buffer is empty, the snapshot is also empty, but allocated
5616     +grep -q "Snapshot is allocated" snapshot
5617     +
5618     +echo "Ensure tracing stays off"
5619     +test `cat tracing_on` -eq 0
5620     +
5621     +echo "Set tracing on"
5622     +echo 1 > tracing_on
5623     +
5624     +echo "Take a snapshot again"
5625     +echo 1 > snapshot
5626     +
5627     +echo "Ensure tracing stays on"
5628     +test `cat tracing_on` -eq 1
5629     +
5630     +exit 0
5631     diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
5632     index 95dd14648ba5..0f395dfb7774 100644
5633     --- a/tools/usb/ffs-test.c
5634     +++ b/tools/usb/ffs-test.c
5635     @@ -44,12 +44,25 @@
5636    
5637     /******************** Little Endian Handling ********************************/
5638    
5639     -#define cpu_to_le16(x) htole16(x)
5640     -#define cpu_to_le32(x) htole32(x)
5641     +/*
5642     + * cpu_to_le16/32 are used when initializing structures, a context where a
5643     + * function call is not allowed. Define cpu_to_le16/32 as constant
5644     + * expressions instead, so they stay usable in such initializers.
5645     + */
5646     +
5647     +#if __BYTE_ORDER == __LITTLE_ENDIAN
5648     +#define cpu_to_le16(x) (x)
5649     +#define cpu_to_le32(x) (x)
5650     +#else
5651     +#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
5652     +#define cpu_to_le32(x) \
5653     + ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
5654     + (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
5655     +#endif
5656     +
5657     #define le32_to_cpu(x) le32toh(x)
5658     #define le16_to_cpu(x) le16toh(x)
5659    
5660     -
5661     /******************** Messages and Errors ***********************************/
5662    
5663     static const char argv0[] = "ffs-test";
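
The ffs-test hunk above turns cpu_to_le16/32 into constant expressions,
which, unlike the htole16()/htole32() wrappers they replace, may legally
appear in static initializers. A worked example with the big-endian swap
hardcoded so the output is predictable on any host; struct descriptor is
illustrative:

#include <stdint.h>
#include <stdio.h>

/* big-endian variant from the hunk, hardcoded here for demonstration */
#define demo_cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))

struct descriptor {
        uint16_t length;
};

/* legal: the initializer is a compile-time constant expression */
static const struct descriptor desc = {
        .length = demo_cpu_to_le16(0x1234),
};

int main(void)
{
        printf("0x%04x\n", desc.length); /* 0x3412 */
        return 0;
}
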
5664     diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
5665     index b69798a7880e..ec275b8472a9 100644
5666     --- a/virt/kvm/arm/mmu.c
5667     +++ b/virt/kvm/arm/mmu.c
5668     @@ -901,19 +901,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
5669     pmd = stage2_get_pmd(kvm, cache, addr);
5670     VM_BUG_ON(!pmd);
5671    
5672     - /*
5673     - * Mapping in huge pages should only happen through a fault. If a
5674     - * page is merged into a transparent huge page, the individual
5675     - * subpages of that huge page should be unmapped through MMU
5676     - * notifiers before we get here.
5677     - *
5678     - * Merging of CompoundPages is not supported; they should become
5679     - * splitting first, unmapped, merged, and mapped back in on-demand.
5680     - */
5681     - VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
5682     -
5683     old_pmd = *pmd;
5684     if (pmd_present(old_pmd)) {
5685     + /*
5686     + * Multiple vcpus faulting on the same PMD entry can
5687     + * lead to them sequentially updating the PMD with the
5688     + * same value. Following the break-before-make
5689     + * (pmd_clear() followed by tlb_flush()) process can
5690     + * hinder forward progress due to refaults generated
5691     + * on missing translations.
5692     + *
5693     + * Skip updating the page table if the entry is
5694     + * unchanged.
5695     + */
5696     + if (pmd_val(old_pmd) == pmd_val(*new_pmd))
5697     + return 0;
5698     +
5699     + /*
5700     + * Mapping in huge pages should only happen through a
5701     + * fault. If a page is merged into a transparent huge
5702     + * page, the individual subpages of that huge page
5703     + * should be unmapped through MMU notifiers before we
5704     + * get here.
5705     + *
5706     + * Merging of CompoundPages is not supported; they
5707     + * should be split first, unmapped, merged,
5708     + * and mapped back in on demand.
5709     + */
5710     + VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
5711     +
5712     pmd_clear(pmd);
5713     kvm_tlb_flush_vmid_ipa(kvm, addr);
5714     } else {
5715     @@ -969,6 +985,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
5716     /* Create 2nd stage page table mapping - Level 3 */
5717     old_pte = *pte;
5718     if (pte_present(old_pte)) {
5719     + /* Skip page table update if there is no change */
5720     + if (pte_val(old_pte) == pte_val(*new_pte))
5721     + return 0;
5722     +
5723     kvm_set_pte(pte, __pte(0));
5724     kvm_tlb_flush_vmid_ipa(kvm, addr);
5725     } else {
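
The mmu.c hunk above adds a "skip if unchanged" guard in front of the
break-before-make sequence (clear the entry, flush the TLB, write the new
entry), since tearing down and re-inserting an identical translation only
generates refaults on the other vcpus. A userspace sketch of the guard;
set_entry(), clear_entry() and tlb_flush() are hypothetical stand-ins for the
stage-2 helpers in the hunk:

#include <stdint.h>
#include <stdio.h>

static uint64_t entry;                  /* stands in for *pmd / *pte */
static unsigned long flushes;

static void tlb_flush(void) { flushes++; }
static void clear_entry(void) { entry = 0; }

static int set_entry(uint64_t new)
{
        if (entry != 0) {
                if (entry == new)       /* the fix: nothing to do, no flush */
                        return 0;
                clear_entry();          /* break ... */
                tlb_flush();            /* ... before make */
        }
        entry = new;
        return 0;
}

int main(void)
{
        set_entry(0xabc);
        set_entry(0xabc);               /* second vcpu fault, same value */
        printf("flushes: %lu\n", flushes); /* 0: no teardown was needed */
        return 0;
}
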