Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0224-4.9.125-all-fixes.patch



Revision 3222
Thu Sep 6 07:41:37 2018 UTC by niro
File size: 107060 bytes
-linux-4.9.125
1 diff --git a/Makefile b/Makefile
2 index 53d57acfc17e..aef09ca7a924 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 124
9 +SUBLEVEL = 125
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
14 index d5da2115d78a..03d6bb0f4e13 100644
15 --- a/arch/arc/include/asm/delay.h
16 +++ b/arch/arc/include/asm/delay.h
17 @@ -17,8 +17,11 @@
18 #ifndef __ASM_ARC_UDELAY_H
19 #define __ASM_ARC_UDELAY_H
20
21 +#include <asm-generic/types.h>
22 #include <asm/param.h> /* HZ */
23
24 +extern unsigned long loops_per_jiffy;
25 +
26 static inline void __delay(unsigned long loops)
27 {
28 __asm__ __volatile__(
29 diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
30 index bbdfeb31dee6..fefe357c3d31 100644
31 --- a/arch/arc/mm/cache.c
32 +++ b/arch/arc/mm/cache.c
33 @@ -840,7 +840,7 @@ void flush_cache_mm(struct mm_struct *mm)
34 void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
35 unsigned long pfn)
36 {
37 - unsigned int paddr = pfn << PAGE_SHIFT;
38 + phys_addr_t paddr = pfn << PAGE_SHIFT;
39
40 u_vaddr &= PAGE_MASK;
41
42 @@ -860,8 +860,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
43 unsigned long u_vaddr)
44 {
45 /* TBD: do we really need to clear the kernel mapping */
46 - __flush_dcache_page(page_address(page), u_vaddr);
47 - __flush_dcache_page(page_address(page), page_address(page));
48 + __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
49 + __flush_dcache_page((phys_addr_t)page_address(page),
50 + (phys_addr_t)page_address(page));
51
52 }
53
54 diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
55 index 9d6718c1a199..3c401ce0351e 100644
56 --- a/arch/arc/plat-eznps/include/plat/ctop.h
57 +++ b/arch/arc/plat-eznps/include/plat/ctop.h
58 @@ -21,6 +21,7 @@
59 #error "Incorrect ctop.h include"
60 #endif
61
62 +#include <linux/types.h>
63 #include <soc/nps/common.h>
64
65 /* core auxiliary registers */
66 diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
67 index 7f868d9bb5ed..b3d268a79f05 100644
68 --- a/arch/arm/kvm/mmu.c
69 +++ b/arch/arm/kvm/mmu.c
70 @@ -894,19 +894,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
71 pmd = stage2_get_pmd(kvm, cache, addr);
72 VM_BUG_ON(!pmd);
73
74 - /*
75 - * Mapping in huge pages should only happen through a fault. If a
76 - * page is merged into a transparent huge page, the individual
77 - * subpages of that huge page should be unmapped through MMU
78 - * notifiers before we get here.
79 - *
80 - * Merging of CompoundPages is not supported; they should become
81 - * splitting first, unmapped, merged, and mapped back in on-demand.
82 - */
83 - VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
84 -
85 old_pmd = *pmd;
86 if (pmd_present(old_pmd)) {
87 + /*
88 + * Multiple vcpus faulting on the same PMD entry, can
89 + * lead to them sequentially updating the PMD with the
90 + * same value. Following the break-before-make
91 + * (pmd_clear() followed by tlb_flush()) process can
92 + * hinder forward progress due to refaults generated
93 + * on missing translations.
94 + *
95 + * Skip updating the page table if the entry is
96 + * unchanged.
97 + */
98 + if (pmd_val(old_pmd) == pmd_val(*new_pmd))
99 + return 0;
100 +
101 + /*
102 + * Mapping in huge pages should only happen through a
103 + * fault. If a page is merged into a transparent huge
104 + * page, the individual subpages of that huge page
105 + * should be unmapped through MMU notifiers before we
106 + * get here.
107 + *
108 + * Merging of CompoundPages is not supported; they
109 + * should become splitting first, unmapped, merged,
110 + * and mapped back in on-demand.
111 + */
112 + VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
113 +
114 pmd_clear(pmd);
115 kvm_tlb_flush_vmid_ipa(kvm, addr);
116 } else {
117 @@ -962,6 +978,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
118 /* Create 2nd stage page table mapping - Level 3 */
119 old_pte = *pte;
120 if (pte_present(old_pte)) {
121 + /* Skip page table update if there is no change */
122 + if (pte_val(old_pte) == pte_val(*new_pte))
123 + return 0;
124 +
125 kvm_set_pte(pte, __pte(0));
126 kvm_tlb_flush_vmid_ipa(kvm, addr);
127 } else {
128 diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
129 index f5077ea7af6d..30bcae0aef2a 100644
130 --- a/arch/arm64/kernel/probes/kprobes.c
131 +++ b/arch/arm64/kernel/probes/kprobes.c
132 @@ -274,7 +274,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
133 break;
134 case KPROBE_HIT_SS:
135 case KPROBE_REENTER:
136 - pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
137 + pr_warn("Unrecoverable kprobe detected.\n");
138 dump_kprobe(p);
139 BUG();
140 break;
141 diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
142 index 9d07b421f090..fa6b2fad7a3d 100644
143 --- a/arch/arm64/mm/init.c
144 +++ b/arch/arm64/mm/init.c
145 @@ -147,7 +147,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
146 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
147 int pfn_valid(unsigned long pfn)
148 {
149 - return memblock_is_map_memory(pfn << PAGE_SHIFT);
150 + phys_addr_t addr = pfn << PAGE_SHIFT;
151 +
152 + if ((addr >> PAGE_SHIFT) != pfn)
153 + return 0;
154 + return memblock_is_map_memory(addr);
155 }
156 EXPORT_SYMBOL(pfn_valid);
157 #endif
158 diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
159 index 8c9cbf13d32a..6054d49e608e 100644
160 --- a/arch/mips/bcm47xx/setup.c
161 +++ b/arch/mips/bcm47xx/setup.c
162 @@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
163 */
164 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
165 cpu_wait = NULL;
166 -
167 - /*
168 - * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
169 - * Enable ExternalSync for sync instruction to take effect
170 - */
171 - set_c0_config7(MIPS_CONF7_ES);
172 break;
173 #endif
174 }
175 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
176 index 22a6782f84f5..df78b2ca70eb 100644
177 --- a/arch/mips/include/asm/mipsregs.h
178 +++ b/arch/mips/include/asm/mipsregs.h
179 @@ -663,8 +663,6 @@
180 #define MIPS_CONF7_WII (_ULCAST_(1) << 31)
181
182 #define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
183 -/* ExternalSync */
184 -#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
185
186 #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
187 #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
188 @@ -2643,7 +2641,6 @@ __BUILD_SET_C0(status)
189 __BUILD_SET_C0(cause)
190 __BUILD_SET_C0(config)
191 __BUILD_SET_C0(config5)
192 -__BUILD_SET_C0(config7)
193 __BUILD_SET_C0(intcontrol)
194 __BUILD_SET_C0(intctl)
195 __BUILD_SET_C0(srsmap)
196 diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
197 index 0d36c87acbe2..ad6f019ff776 100644
198 --- a/arch/mips/include/asm/processor.h
199 +++ b/arch/mips/include/asm/processor.h
200 @@ -141,7 +141,7 @@ struct mips_fpu_struct {
201
202 #define NUM_DSP_REGS 6
203
204 -typedef __u32 dspreg_t;
205 +typedef unsigned long dspreg_t;
206
207 struct mips_dsp_state {
208 dspreg_t dspr[NUM_DSP_REGS];
209 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
210 index 4f64913b4b4c..b702ba3a0df3 100644
211 --- a/arch/mips/kernel/ptrace.c
212 +++ b/arch/mips/kernel/ptrace.c
213 @@ -876,7 +876,7 @@ long arch_ptrace(struct task_struct *child, long request,
214 goto out;
215 }
216 dregs = __get_dsp_regs(child);
217 - tmp = (unsigned long) (dregs[addr - DSP_BASE]);
218 + tmp = dregs[addr - DSP_BASE];
219 break;
220 }
221 case DSP_CONTROL:
222 diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
223 index b1e945738138..4840af169683 100644
224 --- a/arch/mips/kernel/ptrace32.c
225 +++ b/arch/mips/kernel/ptrace32.c
226 @@ -140,7 +140,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
227 goto out;
228 }
229 dregs = __get_dsp_regs(child);
230 - tmp = (unsigned long) (dregs[addr - DSP_BASE]);
231 + tmp = dregs[addr - DSP_BASE];
232 break;
233 }
234 case DSP_CONTROL:
235 diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
236 index 111ad475aa0c..4c2483f410c2 100644
237 --- a/arch/mips/lib/multi3.c
238 +++ b/arch/mips/lib/multi3.c
239 @@ -4,12 +4,12 @@
240 #include "libgcc.h"
241
242 /*
243 - * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
244 - * specific case only we'll implement it here.
245 + * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
246 + * that specific case only we implement that intrinsic here.
247 *
248 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
249 */
250 -#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
251 +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
252
253 /* multiply 64-bit values, low 64-bits returned */
254 static inline long long notrace dmulu(long long a, long long b)
255 diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
256 index c0e817f35e69..bdbbc320b006 100644
257 --- a/arch/powerpc/net/bpf_jit_comp64.c
258 +++ b/arch/powerpc/net/bpf_jit_comp64.c
259 @@ -326,6 +326,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
260 u64 imm64;
261 u8 *func;
262 u32 true_cond;
263 + u32 tmp_idx;
264
265 /*
266 * addrs[] maps a BPF bytecode address into a real offset from
267 @@ -685,11 +686,7 @@ emit_clear:
268 case BPF_STX | BPF_XADD | BPF_W:
269 /* Get EA into TMP_REG_1 */
270 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
271 - /* error if EA is not word-aligned */
272 - PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
273 - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
274 - PPC_LI(b2p[BPF_REG_0], 0);
275 - PPC_JMP(exit_addr);
276 + tmp_idx = ctx->idx * 4;
277 /* load value from memory into TMP_REG_2 */
278 PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
279 /* add value from src_reg into this */
280 @@ -697,32 +694,16 @@ emit_clear:
281 /* store result back */
282 PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
283 /* we're done if this succeeded */
284 - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
285 - /* otherwise, let's try once more */
286 - PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
287 - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
288 - PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
289 - /* exit if the store was not successful */
290 - PPC_LI(b2p[BPF_REG_0], 0);
291 - PPC_BCC(COND_NE, exit_addr);
292 + PPC_BCC_SHORT(COND_NE, tmp_idx);
293 break;
294 /* *(u64 *)(dst + off) += src */
295 case BPF_STX | BPF_XADD | BPF_DW:
296 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
297 - /* error if EA is not doubleword-aligned */
298 - PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
299 - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
300 - PPC_LI(b2p[BPF_REG_0], 0);
301 - PPC_JMP(exit_addr);
302 - PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
303 - PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
304 - PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
305 - PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
306 + tmp_idx = ctx->idx * 4;
307 PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
308 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
309 PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
310 - PPC_LI(b2p[BPF_REG_0], 0);
311 - PPC_BCC(COND_NE, exit_addr);
312 + PPC_BCC_SHORT(COND_NE, tmp_idx);
313 break;
314
315 /*
316 diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
317 index 998b61cd0e56..4b39ba700d32 100644
318 --- a/arch/s390/include/asm/qdio.h
319 +++ b/arch/s390/include/asm/qdio.h
320 @@ -261,7 +261,6 @@ struct qdio_outbuf_state {
321 void *user;
322 };
323
324 -#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
325 #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
326
327 #define CHSC_AC1_INITIATE_INPUTQ 0x80
328 diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
329 index 661d9fe63c43..ba2f21873cbd 100644
330 --- a/arch/s390/mm/fault.c
331 +++ b/arch/s390/mm/fault.c
332 @@ -462,6 +462,8 @@ retry:
333 /* No reason to continue if interrupted by SIGKILL. */
334 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
335 fault = VM_FAULT_SIGNAL;
336 + if (flags & FAULT_FLAG_RETRY_NOWAIT)
337 + goto out_up;
338 goto out;
339 }
340 if (unlikely(fault & VM_FAULT_ERROR))
341 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
342 index 949a871e9506..8bd25aebf488 100644
343 --- a/arch/s390/net/bpf_jit_comp.c
344 +++ b/arch/s390/net/bpf_jit_comp.c
345 @@ -517,8 +517,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
346 /* br %r1 */
347 _EMIT2(0x07f1);
348 } else {
349 - /* larl %r1,.+14 */
350 - EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
351 /* ex 0,S390_lowcore.br_r1_tampoline */
352 EMIT4_DISP(0x44000000, REG_0, REG_0,
353 offsetof(struct lowcore, br_r1_trampoline));
354 diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
355 index f576f1073378..0dac2640c3a7 100644
356 --- a/arch/s390/numa/numa.c
357 +++ b/arch/s390/numa/numa.c
358 @@ -133,26 +133,14 @@ void __init numa_setup(void)
359 {
360 pr_info("NUMA mode: %s\n", mode->name);
361 nodes_clear(node_possible_map);
362 + /* Initially attach all possible CPUs to node 0. */
363 + cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
364 if (mode->setup)
365 mode->setup();
366 numa_setup_memory();
367 memblock_dump_all();
368 }
369
370 -/*
371 - * numa_init_early() - Initialization initcall
372 - *
373 - * This runs when only one CPU is online and before the first
374 - * topology update is called for by the scheduler.
375 - */
376 -static int __init numa_init_early(void)
377 -{
378 - /* Attach all possible CPUs to node 0 for now. */
379 - cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
380 - return 0;
381 -}
382 -early_initcall(numa_init_early);
383 -
384 /*
385 * numa_init_late() - Initialization initcall
386 *
387 diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
388 index 03a1d5976ff5..87574110394d 100644
389 --- a/arch/s390/pci/pci.c
390 +++ b/arch/s390/pci/pci.c
391 @@ -407,6 +407,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
392 hwirq = 0;
393 for_each_pci_msi_entry(msi, pdev) {
394 rc = -EIO;
395 + if (hwirq >= msi_vecs)
396 + break;
397 irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
398 if (irq < 0)
399 goto out_msi;
400 diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
401 index 24384e1dc33d..a7aeb036b070 100644
402 --- a/arch/sparc/kernel/pcic.c
403 +++ b/arch/sparc/kernel/pcic.c
404 @@ -602,7 +602,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
405 {
406 struct pci_dev *dev;
407 int i, has_io, has_mem;
408 - unsigned int cmd;
409 + unsigned int cmd = 0;
410 struct linux_pcic *pcic;
411 /* struct linux_pbm_info* pbm = &pcic->pbm; */
412 int node;
413 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
414 index 4669b3a931ed..cda8e14bd72a 100644
415 --- a/arch/x86/boot/compressed/Makefile
416 +++ b/arch/x86/boot/compressed/Makefile
417 @@ -101,9 +101,13 @@ define cmd_check_data_rel
418 done
419 endef
420
421 +# We need to run two commands under "if_changed", so merge them into a
422 +# single invocation.
423 +quiet_cmd_check-and-link-vmlinux = LD $@
424 + cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
425 +
426 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
427 - $(call if_changed,check_data_rel)
428 - $(call if_changed,ld)
429 + $(call if_changed,check-and-link-vmlinux)
430
431 OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
432 $(obj)/vmlinux.bin: vmlinux FORCE
433 diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
434 index b26ee32f73e8..fd4484ae3ffc 100644
435 --- a/arch/x86/events/amd/ibs.c
436 +++ b/arch/x86/events/amd/ibs.c
437 @@ -578,7 +578,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
438 {
439 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
440 struct perf_event *event = pcpu->event;
441 - struct hw_perf_event *hwc = &event->hw;
442 + struct hw_perf_event *hwc;
443 struct perf_sample_data data;
444 struct perf_raw_record raw;
445 struct pt_regs regs;
446 @@ -601,6 +601,10 @@ fail:
447 return 0;
448 }
449
450 + if (WARN_ON_ONCE(!event))
451 + goto fail;
452 +
453 + hwc = &event->hw;
454 msr = hwc->config_base;
455 buf = ibs_data.regs;
456 rdmsrl(msr, *buf);
457 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
458 index 5b1177f5a963..508a062e6cf1 100644
459 --- a/arch/x86/include/asm/irqflags.h
460 +++ b/arch/x86/include/asm/irqflags.h
461 @@ -32,7 +32,8 @@ extern inline unsigned long native_save_fl(void)
462 return flags;
463 }
464
465 -static inline void native_restore_fl(unsigned long flags)
466 +extern inline void native_restore_fl(unsigned long flags);
467 +extern inline void native_restore_fl(unsigned long flags)
468 {
469 asm volatile("push %0 ; popf"
470 : /* no output */
471 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
472 index d5525a7e119e..ee8c6290c421 100644
473 --- a/arch/x86/include/asm/processor.h
474 +++ b/arch/x86/include/asm/processor.h
475 @@ -136,6 +136,8 @@ struct cpuinfo_x86 {
476 /* Index into per_cpu list: */
477 u16 cpu_index;
478 u32 microcode;
479 + /* Address space bits used by the cache internally */
480 + u8 x86_cache_bits;
481 };
482
483 #define X86_VENDOR_INTEL 0
484 @@ -173,9 +175,9 @@ extern const struct seq_operations cpuinfo_op;
485
486 extern void cpu_detect(struct cpuinfo_x86 *c);
487
488 -static inline unsigned long l1tf_pfn_limit(void)
489 +static inline unsigned long long l1tf_pfn_limit(void)
490 {
491 - return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
492 + return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
493 }
494
495 extern void early_cpu_init(void);
496 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
497 index ac67a76550bd..8103adacbc83 100644
498 --- a/arch/x86/kernel/cpu/bugs.c
499 +++ b/arch/x86/kernel/cpu/bugs.c
500 @@ -651,6 +651,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
501 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
502 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
503
504 +/*
505 + * These CPUs all support 44bits physical address space internally in the
506 + * cache but CPUID can report a smaller number of physical address bits.
507 + *
508 + * The L1TF mitigation uses the top most address bit for the inversion of
509 + * non present PTEs. When the installed memory reaches into the top most
510 + * address bit due to memory holes, which has been observed on machines
511 + * which report 36bits physical address bits and have 32G RAM installed,
512 + * then the mitigation range check in l1tf_select_mitigation() triggers.
513 + * This is a false positive because the mitigation is still possible due to
514 + * the fact that the cache uses 44bit internally. Use the cache bits
515 + * instead of the reported physical bits and adjust them on the affected
516 + * machines to 44bit if the reported bits are less than 44.
517 + */
518 +static void override_cache_bits(struct cpuinfo_x86 *c)
519 +{
520 + if (c->x86 != 6)
521 + return;
522 +
523 + switch (c->x86_model) {
524 + case INTEL_FAM6_NEHALEM:
525 + case INTEL_FAM6_WESTMERE:
526 + case INTEL_FAM6_SANDYBRIDGE:
527 + case INTEL_FAM6_IVYBRIDGE:
528 + case INTEL_FAM6_HASWELL_CORE:
529 + case INTEL_FAM6_HASWELL_ULT:
530 + case INTEL_FAM6_HASWELL_GT3E:
531 + case INTEL_FAM6_BROADWELL_CORE:
532 + case INTEL_FAM6_BROADWELL_GT3E:
533 + case INTEL_FAM6_SKYLAKE_MOBILE:
534 + case INTEL_FAM6_SKYLAKE_DESKTOP:
535 + case INTEL_FAM6_KABYLAKE_MOBILE:
536 + case INTEL_FAM6_KABYLAKE_DESKTOP:
537 + if (c->x86_cache_bits < 44)
538 + c->x86_cache_bits = 44;
539 + break;
540 + }
541 +}
542 +
543 static void __init l1tf_select_mitigation(void)
544 {
545 u64 half_pa;
546 @@ -658,6 +697,8 @@ static void __init l1tf_select_mitigation(void)
547 if (!boot_cpu_has_bug(X86_BUG_L1TF))
548 return;
549
550 + override_cache_bits(&boot_cpu_data);
551 +
552 switch (l1tf_mitigation) {
553 case L1TF_MITIGATION_OFF:
554 case L1TF_MITIGATION_FLUSH_NOWARN:
555 @@ -677,14 +718,13 @@ static void __init l1tf_select_mitigation(void)
556 return;
557 #endif
558
559 - /*
560 - * This is extremely unlikely to happen because almost all
561 - * systems have far more MAX_PA/2 than RAM can be fit into
562 - * DIMM slots.
563 - */
564 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
565 if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
566 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
567 + pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
568 + half_pa);
569 + pr_info("However, doing so will make a part of your RAM unusable.\n");
570 + pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
571 return;
572 }
573
574 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
575 index 13471b71bec7..dc0850bb74be 100644
576 --- a/arch/x86/kernel/cpu/common.c
577 +++ b/arch/x86/kernel/cpu/common.c
578 @@ -882,6 +882,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
579 }
580 }
581 #endif
582 + c->x86_cache_bits = c->x86_phys_bits;
583 }
584
585 static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
586 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
587 index 9ad86c4bf360..cee0fec0d232 100644
588 --- a/arch/x86/kernel/cpu/intel.c
589 +++ b/arch/x86/kernel/cpu/intel.c
590 @@ -109,6 +109,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
591 if (cpu_has(c, X86_FEATURE_HYPERVISOR))
592 return false;
593
594 + if (c->x86 != 6)
595 + return false;
596 +
597 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
598 if (c->x86_model == spectre_bad_microcodes[i].model &&
599 c->x86_stepping == spectre_bad_microcodes[i].stepping)
600 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
601 index 85f854b98a9d..3576ece9ef88 100644
602 --- a/arch/x86/kernel/dumpstack.c
603 +++ b/arch/x86/kernel/dumpstack.c
604 @@ -15,6 +15,7 @@
605 #include <linux/bug.h>
606 #include <linux/nmi.h>
607 #include <linux/sysfs.h>
608 +#include <linux/kasan.h>
609
610 #include <asm/stacktrace.h>
611 #include <asm/unwind.h>
612 @@ -229,7 +230,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
613 * We're not going to return, but we might be on an IST stack or
614 * have very little stack space left. Rewind the stack and kill
615 * the task.
616 + * Before we rewind the stack, we have to tell KASAN that we're going to
617 + * reuse the task stack and that existing poisons are invalid.
618 */
619 + kasan_unpoison_task_stack(current);
620 rewind_stack_do_exit(signr);
621 }
622 NOKPROBE_SYMBOL(oops_end);
623 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
624 index dffe81d3c261..a2661814bde0 100644
625 --- a/arch/x86/kernel/process_64.c
626 +++ b/arch/x86/kernel/process_64.c
627 @@ -360,6 +360,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
628 start_thread_common(regs, new_ip, new_sp,
629 __USER_CS, __USER_DS, 0);
630 }
631 +EXPORT_SYMBOL_GPL(start_thread);
632
633 #ifdef CONFIG_COMPAT
634 void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
635 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
636 index c855080c7a71..5f44d63a9d69 100644
637 --- a/arch/x86/kvm/svm.c
638 +++ b/arch/x86/kvm/svm.c
639 @@ -4973,8 +4973,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
640
641 clgi();
642
643 - local_irq_enable();
644 -
645 /*
646 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
647 * it's non-zero. Since vmentry is serialising on affected CPUs, there
648 @@ -4983,6 +4981,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
649 */
650 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
651
652 + local_irq_enable();
653 +
654 asm volatile (
655 "push %%" _ASM_BP "; \n\t"
656 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
657 @@ -5105,12 +5105,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
658 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
659 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
660
661 - x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
662 -
663 reload_tss(vcpu);
664
665 local_irq_disable();
666
667 + x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
668 +
669 vcpu->arch.cr2 = svm->vmcb->save.cr2;
670 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
671 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
672 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
673 index 12826607a995..8e4ac0a91309 100644
674 --- a/arch/x86/kvm/vmx.c
675 +++ b/arch/x86/kvm/vmx.c
676 @@ -8670,9 +8670,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
677 * information but as all relevant affected CPUs have 32KiB L1D cache size
678 * there is no point in doing so.
679 */
680 -#define L1D_CACHE_ORDER 4
681 -static void *vmx_l1d_flush_pages;
682 -
683 static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
684 {
685 int size = PAGE_SIZE << L1D_CACHE_ORDER;
686 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
687 index 5d35b555115a..90801a8f19c9 100644
688 --- a/arch/x86/mm/init.c
689 +++ b/arch/x86/mm/init.c
690 @@ -792,7 +792,7 @@ unsigned long max_swapfile_size(void)
691
692 if (boot_cpu_has_bug(X86_BUG_L1TF)) {
693 /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
694 - unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
695 + unsigned long long l1tf_limit = l1tf_pfn_limit();
696 /*
697 * We encode swap offsets also with 3 bits below those for pfn
698 * which makes the usable limit higher.
699 @@ -800,7 +800,7 @@ unsigned long max_swapfile_size(void)
700 #if CONFIG_PGTABLE_LEVELS > 2
701 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
702 #endif
703 - pages = min_t(unsigned long, l1tf_limit, pages);
704 + pages = min_t(unsigned long long, l1tf_limit, pages);
705 }
706 return pages;
707 }
708 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
709 index 5aad869fa205..74609a957c49 100644
710 --- a/arch/x86/mm/mmap.c
711 +++ b/arch/x86/mm/mmap.c
712 @@ -138,7 +138,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
713 /* If it's real memory always allow */
714 if (pfn_valid(pfn))
715 return true;
716 - if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
717 + if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
718 return false;
719 return true;
720 }
721 diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
722 index 8e2e4757adcb..5a42ae4078c2 100644
723 --- a/drivers/base/power/clock_ops.c
724 +++ b/drivers/base/power/clock_ops.c
725 @@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
726 int of_pm_clk_add_clks(struct device *dev)
727 {
728 struct clk **clks;
729 - unsigned int i, count;
730 + int i, count;
731 int ret;
732
733 if (!dev || !dev->of_node)
734 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
735 index 07b77fb102a1..987e8f503522 100644
736 --- a/drivers/cdrom/cdrom.c
737 +++ b/drivers/cdrom/cdrom.c
738 @@ -2536,7 +2536,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
739 if (!CDROM_CAN(CDC_SELECT_DISC) ||
740 (arg == CDSL_CURRENT || arg == CDSL_NONE))
741 return cdi->ops->drive_status(cdi, CDSL_CURRENT);
742 - if (((int)arg >= cdi->capacity))
743 + if (arg >= cdi->capacity)
744 return -EINVAL;
745 return cdrom_slot_status(cdi, arg);
746 }
747 diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
748 index 8387c7a40bda..05671c03efe2 100644
749 --- a/drivers/clk/rockchip/clk-rk3399.c
750 +++ b/drivers/clk/rockchip/clk-rk3399.c
751 @@ -629,7 +629,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
752 MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
753 RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
754 COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
755 - RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
756 + RK3399_CLKSEL_CON(31), 2, 1, MFLAGS,
757 RK3399_CLKGATE_CON(8), 12, GFLAGS),
758
759 /* uart */
760 diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
761 index a68f94daf9b6..32ab5c32834b 100644
762 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
763 +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
764 @@ -424,6 +424,18 @@ static void adv7511_hpd_work(struct work_struct *work)
765 else
766 status = connector_status_disconnected;
767
768 + /*
769 + * The bridge resets its registers on unplug. So when we get a plug
770 + * event and we're already supposed to be powered, cycle the bridge to
771 + * restore its state.
772 + */
773 + if (status == connector_status_connected &&
774 + adv7511->connector.status == connector_status_disconnected &&
775 + adv7511->powered) {
776 + regcache_mark_dirty(adv7511->regmap);
777 + adv7511_power_on(adv7511);
778 + }
779 +
780 if (adv7511->connector.status != status) {
781 adv7511->connector.status = status;
782 drm_kms_helper_hotplug_event(adv7511->connector.dev);
783 diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
784 index 3ce391c239b0..67881e5517fb 100644
785 --- a/drivers/gpu/drm/imx/imx-ldb.c
786 +++ b/drivers/gpu/drm/imx/imx-ldb.c
787 @@ -634,6 +634,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
788 return PTR_ERR(imx_ldb->regmap);
789 }
790
791 + /* disable LDB by resetting the control register to POR default */
792 + regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
793 +
794 imx_ldb->dev = dev;
795
796 if (of_id)
797 @@ -675,14 +678,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
798 if (ret || i < 0 || i > 1)
799 return -EINVAL;
800
801 + if (!of_device_is_available(child))
802 + continue;
803 +
804 if (dual && i > 0) {
805 dev_warn(dev, "dual-channel mode, ignoring second output\n");
806 continue;
807 }
808
809 - if (!of_device_is_available(child))
810 - continue;
811 -
812 channel = &imx_ldb->channel[i];
813 channel->ldb = imx_ldb;
814 channel->chno = i;
815 diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
816 index 39d0fdcb17d2..6a7994a79f55 100644
817 --- a/drivers/gpu/drm/udl/udl_fb.c
818 +++ b/drivers/gpu/drm/udl/udl_fb.c
819 @@ -217,7 +217,7 @@ static int udl_fb_open(struct fb_info *info, int user)
820
821 struct fb_deferred_io *fbdefio;
822
823 - fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
824 + fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
825
826 if (fbdefio) {
827 fbdefio->delay = DL_DEFIO_WRITE_DELAY;
828 diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
829 index 873f010d9616..10e2c198ad72 100644
830 --- a/drivers/gpu/drm/udl/udl_main.c
831 +++ b/drivers/gpu/drm/udl/udl_main.c
832 @@ -169,18 +169,13 @@ static void udl_free_urb_list(struct drm_device *dev)
833 struct list_head *node;
834 struct urb_node *unode;
835 struct urb *urb;
836 - int ret;
837 unsigned long flags;
838
839 DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
840
841 /* keep waiting and freeing, until we've got 'em all */
842 while (count--) {
843 -
844 - /* Getting interrupted means a leak, but ok at shutdown*/
845 - ret = down_interruptible(&udl->urbs.limit_sem);
846 - if (ret)
847 - break;
848 + down(&udl->urbs.limit_sem);
849
850 spin_lock_irqsave(&udl->urbs.lock, flags);
851
852 @@ -204,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev)
853 static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
854 {
855 struct udl_device *udl = dev->dev_private;
856 - int i = 0;
857 struct urb *urb;
858 struct urb_node *unode;
859 char *buf;
860 + size_t wanted_size = count * size;
861
862 spin_lock_init(&udl->urbs.lock);
863
864 +retry:
865 udl->urbs.size = size;
866 INIT_LIST_HEAD(&udl->urbs.list);
867
868 - while (i < count) {
869 + sema_init(&udl->urbs.limit_sem, 0);
870 + udl->urbs.count = 0;
871 + udl->urbs.available = 0;
872 +
873 + while (udl->urbs.count * size < wanted_size) {
874 unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
875 if (!unode)
876 break;
877 @@ -230,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
878 }
879 unode->urb = urb;
880
881 - buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
882 + buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
883 &urb->transfer_dma);
884 if (!buf) {
885 kfree(unode);
886 usb_free_urb(urb);
887 + if (size > PAGE_SIZE) {
888 + size /= 2;
889 + udl_free_urb_list(dev);
890 + goto retry;
891 + }
892 break;
893 }
894
895 @@ -245,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
896
897 list_add_tail(&unode->entry, &udl->urbs.list);
898
899 - i++;
900 + up(&udl->urbs.limit_sem);
901 + udl->urbs.count++;
902 + udl->urbs.available++;
903 }
904
905 - sema_init(&udl->urbs.limit_sem, i);
906 - udl->urbs.count = i;
907 - udl->urbs.available = i;
908 -
909 - DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
910 + DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
911
912 - return i;
913 + return udl->urbs.count;
914 }
915
916 struct urb *udl_get_urb(struct drm_device *dev)
917 diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
918 index 9e7ef5cf5d49..b2d8b63176db 100644
919 --- a/drivers/i2c/busses/i2c-davinci.c
920 +++ b/drivers/i2c/busses/i2c-davinci.c
921 @@ -234,12 +234,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
922 /*
923 * It's not always possible to have 1 to 2 ratio when d=7, so fall back
924 * to minimal possible clkh in this case.
925 + *
926 + * Note:
927 + * CLKH is not allowed to be 0, in this case I2C clock is not generated
928 + * at all
929 */
930 - if (clk >= clkl + d) {
931 + if (clk > clkl + d) {
932 clkh = clk - clkl - d;
933 clkl -= d;
934 } else {
935 - clkh = 0;
936 + clkh = 1;
937 clkl = clk - (d << 1);
938 }
939
940 diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
941 index 60f5a8ded8dd..8904491dfda4 100644
942 --- a/drivers/misc/mei/main.c
943 +++ b/drivers/misc/mei/main.c
944 @@ -304,7 +304,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
945 goto out;
946 }
947
948 - *offset = 0;
949 cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
950 if (!cb) {
951 rets = -ENOMEM;
952 diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
953 index c7427bdd3a4b..2949a381a94d 100644
954 --- a/drivers/net/can/mscan/mpc5xxx_can.c
955 +++ b/drivers/net/can/mscan/mpc5xxx_can.c
956 @@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
957 return 0;
958 }
959 cdm = of_iomap(np_cdm, 0);
960 + if (!cdm) {
961 + of_node_put(np_cdm);
962 + dev_err(&ofdev->dev, "can't map clock node!\n");
963 + return 0;
964 + }
965
966 if (in_8(&cdm->ipb_clk_sel) & 0x1)
967 freq *= 2;
968 diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
969 index 5b7658bcf020..5c3ef9fc8207 100644
970 --- a/drivers/net/ethernet/3com/Kconfig
971 +++ b/drivers/net/ethernet/3com/Kconfig
972 @@ -32,7 +32,7 @@ config EL3
973
974 config 3C515
975 tristate "3c515 ISA \"Fast EtherLink\""
976 - depends on ISA && ISA_DMA_API
977 + depends on ISA && ISA_DMA_API && !PPC32
978 ---help---
979 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
980 network card, say Y here.
981 diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
982 index 0038709fd317..ec59425fdbff 100644
983 --- a/drivers/net/ethernet/amd/Kconfig
984 +++ b/drivers/net/ethernet/amd/Kconfig
985 @@ -44,7 +44,7 @@ config AMD8111_ETH
986
987 config LANCE
988 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
989 - depends on ISA && ISA_DMA_API && !ARM
990 + depends on ISA && ISA_DMA_API && !ARM && !PPC32
991 ---help---
992 If you have a network (Ethernet) card of this type, say Y here.
993 Some LinkSys cards are of this type.
994 @@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
995
996 config NI65
997 tristate "NI6510 support"
998 - depends on ISA && ISA_DMA_API && !ARM
999 + depends on ISA && ISA_DMA_API && !ARM && !PPC32
1000 ---help---
1001 If you have a network (Ethernet) card of this type, say Y here.
1002
1003 diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1004 index a3200ea6d765..85e7177c479f 100644
1005 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1006 +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1007 @@ -1678,6 +1678,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
1008 skb = build_skb(page_address(page) + adapter->rx_page_offset,
1009 adapter->rx_frag_size);
1010 if (likely(skb)) {
1011 + skb_reserve(skb, NET_SKB_PAD);
1012 adapter->rx_page_offset += adapter->rx_frag_size;
1013 if (adapter->rx_page_offset >= PAGE_SIZE)
1014 adapter->rx_page = NULL;
1015 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
1016 index 5f19427c7b27..8aecd8ef6542 100644
1017 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
1018 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
1019 @@ -3367,14 +3367,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
1020 DP(BNX2X_MSG_ETHTOOL,
1021 "rss re-configured, UDP 4-tupple %s\n",
1022 udp_rss_requested ? "enabled" : "disabled");
1023 - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
1024 + if (bp->state == BNX2X_STATE_OPEN)
1025 + return bnx2x_rss(bp, &bp->rss_conf_obj, false,
1026 + true);
1027 } else if ((info->flow_type == UDP_V6_FLOW) &&
1028 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
1029 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
1030 DP(BNX2X_MSG_ETHTOOL,
1031 "rss re-configured, UDP 4-tupple %s\n",
1032 udp_rss_requested ? "enabled" : "disabled");
1033 - return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
1034 + if (bp->state == BNX2X_STATE_OPEN)
1035 + return bnx2x_rss(bp, &bp->rss_conf_obj, false,
1036 + true);
1037 }
1038 return 0;
1039
1040 @@ -3488,7 +3492,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
1041 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
1042 }
1043
1044 - return bnx2x_config_rss_eth(bp, false);
1045 + if (bp->state == BNX2X_STATE_OPEN)
1046 + return bnx2x_config_rss_eth(bp, false);
1047 +
1048 + return 0;
1049 }
1050
1051 /**
1052 diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
1053 index 5ab912937aff..ec0b545197e2 100644
1054 --- a/drivers/net/ethernet/cirrus/Kconfig
1055 +++ b/drivers/net/ethernet/cirrus/Kconfig
1056 @@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
1057 config CS89x0
1058 tristate "CS89x0 support"
1059 depends on ISA || EISA || ARM
1060 + depends on !PPC32
1061 ---help---
1062 Support for CS89x0 chipset based Ethernet cards. If you have a
1063 network (Ethernet) card of this type, say Y and read the file
1064 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
1065 index 2e9bab45d419..f7e7b79c6050 100644
1066 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
1067 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
1068 @@ -1842,10 +1842,32 @@ static int enic_stop(struct net_device *netdev)
1069 return 0;
1070 }
1071
1072 +static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
1073 +{
1074 + bool running = netif_running(netdev);
1075 + int err = 0;
1076 +
1077 + ASSERT_RTNL();
1078 + if (running) {
1079 + err = enic_stop(netdev);
1080 + if (err)
1081 + return err;
1082 + }
1083 +
1084 + netdev->mtu = new_mtu;
1085 +
1086 + if (running) {
1087 + err = enic_open(netdev);
1088 + if (err)
1089 + return err;
1090 + }
1091 +
1092 + return 0;
1093 +}
1094 +
1095 static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1096 {
1097 struct enic *enic = netdev_priv(netdev);
1098 - int running = netif_running(netdev);
1099
1100 if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
1101 return -EINVAL;
1102 @@ -1853,20 +1875,12 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1103 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
1104 return -EOPNOTSUPP;
1105
1106 - if (running)
1107 - enic_stop(netdev);
1108 -
1109 - netdev->mtu = new_mtu;
1110 -
1111 if (netdev->mtu > enic->port_mtu)
1112 netdev_warn(netdev,
1113 - "interface MTU (%d) set higher than port MTU (%d)\n",
1114 - netdev->mtu, enic->port_mtu);
1115 + "interface MTU (%d) set higher than port MTU (%d)\n",
1116 + netdev->mtu, enic->port_mtu);
1117
1118 - if (running)
1119 - enic_open(netdev);
1120 -
1121 - return 0;
1122 + return _enic_change_mtu(netdev, new_mtu);
1123 }
1124
1125 static void enic_change_mtu_work(struct work_struct *work)
1126 @@ -1874,47 +1888,9 @@ static void enic_change_mtu_work(struct work_struct *work)
1127 struct enic *enic = container_of(work, struct enic, change_mtu_work);
1128 struct net_device *netdev = enic->netdev;
1129 int new_mtu = vnic_dev_mtu(enic->vdev);
1130 - int err;
1131 - unsigned int i;
1132 -
1133 - new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
1134
1135 rtnl_lock();
1136 -
1137 - /* Stop RQ */
1138 - del_timer_sync(&enic->notify_timer);
1139 -
1140 - for (i = 0; i < enic->rq_count; i++)
1141 - napi_disable(&enic->napi[i]);
1142 -
1143 - vnic_intr_mask(&enic->intr[0]);
1144 - enic_synchronize_irqs(enic);
1145 - err = vnic_rq_disable(&enic->rq[0]);
1146 - if (err) {
1147 - rtnl_unlock();
1148 - netdev_err(netdev, "Unable to disable RQ.\n");
1149 - return;
1150 - }
1151 - vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
1152 - vnic_cq_clean(&enic->cq[0]);
1153 - vnic_intr_clean(&enic->intr[0]);
1154 -
1155 - /* Fill RQ with new_mtu-sized buffers */
1156 - netdev->mtu = new_mtu;
1157 - vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1158 - /* Need at least one buffer on ring to get going */
1159 - if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
1160 - rtnl_unlock();
1161 - netdev_err(netdev, "Unable to alloc receive buffers.\n");
1162 - return;
1163 - }
1164 -
1165 - /* Start RQ */
1166 - vnic_rq_enable(&enic->rq[0]);
1167 - napi_enable(&enic->napi[0]);
1168 - vnic_intr_unmask(&enic->intr[0]);
1169 - enic_notify_timer_start(enic);
1170 -
1171 + (void)_enic_change_mtu(netdev, new_mtu);
1172 rtnl_unlock();
1173
1174 netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
1175 diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
1176 index ddd410a91e13..715776e2cfe5 100644
1177 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
1178 +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
1179 @@ -313,7 +313,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
1180
1181 p_ramrod->common.update_approx_mcast_flg = 1;
1182 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1183 - u32 *p_bins = (u32 *)p_params->bins;
1184 + u32 *p_bins = p_params->bins;
1185
1186 p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
1187 }
1188 @@ -1182,8 +1182,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1189 enum spq_mode comp_mode,
1190 struct qed_spq_comp_cb *p_comp_data)
1191 {
1192 - unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1193 struct vport_update_ramrod_data *p_ramrod = NULL;
1194 + u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1195 struct qed_spq_entry *p_ent = NULL;
1196 struct qed_sp_init_data init_data;
1197 u8 abs_vport_id = 0;
1198 @@ -1219,26 +1219,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1199 /* explicitly clear out the entire vector */
1200 memset(&p_ramrod->approx_mcast.bins, 0,
1201 sizeof(p_ramrod->approx_mcast.bins));
1202 - memset(bins, 0, sizeof(unsigned long) *
1203 - ETH_MULTICAST_MAC_BINS_IN_REGS);
1204 + memset(bins, 0, sizeof(bins));
1205 /* filter ADD op is explicit set op and it removes
1206 * any existing filters for the vport
1207 */
1208 if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1209 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1210 - u32 bit;
1211 + u32 bit, nbits;
1212
1213 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1214 - __set_bit(bit, bins);
1215 + nbits = sizeof(u32) * BITS_PER_BYTE;
1216 + bins[bit / nbits] |= 1 << (bit % nbits);
1217 }
1218
1219 /* Convert to correct endianity */
1220 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1221 struct vport_update_ramrod_mcast *p_ramrod_bins;
1222 - u32 *p_bins = (u32 *)bins;
1223
1224 p_ramrod_bins = &p_ramrod->approx_mcast;
1225 - p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
1226 + p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
1227 }
1228 }
1229
1230 diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
1231 index e495d62fcc03..14d00173cad0 100644
1232 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
1233 +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
1234 @@ -156,7 +156,7 @@ struct qed_sp_vport_update_params {
1235 u8 anti_spoofing_en;
1236 u8 update_accept_any_vlan_flg;
1237 u8 accept_any_vlan;
1238 - unsigned long bins[8];
1239 + u32 bins[8];
1240 struct qed_rss_params *rss_params;
1241 struct qed_filter_accept_flags accept_flags;
1242 struct qed_sge_tpa_params *sge_tpa_params;
1243 diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
1244 index 8b7d2f963ee1..eaa242df4131 100644
1245 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
1246 +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
1247 @@ -613,6 +613,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
1248 break;
1249 default:
1250 p_link->speed = 0;
1251 + p_link->link_up = 0;
1252 }
1253
1254 if (p_link->link_up && p_link->speed)
1255 diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1256 index 48bc5c151336..6379bfedc9f0 100644
1257 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1258 +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
1259 @@ -2157,7 +2157,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
1260
1261 p_data->update_approx_mcast_flg = 1;
1262 memcpy(p_data->bins, p_mcast_tlv->bins,
1263 - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1264 + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1265 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
1266 }
1267
1268 diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
1269 index 0645124a887b..faf8215872de 100644
1270 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
1271 +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
1272 @@ -786,7 +786,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
1273 resp_size += sizeof(struct pfvf_def_resp_tlv);
1274
1275 memcpy(p_mcast_tlv->bins, p_params->bins,
1276 - sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1277 + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1278 }
1279
1280 update_rx = p_params->accept_flags.update_rx_mode_config;
1281 @@ -972,7 +972,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
1282 u32 bit;
1283
1284 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1285 - __set_bit(bit, sp_params.bins);
1286 + sp_params.bins[bit / 32] |= 1 << (bit % 32);
1287 }
1288 }
1289
1290 diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
1291 index 35db7a28aa13..b962ef8e98ef 100644
1292 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
1293 +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
1294 @@ -336,7 +336,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
1295 struct channel_tlv tl;
1296 u8 padding[4];
1297
1298 - u64 bins[8];
1299 + /* There are only 256 approx bins, and in HSI they're divided into
1300 + * 32-bit values. As old VFs used to set-bit to the values on its side,
1301 + * the upper half of the array is never expected to contain any data.
1302 + */
1303 + u64 bins[4];
1304 + u64 obsolete_bins[4];
1305 };
1306
1307 struct vfpf_vport_update_accept_param_tlv {
1308 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
1309 index 63307ea97846..9beea13e2e1f 100644
1310 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
1311 +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
1312 @@ -217,6 +217,7 @@ issue:
1313 ret = of_mdiobus_register(bus, np1);
1314 if (ret) {
1315 mdiobus_free(bus);
1316 + lp->mii_bus = NULL;
1317 return ret;
1318 }
1319 return 0;
1320 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1321 index 31a6d87b61b2..0d4440f28f6b 100644
1322 --- a/drivers/net/usb/qmi_wwan.c
1323 +++ b/drivers/net/usb/qmi_wwan.c
1324 @@ -946,7 +946,7 @@ static const struct usb_device_id products[] = {
1325 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
1326 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
1327 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
1328 - {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
1329 + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
1330 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
1331 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
1332 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
1333 diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
1334 index 299140c04556..04b60ed59ea0 100644
1335 --- a/drivers/net/wan/lmc/lmc_main.c
1336 +++ b/drivers/net/wan/lmc/lmc_main.c
1337 @@ -1372,7 +1372,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1338 case 0x001:
1339 printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
1340 break;
1341 - case 0x010:
1342 + case 0x002:
1343 printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
1344 break;
1345 default:
1346 diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
1347 index cb987c2ecc6b..87131f663292 100644
1348 --- a/drivers/net/wireless/broadcom/b43/leds.c
1349 +++ b/drivers/net/wireless/broadcom/b43/leds.c
1350 @@ -131,7 +131,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
1351 led->wl = dev->wl;
1352 led->index = led_index;
1353 led->activelow = activelow;
1354 - strncpy(led->name, name, sizeof(led->name));
1355 + strlcpy(led->name, name, sizeof(led->name));
1356 atomic_set(&led->state, 0);
1357
1358 led->led_dev.name = led->name;
1359 diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
1360 index fd4565389c77..bc922118b6ac 100644
1361 --- a/drivers/net/wireless/broadcom/b43legacy/leds.c
1362 +++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
1363 @@ -101,7 +101,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
1364 led->dev = dev;
1365 led->index = led_index;
1366 led->activelow = activelow;
1367 - strncpy(led->name, name, sizeof(led->name));
1368 + strlcpy(led->name, name, sizeof(led->name));
1369
1370 led->led_dev.name = led->name;
1371 led->led_dev.default_trigger = default_trigger;
1372 diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
1373 index a4e9f430d452..e2cca91fd266 100644
1374 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
1375 +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
1376 @@ -433,7 +433,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
1377 const char *name;
1378 int i, ret;
1379
1380 - if (group > info->ngroups)
1381 + if (group >= info->ngroups)
1382 return;
1383
1384 seq_puts(s, "\n");
1385 diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
1386 index edb36bf781b0..f627b39f64bf 100644
1387 --- a/drivers/power/supply/generic-adc-battery.c
1388 +++ b/drivers/power/supply/generic-adc-battery.c
1389 @@ -243,10 +243,10 @@ static int gab_probe(struct platform_device *pdev)
1390 struct power_supply_desc *psy_desc;
1391 struct power_supply_config psy_cfg = {};
1392 struct gab_platform_data *pdata = pdev->dev.platform_data;
1393 - enum power_supply_property *properties;
1394 int ret = 0;
1395 int chan;
1396 - int index = 0;
1397 + int index = ARRAY_SIZE(gab_props);
1398 + bool any = false;
1399
1400 adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL);
1401 if (!adc_bat) {
1402 @@ -280,8 +280,6 @@ static int gab_probe(struct platform_device *pdev)
1403 }
1404
1405 memcpy(psy_desc->properties, gab_props, sizeof(gab_props));
1406 - properties = (enum power_supply_property *)
1407 - ((char *)psy_desc->properties + sizeof(gab_props));
1408
1409 /*
1410 * getting channel from iio and copying the battery properties
1411 @@ -295,15 +293,22 @@ static int gab_probe(struct platform_device *pdev)
1412 adc_bat->channel[chan] = NULL;
1413 } else {
1414 /* copying properties for supported channels only */
1415 - memcpy(properties + sizeof(*(psy_desc->properties)) * index,
1416 - &gab_dyn_props[chan],
1417 - sizeof(gab_dyn_props[chan]));
1418 - index++;
1419 + int index2;
1420 +
1421 + for (index2 = 0; index2 < index; index2++) {
1422 + if (psy_desc->properties[index2] ==
1423 + gab_dyn_props[chan])
1424 + break; /* already known */
1425 + }
1426 + if (index2 == index) /* really new */
1427 + psy_desc->properties[index++] =
1428 + gab_dyn_props[chan];
1429 + any = true;
1430 }
1431 }
1432
1433 /* none of the channels are supported so let's bail out */
1434 - if (index == 0) {
1435 + if (!any) {
1436 ret = -ENODEV;
1437 goto second_mem_fail;
1438 }
1439 @@ -314,7 +319,7 @@ static int gab_probe(struct platform_device *pdev)
1440 * as come channels may be not be supported by the device.So
1441 * we need to take care of that.
1442 */
1443 - psy_desc->num_properties = ARRAY_SIZE(gab_props) + index;
1444 + psy_desc->num_properties = index;
1445
1446 adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg);
1447 if (IS_ERR(adc_bat->psy)) {
1448 diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
1449 index 66e9bb053629..18ab84e9c6b2 100644
1450 --- a/drivers/s390/cio/qdio_main.c
1451 +++ b/drivers/s390/cio/qdio_main.c
1452 @@ -640,21 +640,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
1453 unsigned long phys_aob = 0;
1454
1455 if (!q->use_cq)
1456 - goto out;
1457 + return 0;
1458
1459 if (!q->aobs[bufnr]) {
1460 struct qaob *aob = qdio_allocate_aob();
1461 q->aobs[bufnr] = aob;
1462 }
1463 if (q->aobs[bufnr]) {
1464 - q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
1465 q->sbal_state[bufnr].aob = q->aobs[bufnr];
1466 q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
1467 phys_aob = virt_to_phys(q->aobs[bufnr]);
1468 WARN_ON_ONCE(phys_aob & 0xFF);
1469 }
1470
1471 -out:
1472 + q->sbal_state[bufnr].flags = 0;
1473 return phys_aob;
1474 }
1475
1476 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
1477 index dcf36537a767..cc3994d4e7bc 100644
1478 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
1479 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
1480 @@ -755,9 +755,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
1481 case ELS_LOGO:
1482 if (fip->mode == FIP_MODE_VN2VN) {
1483 if (fip->state != FIP_ST_VNMP_UP)
1484 - return -EINVAL;
1485 + goto drop;
1486 if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
1487 - return -EINVAL;
1488 + goto drop;
1489 } else {
1490 if (fip->state != FIP_ST_ENABLED)
1491 return 0;
1492 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
1493 index 97aeaddd600d..e3ffd244603e 100644
1494 --- a/drivers/scsi/libfc/fc_rport.c
1495 +++ b/drivers/scsi/libfc/fc_rport.c
1496 @@ -1935,6 +1935,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
1497 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1498 fc_rport_state(rdata));
1499
1500 + rdata->flags &= ~FC_RP_STARTED;
1501 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
1502 mutex_unlock(&rdata->rp_mutex);
1503 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
1504 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
1505 index c2b682916337..cc8f2a7c2463 100644
1506 --- a/drivers/scsi/libiscsi.c
1507 +++ b/drivers/scsi/libiscsi.c
1508 @@ -283,11 +283,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
1509 */
1510 if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
1511 iscsi_conn_printk(KERN_INFO, conn,
1512 - "task [op %x/%x itt "
1513 + "task [op %x itt "
1514 "0x%x/0x%x] "
1515 "rejected.\n",
1516 - task->hdr->opcode, opcode,
1517 - task->itt, task->hdr_itt);
1518 + opcode, task->itt,
1519 + task->hdr_itt);
1520 return -EACCES;
1521 }
1522 /*
1523 @@ -296,10 +296,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
1524 */
1525 if (conn->session->fast_abort) {
1526 iscsi_conn_printk(KERN_INFO, conn,
1527 - "task [op %x/%x itt "
1528 + "task [op %x itt "
1529 "0x%x/0x%x] fast abort.\n",
1530 - task->hdr->opcode, opcode,
1531 - task->itt, task->hdr_itt);
1532 + opcode, task->itt,
1533 + task->hdr_itt);
1534 return -EACCES;
1535 }
1536 break;
1537 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
1538 index 3a6f557ec128..56b65b85b121 100644
1539 --- a/drivers/scsi/scsi_sysfs.c
1540 +++ b/drivers/scsi/scsi_sysfs.c
1541 @@ -709,8 +709,24 @@ static ssize_t
1542 sdev_store_delete(struct device *dev, struct device_attribute *attr,
1543 const char *buf, size_t count)
1544 {
1545 - if (device_remove_file_self(dev, attr))
1546 - scsi_remove_device(to_scsi_device(dev));
1547 + struct kernfs_node *kn;
1548 +
1549 + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
1550 + WARN_ON_ONCE(!kn);
1551 + /*
1552 + * Concurrent writes into the "delete" sysfs attribute may trigger
1553 + * concurrent calls to device_remove_file() and scsi_remove_device().
1554 + * device_remove_file() handles concurrent removal calls by
1555 + * serializing these and by ignoring the second and later removal
1556 + * attempts. Concurrent calls of scsi_remove_device() are
1557 + * serialized. The second and later calls of scsi_remove_device() are
1558 + * ignored because the first call of that function changes the device
1559 + * state into SDEV_DEL.
1560 + */
1561 + device_remove_file(dev, attr);
1562 + scsi_remove_device(to_scsi_device(dev));
1563 + if (kn)
1564 + sysfs_unbreak_active_protection(kn);
1565 return count;
1566 };
1567 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
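
The comment in the reworked sdev_store_delete() describes the general self-deletion pattern enabled by the two sysfs helpers added later in this patch (sysfs_break_active_protection() / sysfs_unbreak_active_protection()). A hedged sketch of that pattern for a hypothetical driver attribute — everything other than the two sysfs helpers and the core device calls is made up:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* Hypothetical "remove" attribute whose store method tears down its own device. */
    static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
                                const char *buf, size_t count)
    {
        struct kernfs_node *kn;

        /* Drop kernfs "active" protection so removal cannot deadlock on this store. */
        kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);

        device_remove_file(dev, attr);            /* safe on repeated calls          */
        my_bus_remove_device(to_my_device(dev));  /* hypothetical teardown helper    */

        if (kn)
            sysfs_unbreak_active_protection(kn);
        return count;
    }
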
1568 diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
1569 index 15ca09cd16f3..874e9f085326 100644
1570 --- a/drivers/scsi/vmw_pvscsi.c
1571 +++ b/drivers/scsi/vmw_pvscsi.c
1572 @@ -564,9 +564,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
1573 (btstat == BTSTAT_SUCCESS ||
1574 btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
1575 btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
1576 - cmd->result = (DID_OK << 16) | sdstat;
1577 - if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
1578 - cmd->result |= (DRIVER_SENSE << 24);
1579 + if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
1580 + cmd->result = (DID_RESET << 16);
1581 + } else {
1582 + cmd->result = (DID_OK << 16) | sdstat;
1583 + if (sdstat == SAM_STAT_CHECK_CONDITION &&
1584 + cmd->sense_buffer)
1585 + cmd->result |= (DRIVER_SENSE << 24);
1586 + }
1587 } else
1588 switch (btstat) {
1589 case BTSTAT_SUCCESS:
1590 diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
1591 index 2b700e8455c6..e3596855a703 100644
1592 --- a/drivers/staging/android/ion/ion-ioctl.c
1593 +++ b/drivers/staging/android/ion/ion-ioctl.c
1594 @@ -128,11 +128,15 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1595 {
1596 struct ion_handle *handle;
1597
1598 - handle = ion_handle_get_by_id(client, data.handle.handle);
1599 - if (IS_ERR(handle))
1600 + mutex_lock(&client->lock);
1601 + handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1602 + if (IS_ERR(handle)) {
1603 + mutex_unlock(&client->lock);
1604 return PTR_ERR(handle);
1605 - data.fd.fd = ion_share_dma_buf_fd(client, handle);
1606 - ion_handle_put(handle);
1607 + }
1608 + data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle);
1609 + ion_handle_put_nolock(handle);
1610 + mutex_unlock(&client->lock);
1611 if (data.fd.fd < 0)
1612 ret = data.fd.fd;
1613 break;
1614 diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
1615 index 6f9974cb0e15..806e9b30b9dc 100644
1616 --- a/drivers/staging/android/ion/ion.c
1617 +++ b/drivers/staging/android/ion/ion.c
1618 @@ -15,6 +15,7 @@
1619 *
1620 */
1621
1622 +#include <linux/atomic.h>
1623 #include <linux/device.h>
1624 #include <linux/err.h>
1625 #include <linux/file.h>
1626 @@ -305,6 +306,16 @@ static void ion_handle_get(struct ion_handle *handle)
1627 kref_get(&handle->ref);
1628 }
1629
1630 +/* Must hold the client lock */
1631 +static struct ion_handle *ion_handle_get_check_overflow(
1632 + struct ion_handle *handle)
1633 +{
1634 + if (atomic_read(&handle->ref.refcount) + 1 == 0)
1635 + return ERR_PTR(-EOVERFLOW);
1636 + ion_handle_get(handle);
1637 + return handle;
1638 +}
1639 +
1640 int ion_handle_put_nolock(struct ion_handle *handle)
1641 {
1642 return kref_put(&handle->ref, ion_handle_destroy);
1643 @@ -347,21 +358,9 @@ struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
1644
1645 handle = idr_find(&client->idr, id);
1646 if (handle)
1647 - ion_handle_get(handle);
1648 -
1649 - return handle ? handle : ERR_PTR(-EINVAL);
1650 -}
1651 -
1652 -struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
1653 - int id)
1654 -{
1655 - struct ion_handle *handle;
1656 + return ion_handle_get_check_overflow(handle);
1657
1658 - mutex_lock(&client->lock);
1659 - handle = ion_handle_get_by_id_nolock(client, id);
1660 - mutex_unlock(&client->lock);
1661 -
1662 - return handle;
1663 + return ERR_PTR(-EINVAL);
1664 }
1665
1666 static bool ion_handle_validate(struct ion_client *client,
1667 @@ -1029,24 +1028,28 @@ static struct dma_buf_ops dma_buf_ops = {
1668 .kunmap = ion_dma_buf_kunmap,
1669 };
1670
1671 -struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1672 - struct ion_handle *handle)
1673 +static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
1674 + struct ion_handle *handle,
1675 + bool lock_client)
1676 {
1677 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1678 struct ion_buffer *buffer;
1679 struct dma_buf *dmabuf;
1680 bool valid_handle;
1681
1682 - mutex_lock(&client->lock);
1683 + if (lock_client)
1684 + mutex_lock(&client->lock);
1685 valid_handle = ion_handle_validate(client, handle);
1686 if (!valid_handle) {
1687 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1688 - mutex_unlock(&client->lock);
1689 + if (lock_client)
1690 + mutex_unlock(&client->lock);
1691 return ERR_PTR(-EINVAL);
1692 }
1693 buffer = handle->buffer;
1694 ion_buffer_get(buffer);
1695 - mutex_unlock(&client->lock);
1696 + if (lock_client)
1697 + mutex_unlock(&client->lock);
1698
1699 exp_info.ops = &dma_buf_ops;
1700 exp_info.size = buffer->size;
1701 @@ -1061,14 +1064,21 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1702
1703 return dmabuf;
1704 }
1705 +
1706 +struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1707 + struct ion_handle *handle)
1708 +{
1709 + return __ion_share_dma_buf(client, handle, true);
1710 +}
1711 EXPORT_SYMBOL(ion_share_dma_buf);
1712
1713 -int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1714 +static int __ion_share_dma_buf_fd(struct ion_client *client,
1715 + struct ion_handle *handle, bool lock_client)
1716 {
1717 struct dma_buf *dmabuf;
1718 int fd;
1719
1720 - dmabuf = ion_share_dma_buf(client, handle);
1721 + dmabuf = __ion_share_dma_buf(client, handle, lock_client);
1722 if (IS_ERR(dmabuf))
1723 return PTR_ERR(dmabuf);
1724
1725 @@ -1078,8 +1088,19 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1726
1727 return fd;
1728 }
1729 +
1730 +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1731 +{
1732 + return __ion_share_dma_buf_fd(client, handle, true);
1733 +}
1734 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1735
1736 +int ion_share_dma_buf_fd_nolock(struct ion_client *client,
1737 + struct ion_handle *handle)
1738 +{
1739 + return __ion_share_dma_buf_fd(client, handle, false);
1740 +}
1741 +
1742 struct ion_handle *ion_import_dma_buf(struct ion_client *client,
1743 struct dma_buf *dmabuf)
1744 {
1745 @@ -1100,7 +1121,7 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client,
1746 /* if a handle exists for this buffer just take a reference to it */
1747 handle = ion_handle_lookup(client, buffer);
1748 if (!IS_ERR(handle)) {
1749 - ion_handle_get(handle);
1750 + handle = ion_handle_get_check_overflow(handle);
1751 mutex_unlock(&client->lock);
1752 goto end;
1753 }
1754 diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
1755 index 3c3b3245275d..760e41885448 100644
1756 --- a/drivers/staging/android/ion/ion_priv.h
1757 +++ b/drivers/staging/android/ion/ion_priv.h
1758 @@ -463,11 +463,11 @@ void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
1759
1760 int ion_handle_put_nolock(struct ion_handle *handle);
1761
1762 -struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
1763 - int id);
1764 -
1765 int ion_handle_put(struct ion_handle *handle);
1766
1767 int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
1768
1769 +int ion_share_dma_buf_fd_nolock(struct ion_client *client,
1770 + struct ion_handle *handle);
1771 +
1772 #endif /* _ION_PRIV_H */
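
The new ion_handle_get_check_overflow() refuses to take a reference when the kref counter is about to wrap to zero. A minimal sketch of the same saturation check around a plain kref (generic object, hypothetical names; on this 4.9-era kref the counter is an atomic_t, as the patch itself relies on):

    #include <linux/err.h>
    #include <linux/kref.h>

    struct my_obj {
        struct kref ref;
        /* ... payload ... */
    };

    /* Take a reference unless the counter would overflow back to zero. */
    static struct my_obj *my_obj_get_check_overflow(struct my_obj *obj)
    {
        if (atomic_read(&obj->ref.refcount) + 1 == 0)
            return ERR_PTR(-EOVERFLOW);
        kref_get(&obj->ref);
        return obj;
    }
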
1773 diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
1774 index c16927ac8eb0..395c7a2244ff 100644
1775 --- a/drivers/staging/media/omap4iss/iss_video.c
1776 +++ b/drivers/staging/media/omap4iss/iss_video.c
1777 @@ -11,7 +11,6 @@
1778 * (at your option) any later version.
1779 */
1780
1781 -#include <asm/cacheflush.h>
1782 #include <linux/clk.h>
1783 #include <linux/mm.h>
1784 #include <linux/pagemap.h>
1785 @@ -24,6 +23,8 @@
1786 #include <media/v4l2-ioctl.h>
1787 #include <media/v4l2-mc.h>
1788
1789 +#include <asm/cacheflush.h>
1790 +
1791 #include "iss_video.h"
1792 #include "iss.h"
1793
1794 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
1795 index 9ccd5da8f204..d2f82aaf6a85 100644
1796 --- a/drivers/target/iscsi/iscsi_target_login.c
1797 +++ b/drivers/target/iscsi/iscsi_target_login.c
1798 @@ -333,8 +333,7 @@ static int iscsi_login_zero_tsih_s1(
1799 pr_err("idr_alloc() for sess_idr failed\n");
1800 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1801 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1802 - kfree(sess);
1803 - return -ENOMEM;
1804 + goto free_sess;
1805 }
1806
1807 sess->creation_time = get_jiffies_64();
1808 @@ -350,20 +349,28 @@ static int iscsi_login_zero_tsih_s1(
1809 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1810 pr_err("Unable to allocate memory for"
1811 " struct iscsi_sess_ops.\n");
1812 - kfree(sess);
1813 - return -ENOMEM;
1814 + goto remove_idr;
1815 }
1816
1817 sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
1818 if (IS_ERR(sess->se_sess)) {
1819 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1820 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1821 - kfree(sess->sess_ops);
1822 - kfree(sess);
1823 - return -ENOMEM;
1824 + goto free_ops;
1825 }
1826
1827 return 0;
1828 +
1829 +free_ops:
1830 + kfree(sess->sess_ops);
1831 +remove_idr:
1832 + spin_lock_bh(&sess_idr_lock);
1833 + idr_remove(&sess_idr, sess->session_index);
1834 + spin_unlock_bh(&sess_idr_lock);
1835 +free_sess:
1836 + kfree(sess);
1837 + conn->sess = NULL;
1838 + return -ENOMEM;
1839 }
1840
1841 static int iscsi_login_zero_tsih_s2(
1842 @@ -1152,13 +1159,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
1843 ISCSI_LOGIN_STATUS_INIT_ERR);
1844 if (!zero_tsih || !conn->sess)
1845 goto old_sess_out;
1846 - if (conn->sess->se_sess)
1847 - transport_free_session(conn->sess->se_sess);
1848 - if (conn->sess->session_index != 0) {
1849 - spin_lock_bh(&sess_idr_lock);
1850 - idr_remove(&sess_idr, conn->sess->session_index);
1851 - spin_unlock_bh(&sess_idr_lock);
1852 - }
1853 +
1854 + transport_free_session(conn->sess->se_sess);
1855 +
1856 + spin_lock_bh(&sess_idr_lock);
1857 + idr_remove(&sess_idr, conn->sess->session_index);
1858 + spin_unlock_bh(&sess_idr_lock);
1859 +
1860 kfree(conn->sess->sess_ops);
1861 kfree(conn->sess);
1862 conn->sess = NULL;
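
The iscsi_login_zero_tsih_s1() rework above replaces repeated inline cleanup with the usual cascading-goto unwind, releasing resources in reverse order of acquisition. A small generic sketch of that structure (hypothetical resources, plain C):

    #include <stdlib.h>

    struct ctx { void *a; void *b; void *c; };

    static int ctx_setup(struct ctx *ctx)
    {
        ctx->a = malloc(64);
        if (!ctx->a)
            goto fail;
        ctx->b = malloc(64);
        if (!ctx->b)
            goto free_a;
        ctx->c = malloc(64);
        if (!ctx->c)
            goto free_b;
        return 0;

    free_b:
        free(ctx->b);
    free_a:
        free(ctx->a);
    fail:
        return -1;  /* -ENOMEM in the kernel version */
    }
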
1863 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
1864 index 5474b5187be0..f4bd08cfac11 100644
1865 --- a/drivers/usb/gadget/function/f_uac2.c
1866 +++ b/drivers/usb/gadget/function/f_uac2.c
1867 @@ -929,14 +929,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
1868 };
1869
1870 struct cntrl_cur_lay3 {
1871 - __u32 dCUR;
1872 + __le32 dCUR;
1873 };
1874
1875 struct cntrl_range_lay3 {
1876 - __u16 wNumSubRanges;
1877 - __u32 dMIN;
1878 - __u32 dMAX;
1879 - __u32 dRES;
1880 + __le16 wNumSubRanges;
1881 + __le32 dMIN;
1882 + __le32 dMAX;
1883 + __le32 dRES;
1884 } __packed;
1885
1886 static inline void
1887 @@ -1285,9 +1285,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
1888 memset(&c, 0, sizeof(struct cntrl_cur_lay3));
1889
1890 if (entity_id == USB_IN_CLK_ID)
1891 - c.dCUR = p_srate;
1892 + c.dCUR = cpu_to_le32(p_srate);
1893 else if (entity_id == USB_OUT_CLK_ID)
1894 - c.dCUR = c_srate;
1895 + c.dCUR = cpu_to_le32(c_srate);
1896
1897 value = min_t(unsigned, w_length, sizeof c);
1898 memcpy(req->buf, &c, value);
1899 @@ -1325,15 +1325,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
1900
1901 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
1902 if (entity_id == USB_IN_CLK_ID)
1903 - r.dMIN = p_srate;
1904 + r.dMIN = cpu_to_le32(p_srate);
1905 else if (entity_id == USB_OUT_CLK_ID)
1906 - r.dMIN = c_srate;
1907 + r.dMIN = cpu_to_le32(c_srate);
1908 else
1909 return -EOPNOTSUPP;
1910
1911 r.dMAX = r.dMIN;
1912 r.dRES = 0;
1913 - r.wNumSubRanges = 1;
1914 + r.wNumSubRanges = cpu_to_le16(1);
1915
1916 value = min_t(unsigned, w_length, sizeof r);
1917 memcpy(req->buf, &r, value);
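
The f_uac2 change marks the wire-format fields __le16/__le32 and fills them with cpu_to_le16/32, so the control responses are correct on big-endian hosts as well. A small userspace sketch of the same idea using <endian.h> (hypothetical structure, not the gadget's real one):

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical little-endian wire structure. */
    struct range_lay3 {
        uint16_t wNumSubRanges;   /* little-endian on the wire */
        uint32_t dMIN;
        uint32_t dMAX;
        uint32_t dRES;
    } __attribute__((packed));

    static void fill_range(struct range_lay3 *r, uint32_t rate)
    {
        memset(r, 0, sizeof(*r));
        r->wNumSubRanges = htole16(1);   /* never store host-order values */
        r->dMIN = htole32(rate);
        r->dMAX = htole32(rate);
        r->dRES = htole32(0);
    }
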
1918 diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
1919 index f2c8862093a2..230e3248f386 100644
1920 --- a/drivers/usb/gadget/udc/r8a66597-udc.c
1921 +++ b/drivers/usb/gadget/udc/r8a66597-udc.c
1922 @@ -835,11 +835,11 @@ static void init_controller(struct r8a66597 *r8a66597)
1923
1924 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
1925
1926 - msleep(3);
1927 + mdelay(3);
1928
1929 r8a66597_bset(r8a66597, PLLC, SYSCFG0);
1930
1931 - msleep(1);
1932 + mdelay(1);
1933
1934 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
1935
1936 @@ -1193,7 +1193,7 @@ __acquires(r8a66597->lock)
1937 r8a66597->ep0_req->length = 2;
1938 /* AV: what happens if we get called again before that gets through? */
1939 spin_unlock(&r8a66597->lock);
1940 - r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
1941 + r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
1942 spin_lock(&r8a66597->lock);
1943 }
1944
1945 diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
1946 index 94eb2923afed..85d031ce85c1 100644
1947 --- a/drivers/usb/phy/phy-fsl-usb.c
1948 +++ b/drivers/usb/phy/phy-fsl-usb.c
1949 @@ -879,6 +879,7 @@ int usb_otg_start(struct platform_device *pdev)
1950 if (pdata->init && pdata->init(pdev) != 0)
1951 return -EINVAL;
1952
1953 +#ifdef CONFIG_PPC32
1954 if (pdata->big_endian_mmio) {
1955 _fsl_readl = _fsl_readl_be;
1956 _fsl_writel = _fsl_writel_be;
1957 @@ -886,6 +887,7 @@ int usb_otg_start(struct platform_device *pdev)
1958 _fsl_readl = _fsl_readl_le;
1959 _fsl_writel = _fsl_writel_le;
1960 }
1961 +#endif
1962
1963 /* request irq */
1964 p_otg->irq = platform_get_irq(pdev, 0);
1965 @@ -976,7 +978,7 @@ int usb_otg_start(struct platform_device *pdev)
1966 /*
1967 * state file in sysfs
1968 */
1969 -static int show_fsl_usb2_otg_state(struct device *dev,
1970 +static ssize_t show_fsl_usb2_otg_state(struct device *dev,
1971 struct device_attribute *attr, char *buf)
1972 {
1973 struct otg_fsm *fsm = &fsl_otg_dev->fsm;
1974 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
1975 index 41df8a27d7eb..2026885702a2 100644
1976 --- a/fs/cachefiles/namei.c
1977 +++ b/fs/cachefiles/namei.c
1978 @@ -195,7 +195,6 @@ wait_for_old_object:
1979 pr_err("\n");
1980 pr_err("Error: Unexpected object collision\n");
1981 cachefiles_printk_object(object, xobject);
1982 - BUG();
1983 }
1984 atomic_inc(&xobject->usage);
1985 write_unlock(&cache->active_lock);
1986 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
1987 index afbdc418966d..5e3bc9de7a16 100644
1988 --- a/fs/cachefiles/rdwr.c
1989 +++ b/fs/cachefiles/rdwr.c
1990 @@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
1991 struct cachefiles_one_read *monitor =
1992 container_of(wait, struct cachefiles_one_read, monitor);
1993 struct cachefiles_object *object;
1994 + struct fscache_retrieval *op = monitor->op;
1995 struct wait_bit_key *key = _key;
1996 struct page *page = wait->private;
1997
1998 @@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
1999 list_del(&wait->task_list);
2000
2001 /* move onto the action list and queue for FS-Cache thread pool */
2002 - ASSERT(monitor->op);
2003 + ASSERT(op);
2004
2005 - object = container_of(monitor->op->op.object,
2006 - struct cachefiles_object, fscache);
2007 + /* We need to temporarily bump the usage count as we don't own a ref
2008 + * here otherwise cachefiles_read_copier() may free the op between the
2009 + * monitor being enqueued on the op->to_do list and the op getting
2010 + * enqueued on the work queue.
2011 + */
2012 + fscache_get_retrieval(op);
2013
2014 + object = container_of(op->op.object, struct cachefiles_object, fscache);
2015 spin_lock(&object->work_lock);
2016 - list_add_tail(&monitor->op_link, &monitor->op->to_do);
2017 + list_add_tail(&monitor->op_link, &op->to_do);
2018 spin_unlock(&object->work_lock);
2019
2020 - fscache_enqueue_retrieval(monitor->op);
2021 + fscache_enqueue_retrieval(op);
2022 + fscache_put_retrieval(op);
2023 return 0;
2024 }
2025
2026 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
2027 index 3d03e48a9213..ad8bd96093f7 100644
2028 --- a/fs/cifs/cifs_debug.c
2029 +++ b/fs/cifs/cifs_debug.c
2030 @@ -123,25 +123,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
2031 seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
2032 seq_printf(m, "Features:");
2033 #ifdef CONFIG_CIFS_DFS_UPCALL
2034 - seq_printf(m, " dfs");
2035 + seq_printf(m, " DFS");
2036 #endif
2037 #ifdef CONFIG_CIFS_FSCACHE
2038 - seq_printf(m, " fscache");
2039 + seq_printf(m, ",FSCACHE");
2040 +#endif
2041 +#ifdef CONFIG_CIFS_SMB_DIRECT
2042 + seq_printf(m, ",SMB_DIRECT");
2043 +#endif
2044 +#ifdef CONFIG_CIFS_STATS2
2045 + seq_printf(m, ",STATS2");
2046 +#elif defined(CONFIG_CIFS_STATS)
2047 + seq_printf(m, ",STATS");
2048 +#endif
2049 +#ifdef CONFIG_CIFS_DEBUG2
2050 + seq_printf(m, ",DEBUG2");
2051 +#elif defined(CONFIG_CIFS_DEBUG)
2052 + seq_printf(m, ",DEBUG");
2053 +#endif
2054 +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2055 + seq_printf(m, ",ALLOW_INSECURE_LEGACY");
2056 #endif
2057 #ifdef CONFIG_CIFS_WEAK_PW_HASH
2058 - seq_printf(m, " lanman");
2059 + seq_printf(m, ",WEAK_PW_HASH");
2060 #endif
2061 #ifdef CONFIG_CIFS_POSIX
2062 - seq_printf(m, " posix");
2063 + seq_printf(m, ",CIFS_POSIX");
2064 #endif
2065 #ifdef CONFIG_CIFS_UPCALL
2066 - seq_printf(m, " spnego");
2067 + seq_printf(m, ",UPCALL(SPNEGO)");
2068 #endif
2069 #ifdef CONFIG_CIFS_XATTR
2070 - seq_printf(m, " xattr");
2071 + seq_printf(m, ",XATTR");
2072 #endif
2073 #ifdef CONFIG_CIFS_ACL
2074 - seq_printf(m, " acl");
2075 + seq_printf(m, ",ACL");
2076 #endif
2077 seq_putc(m, '\n');
2078 seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
2079 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2080 index 24c19eb94fa3..a012f70bba5c 100644
2081 --- a/fs/cifs/inode.c
2082 +++ b/fs/cifs/inode.c
2083 @@ -1116,6 +1116,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
2084 if (!server->ops->set_file_info)
2085 return -ENOSYS;
2086
2087 + info_buf.Pad = 0;
2088 +
2089 if (attrs->ia_valid & ATTR_ATIME) {
2090 set_time = true;
2091 info_buf.LastAccessTime =
2092 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
2093 index d031af8d3d4d..38d26cbcad07 100644
2094 --- a/fs/cifs/link.c
2095 +++ b/fs/cifs/link.c
2096 @@ -419,7 +419,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
2097 struct cifs_io_parms io_parms;
2098 int buf_type = CIFS_NO_BUFFER;
2099 __le16 *utf16_path;
2100 - __u8 oplock = SMB2_OPLOCK_LEVEL_II;
2101 + __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2102 struct smb2_file_all_info *pfile_info = NULL;
2103
2104 oparms.tcon = tcon;
2105 @@ -481,7 +481,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
2106 struct cifs_io_parms io_parms;
2107 int create_options = CREATE_NOT_DIR;
2108 __le16 *utf16_path;
2109 - __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
2110 + __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2111 struct kvec iov[2];
2112
2113 if (backup_cred(cifs_sb))
2114 diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
2115 index c3db2a882aee..bb208076cb71 100644
2116 --- a/fs/cifs/sess.c
2117 +++ b/fs/cifs/sess.c
2118 @@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
2119 goto setup_ntlmv2_ret;
2120 }
2121 *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
2122 + if (!*pbuffer) {
2123 + rc = -ENOMEM;
2124 + cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
2125 + *buflen = 0;
2126 + goto setup_ntlmv2_ret;
2127 + }
2128 sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
2129
2130 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
2131 diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
2132 index 1238cd3552f9..0267d8cbc996 100644
2133 --- a/fs/cifs/smb2inode.c
2134 +++ b/fs/cifs/smb2inode.c
2135 @@ -267,7 +267,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
2136 int rc;
2137
2138 if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
2139 - (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
2140 + (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
2141 (buf->Attributes == 0))
2142 return 0; /* would be a no op, no sense sending this */
2143
2144 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2145 index 812e4884c392..68622f1e706b 100644
2146 --- a/fs/cifs/smb2ops.c
2147 +++ b/fs/cifs/smb2ops.c
2148 @@ -894,6 +894,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
2149
2150 }
2151
2152 +/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
2153 +#define GMT_TOKEN_SIZE 50
2154 +
2155 +/*
2156 + * Input buffer contains (empty) struct smb_snapshot array with size filled in
2157 + * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
2158 + */
2159 static int
2160 smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2161 struct cifsFileInfo *cfile, void __user *ioc_buf)
2162 @@ -922,14 +929,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2163 kfree(retbuf);
2164 return rc;
2165 }
2166 - if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
2167 - rc = -ERANGE;
2168 - kfree(retbuf);
2169 - return rc;
2170 - }
2171
2172 - if (ret_data_len > snapshot_in.snapshot_array_size)
2173 - ret_data_len = snapshot_in.snapshot_array_size;
2174 + /*
2175 + * Check for min size, ie not large enough to fit even one GMT
2176 + * token (snapshot). On the first ioctl some users may pass in
2177 + * smaller size (or zero) to simply get the size of the array
2178 + * so the user space caller can allocate sufficient memory
2179 + * and retry the ioctl again with larger array size sufficient
2180 + * to hold all of the snapshot GMT tokens on the second try.
2181 + */
2182 + if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
2183 + ret_data_len = sizeof(struct smb_snapshot_array);
2184 +
2185 + /*
2186 + * We return struct SRV_SNAPSHOT_ARRAY, followed by
2187 + * the snapshot array (of 50 byte GMT tokens) each
2188 + * representing an available previous version of the data
2189 + */
2190 + if (ret_data_len > (snapshot_in.snapshot_array_size +
2191 + sizeof(struct smb_snapshot_array)))
2192 + ret_data_len = snapshot_in.snapshot_array_size +
2193 + sizeof(struct smb_snapshot_array);
2194
2195 if (copy_to_user(ioc_buf, retbuf, ret_data_len))
2196 rc = -EFAULT;
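
The comments added to smb3_enum_snapshots() describe a common two-pass ioctl: call once with a small (or zero) array size to learn how much space is needed, then call again with a buffer large enough for all of the GMT tokens. A hedged userspace sketch of that calling pattern — the ioctl request and structure here are placeholders, not the real CIFS interface:

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    struct snap_query {                   /* placeholder for the real array header */
        uint32_t number_of_snapshots;
        uint32_t number_of_snapshots_returned;
        uint32_t snapshot_array_size;     /* bytes available after the header      */
        char     snapshots[];
    };

    static struct snap_query *query_snapshots(int fd, unsigned long req /* placeholder */)
    {
        struct snap_query probe = { .snapshot_array_size = 0 }, *full;
        size_t need;

        if (ioctl(fd, req, &probe) < 0)   /* first pass: sizes only */
            return NULL;

        need = sizeof(*full) + probe.snapshot_array_size;
        full = calloc(1, need);
        if (!full)
            return NULL;
        full->snapshot_array_size = probe.snapshot_array_size;

        if (ioctl(fd, req, full) < 0) {   /* second pass: full token array */
            free(full);
            return NULL;
        }
        return full;
    }
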
2197 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2198 index 248c43b63f13..a225a21d04ad 100644
2199 --- a/fs/ext4/namei.c
2200 +++ b/fs/ext4/namei.c
2201 @@ -1415,6 +1415,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
2202 goto cleanup_and_exit;
2203 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
2204 "falling back\n"));
2205 + ret = NULL;
2206 }
2207 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
2208 if (!nblocks) {
2209 diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
2210 index 5dc655e410b4..54942d60e72a 100644
2211 --- a/fs/ext4/sysfs.c
2212 +++ b/fs/ext4/sysfs.c
2213 @@ -277,8 +277,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
2214 case attr_pointer_ui:
2215 if (!ptr)
2216 return 0;
2217 - return snprintf(buf, PAGE_SIZE, "%u\n",
2218 - *((unsigned int *) ptr));
2219 + if (a->attr_ptr == ptr_ext4_super_block_offset)
2220 + return snprintf(buf, PAGE_SIZE, "%u\n",
2221 + le32_to_cpup(ptr));
2222 + else
2223 + return snprintf(buf, PAGE_SIZE, "%u\n",
2224 + *((unsigned int *) ptr));
2225 case attr_pointer_atomic:
2226 if (!ptr)
2227 return 0;
2228 @@ -311,7 +315,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
2229 ret = kstrtoul(skip_spaces(buf), 0, &t);
2230 if (ret)
2231 return ret;
2232 - *((unsigned int *) ptr) = t;
2233 + if (a->attr_ptr == ptr_ext4_super_block_offset)
2234 + *((__le32 *) ptr) = cpu_to_le32(t);
2235 + else
2236 + *((unsigned int *) ptr) = t;
2237 return len;
2238 case attr_inode_readahead:
2239 return inode_readahead_blks_store(a, sbi, buf, len);
2240 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
2241 index 3fadfabcac39..fdcbe0f2814f 100644
2242 --- a/fs/ext4/xattr.c
2243 +++ b/fs/ext4/xattr.c
2244 @@ -184,6 +184,8 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
2245 struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
2246 if ((void *)next >= end)
2247 return -EFSCORRUPTED;
2248 + if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
2249 + return -EFSCORRUPTED;
2250 e = next;
2251 }
2252
2253 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
2254 index de67745e1cd7..77946d6f617d 100644
2255 --- a/fs/fscache/operation.c
2256 +++ b/fs/fscache/operation.c
2257 @@ -66,7 +66,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
2258 ASSERT(op->processor != NULL);
2259 ASSERT(fscache_object_is_available(op->object));
2260 ASSERTCMP(atomic_read(&op->usage), >, 0);
2261 - ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
2262 + ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
2263 + op->state, ==, FSCACHE_OP_ST_CANCELLED);
2264
2265 fscache_stat(&fscache_n_op_enqueue);
2266 switch (op->flags & FSCACHE_OP_TYPE) {
2267 @@ -481,7 +482,8 @@ void fscache_put_operation(struct fscache_operation *op)
2268 struct fscache_cache *cache;
2269
2270 _enter("{OBJ%x OP%x,%d}",
2271 - op->object->debug_id, op->debug_id, atomic_read(&op->usage));
2272 + op->object ? op->object->debug_id : 0,
2273 + op->debug_id, atomic_read(&op->usage));
2274
2275 ASSERTCMP(atomic_read(&op->usage), >, 0);
2276
2277 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2278 index f11792672977..c94bab6103f5 100644
2279 --- a/fs/fuse/dev.c
2280 +++ b/fs/fuse/dev.c
2281 @@ -130,6 +130,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
2282 return !fc->initialized || (for_background && fc->blocked);
2283 }
2284
2285 +static void fuse_drop_waiting(struct fuse_conn *fc)
2286 +{
2287 + if (fc->connected) {
2288 + atomic_dec(&fc->num_waiting);
2289 + } else if (atomic_dec_and_test(&fc->num_waiting)) {
2290 + /* wake up aborters */
2291 + wake_up_all(&fc->blocked_waitq);
2292 + }
2293 +}
2294 +
2295 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
2296 bool for_background)
2297 {
2298 @@ -170,7 +180,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
2299 return req;
2300
2301 out:
2302 - atomic_dec(&fc->num_waiting);
2303 + fuse_drop_waiting(fc);
2304 return ERR_PTR(err);
2305 }
2306
2307 @@ -277,7 +287,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
2308
2309 if (test_bit(FR_WAITING, &req->flags)) {
2310 __clear_bit(FR_WAITING, &req->flags);
2311 - atomic_dec(&fc->num_waiting);
2312 + fuse_drop_waiting(fc);
2313 }
2314
2315 if (req->stolen_file)
2316 @@ -363,7 +373,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
2317 struct fuse_iqueue *fiq = &fc->iq;
2318
2319 if (test_and_set_bit(FR_FINISHED, &req->flags))
2320 - return;
2321 + goto put_request;
2322
2323 spin_lock(&fiq->waitq.lock);
2324 list_del_init(&req->intr_entry);
2325 @@ -393,6 +403,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
2326 wake_up(&req->waitq);
2327 if (req->end)
2328 req->end(fc, req);
2329 +put_request:
2330 fuse_put_request(fc, req);
2331 }
2332
2333 @@ -1935,11 +1946,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
2334 if (!fud)
2335 return -EPERM;
2336
2337 + pipe_lock(pipe);
2338 +
2339 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
2340 - if (!bufs)
2341 + if (!bufs) {
2342 + pipe_unlock(pipe);
2343 return -ENOMEM;
2344 + }
2345
2346 - pipe_lock(pipe);
2347 nbuf = 0;
2348 rem = 0;
2349 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
2350 @@ -2094,6 +2108,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
2351 set_bit(FR_ABORTED, &req->flags);
2352 if (!test_bit(FR_LOCKED, &req->flags)) {
2353 set_bit(FR_PRIVATE, &req->flags);
2354 + __fuse_get_request(req);
2355 list_move(&req->list, &to_end1);
2356 }
2357 spin_unlock(&req->waitq.lock);
2358 @@ -2120,7 +2135,6 @@ void fuse_abort_conn(struct fuse_conn *fc)
2359
2360 while (!list_empty(&to_end1)) {
2361 req = list_first_entry(&to_end1, struct fuse_req, list);
2362 - __fuse_get_request(req);
2363 list_del_init(&req->list);
2364 request_end(fc, req);
2365 }
2366 @@ -2131,6 +2145,11 @@ void fuse_abort_conn(struct fuse_conn *fc)
2367 }
2368 EXPORT_SYMBOL_GPL(fuse_abort_conn);
2369
2370 +void fuse_wait_aborted(struct fuse_conn *fc)
2371 +{
2372 + wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
2373 +}
2374 +
2375 int fuse_dev_release(struct inode *inode, struct file *file)
2376 {
2377 struct fuse_dev *fud = fuse_get_dev(file);
2378 @@ -2138,9 +2157,15 @@ int fuse_dev_release(struct inode *inode, struct file *file)
2379 if (fud) {
2380 struct fuse_conn *fc = fud->fc;
2381 struct fuse_pqueue *fpq = &fud->pq;
2382 + LIST_HEAD(to_end);
2383
2384 + spin_lock(&fpq->lock);
2385 WARN_ON(!list_empty(&fpq->io));
2386 - end_requests(fc, &fpq->processing);
2387 + list_splice_init(&fpq->processing, &to_end);
2388 + spin_unlock(&fpq->lock);
2389 +
2390 + end_requests(fc, &to_end);
2391 +
2392 /* Are we the last open device? */
2393 if (atomic_dec_and_test(&fc->dev_count)) {
2394 WARN_ON(fc->iq.fasync != NULL);
2395 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
2396 index cca8dd3bda09..60dd2bc10776 100644
2397 --- a/fs/fuse/dir.c
2398 +++ b/fs/fuse/dir.c
2399 @@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
2400 struct inode *inode;
2401 struct dentry *newent;
2402 bool outarg_valid = true;
2403 + bool locked;
2404
2405 - fuse_lock_inode(dir);
2406 + locked = fuse_lock_inode(dir);
2407 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
2408 &outarg, &inode);
2409 - fuse_unlock_inode(dir);
2410 + fuse_unlock_inode(dir, locked);
2411 if (err == -ENOENT) {
2412 outarg_valid = false;
2413 err = 0;
2414 @@ -1336,6 +1337,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
2415 struct fuse_conn *fc = get_fuse_conn(inode);
2416 struct fuse_req *req;
2417 u64 attr_version = 0;
2418 + bool locked;
2419
2420 if (is_bad_inode(inode))
2421 return -EIO;
2422 @@ -1363,9 +1365,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
2423 fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
2424 FUSE_READDIR);
2425 }
2426 - fuse_lock_inode(inode);
2427 + locked = fuse_lock_inode(inode);
2428 fuse_request_send(fc, req);
2429 - fuse_unlock_inode(inode);
2430 + fuse_unlock_inode(inode, locked);
2431 nbytes = req->out.args[0].size;
2432 err = req->out.h.error;
2433 fuse_put_request(fc, req);
2434 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
2435 index 996aa23c409e..4408abf6675b 100644
2436 --- a/fs/fuse/file.c
2437 +++ b/fs/fuse/file.c
2438 @@ -868,6 +868,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
2439 }
2440
2441 if (WARN_ON(req->num_pages >= req->max_pages)) {
2442 + unlock_page(page);
2443 fuse_put_request(fc, req);
2444 return -EIO;
2445 }
2446 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
2447 index 91307940c8ac..1c905c7666de 100644
2448 --- a/fs/fuse/fuse_i.h
2449 +++ b/fs/fuse/fuse_i.h
2450 @@ -854,6 +854,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
2451
2452 /* Abort all requests */
2453 void fuse_abort_conn(struct fuse_conn *fc);
2454 +void fuse_wait_aborted(struct fuse_conn *fc);
2455
2456 /**
2457 * Invalidate inode attributes
2458 @@ -967,8 +968,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
2459
2460 void fuse_set_initialized(struct fuse_conn *fc);
2461
2462 -void fuse_unlock_inode(struct inode *inode);
2463 -void fuse_lock_inode(struct inode *inode);
2464 +void fuse_unlock_inode(struct inode *inode, bool locked);
2465 +bool fuse_lock_inode(struct inode *inode);
2466
2467 int fuse_setxattr(struct inode *inode, const char *name, const void *value,
2468 size_t size, int flags);
2469 diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
2470 index f95e1d49b048..7a9b1069d267 100644
2471 --- a/fs/fuse/inode.c
2472 +++ b/fs/fuse/inode.c
2473 @@ -356,15 +356,21 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
2474 return 0;
2475 }
2476
2477 -void fuse_lock_inode(struct inode *inode)
2478 +bool fuse_lock_inode(struct inode *inode)
2479 {
2480 - if (!get_fuse_conn(inode)->parallel_dirops)
2481 + bool locked = false;
2482 +
2483 + if (!get_fuse_conn(inode)->parallel_dirops) {
2484 mutex_lock(&get_fuse_inode(inode)->mutex);
2485 + locked = true;
2486 + }
2487 +
2488 + return locked;
2489 }
2490
2491 -void fuse_unlock_inode(struct inode *inode)
2492 +void fuse_unlock_inode(struct inode *inode, bool locked)
2493 {
2494 - if (!get_fuse_conn(inode)->parallel_dirops)
2495 + if (locked)
2496 mutex_unlock(&get_fuse_inode(inode)->mutex);
2497 }
2498
2499 @@ -396,9 +402,6 @@ static void fuse_put_super(struct super_block *sb)
2500 {
2501 struct fuse_conn *fc = get_fuse_conn_super(sb);
2502
2503 - fuse_send_destroy(fc);
2504 -
2505 - fuse_abort_conn(fc);
2506 mutex_lock(&fuse_mutex);
2507 list_del(&fc->entry);
2508 fuse_ctl_remove_conn(fc);
2509 @@ -1198,16 +1201,25 @@ static struct dentry *fuse_mount(struct file_system_type *fs_type,
2510 return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
2511 }
2512
2513 -static void fuse_kill_sb_anon(struct super_block *sb)
2514 +static void fuse_sb_destroy(struct super_block *sb)
2515 {
2516 struct fuse_conn *fc = get_fuse_conn_super(sb);
2517
2518 if (fc) {
2519 + fuse_send_destroy(fc);
2520 +
2521 + fuse_abort_conn(fc);
2522 + fuse_wait_aborted(fc);
2523 +
2524 down_write(&fc->killsb);
2525 fc->sb = NULL;
2526 up_write(&fc->killsb);
2527 }
2528 +}
2529
2530 +static void fuse_kill_sb_anon(struct super_block *sb)
2531 +{
2532 + fuse_sb_destroy(sb);
2533 kill_anon_super(sb);
2534 }
2535
2536 @@ -1230,14 +1242,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
2537
2538 static void fuse_kill_sb_blk(struct super_block *sb)
2539 {
2540 - struct fuse_conn *fc = get_fuse_conn_super(sb);
2541 -
2542 - if (fc) {
2543 - down_write(&fc->killsb);
2544 - fc->sb = NULL;
2545 - up_write(&fc->killsb);
2546 - }
2547 -
2548 + fuse_sb_destroy(sb);
2549 kill_block_super(sb);
2550 }
2551
2552 diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
2553 index fcff2e0487fe..f1c1430ae721 100644
2554 --- a/fs/squashfs/file.c
2555 +++ b/fs/squashfs/file.c
2556 @@ -374,13 +374,29 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
2557 return squashfs_block_size(size);
2558 }
2559
2560 +void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
2561 +{
2562 + int copied;
2563 + void *pageaddr;
2564 +
2565 + pageaddr = kmap_atomic(page);
2566 + copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
2567 + memset(pageaddr + copied, 0, PAGE_SIZE - copied);
2568 + kunmap_atomic(pageaddr);
2569 +
2570 + flush_dcache_page(page);
2571 + if (copied == avail)
2572 + SetPageUptodate(page);
2573 + else
2574 + SetPageError(page);
2575 +}
2576 +
2577 /* Copy data into page cache */
2578 void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
2579 int bytes, int offset)
2580 {
2581 struct inode *inode = page->mapping->host;
2582 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
2583 - void *pageaddr;
2584 int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
2585 int start_index = page->index & ~mask, end_index = start_index | mask;
2586
2587 @@ -406,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
2588 if (PageUptodate(push_page))
2589 goto skip_page;
2590
2591 - pageaddr = kmap_atomic(push_page);
2592 - squashfs_copy_data(pageaddr, buffer, offset, avail);
2593 - memset(pageaddr + avail, 0, PAGE_SIZE - avail);
2594 - kunmap_atomic(pageaddr);
2595 - flush_dcache_page(push_page);
2596 - SetPageUptodate(push_page);
2597 + squashfs_fill_page(push_page, buffer, offset, avail);
2598 skip_page:
2599 unlock_page(push_page);
2600 if (i != page->index)
2601 @@ -420,10 +431,9 @@ skip_page:
2602 }
2603
2604 /* Read datablock stored packed inside a fragment (tail-end packed block) */
2605 -static int squashfs_readpage_fragment(struct page *page)
2606 +static int squashfs_readpage_fragment(struct page *page, int expected)
2607 {
2608 struct inode *inode = page->mapping->host;
2609 - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
2610 struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
2611 squashfs_i(inode)->fragment_block,
2612 squashfs_i(inode)->fragment_size);
2613 @@ -434,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page)
2614 squashfs_i(inode)->fragment_block,
2615 squashfs_i(inode)->fragment_size);
2616 else
2617 - squashfs_copy_cache(page, buffer, i_size_read(inode) &
2618 - (msblk->block_size - 1),
2619 + squashfs_copy_cache(page, buffer, expected,
2620 squashfs_i(inode)->fragment_offset);
2621
2622 squashfs_cache_put(buffer);
2623 return res;
2624 }
2625
2626 -static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
2627 +static int squashfs_readpage_sparse(struct page *page, int expected)
2628 {
2629 - struct inode *inode = page->mapping->host;
2630 - struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
2631 - int bytes = index == file_end ?
2632 - (i_size_read(inode) & (msblk->block_size - 1)) :
2633 - msblk->block_size;
2634 -
2635 - squashfs_copy_cache(page, NULL, bytes, 0);
2636 + squashfs_copy_cache(page, NULL, expected, 0);
2637 return 0;
2638 }
2639
2640 @@ -460,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page)
2641 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
2642 int index = page->index >> (msblk->block_log - PAGE_SHIFT);
2643 int file_end = i_size_read(inode) >> msblk->block_log;
2644 + int expected = index == file_end ?
2645 + (i_size_read(inode) & (msblk->block_size - 1)) :
2646 + msblk->block_size;
2647 int res;
2648 void *pageaddr;
2649
2650 @@ -478,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page)
2651 goto error_out;
2652
2653 if (bsize == 0)
2654 - res = squashfs_readpage_sparse(page, index, file_end);
2655 + res = squashfs_readpage_sparse(page, expected);
2656 else
2657 - res = squashfs_readpage_block(page, block, bsize);
2658 + res = squashfs_readpage_block(page, block, bsize, expected);
2659 } else
2660 - res = squashfs_readpage_fragment(page);
2661 + res = squashfs_readpage_fragment(page, expected);
2662
2663 if (!res)
2664 return 0;
2665 diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
2666 index f2310d2a2019..a9ba8d96776a 100644
2667 --- a/fs/squashfs/file_cache.c
2668 +++ b/fs/squashfs/file_cache.c
2669 @@ -20,7 +20,7 @@
2670 #include "squashfs.h"
2671
2672 /* Read separately compressed datablock and memcopy into page cache */
2673 -int squashfs_readpage_block(struct page *page, u64 block, int bsize)
2674 +int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
2675 {
2676 struct inode *i = page->mapping->host;
2677 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
2678 @@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize)
2679 ERROR("Unable to read page, block %llx, size %x\n", block,
2680 bsize);
2681 else
2682 - squashfs_copy_cache(page, buffer, buffer->length, 0);
2683 + squashfs_copy_cache(page, buffer, expected, 0);
2684
2685 squashfs_cache_put(buffer);
2686 return res;
2687 diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
2688 index cb485d8e0e91..80db1b86a27c 100644
2689 --- a/fs/squashfs/file_direct.c
2690 +++ b/fs/squashfs/file_direct.c
2691 @@ -21,10 +21,11 @@
2692 #include "page_actor.h"
2693
2694 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
2695 - int pages, struct page **page);
2696 + int pages, struct page **page, int bytes);
2697
2698 /* Read separately compressed datablock directly into page cache */
2699 -int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
2700 +int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
2701 + int expected)
2702
2703 {
2704 struct inode *inode = target_page->mapping->host;
2705 @@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
2706 * using an intermediate buffer.
2707 */
2708 res = squashfs_read_cache(target_page, block, bsize, pages,
2709 - page);
2710 + page, expected);
2711 if (res < 0)
2712 goto mark_errored;
2713
2714 @@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
2715 if (res < 0)
2716 goto mark_errored;
2717
2718 + if (res != expected) {
2719 + res = -EIO;
2720 + goto mark_errored;
2721 + }
2722 +
2723 /* Last page may have trailing bytes not filled */
2724 bytes = res % PAGE_SIZE;
2725 if (bytes) {
2726 @@ -138,13 +144,12 @@ out:
2727
2728
2729 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
2730 - int pages, struct page **page)
2731 + int pages, struct page **page, int bytes)
2732 {
2733 struct inode *i = target_page->mapping->host;
2734 struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
2735 block, bsize);
2736 - int bytes = buffer->length, res = buffer->error, n, offset = 0;
2737 - void *pageaddr;
2738 + int res = buffer->error, n, offset = 0;
2739
2740 if (res) {
2741 ERROR("Unable to read page, block %llx, size %x\n", block,
2742 @@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
2743 if (page[n] == NULL)
2744 continue;
2745
2746 - pageaddr = kmap_atomic(page[n]);
2747 - squashfs_copy_data(pageaddr, buffer, offset, avail);
2748 - memset(pageaddr + avail, 0, PAGE_SIZE - avail);
2749 - kunmap_atomic(pageaddr);
2750 - flush_dcache_page(page[n]);
2751 - SetPageUptodate(page[n]);
2752 + squashfs_fill_page(page[n], buffer, offset, avail);
2753 unlock_page(page[n]);
2754 if (page[n] != target_page)
2755 put_page(page[n]);
2756 diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
2757 index 887d6d270080..f89f8a74c6ce 100644
2758 --- a/fs/squashfs/squashfs.h
2759 +++ b/fs/squashfs/squashfs.h
2760 @@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
2761 u64, u64, unsigned int);
2762
2763 /* file.c */
2764 +void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
2765 void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
2766 int);
2767
2768 /* file_xxx.c */
2769 -extern int squashfs_readpage_block(struct page *, u64, int);
2770 +extern int squashfs_readpage_block(struct page *, u64, int, int);
2771
2772 /* id.c */
2773 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
2774 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
2775 index 39c75a86c67f..666986b95c5d 100644
2776 --- a/fs/sysfs/file.c
2777 +++ b/fs/sysfs/file.c
2778 @@ -407,6 +407,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
2779 }
2780 EXPORT_SYMBOL_GPL(sysfs_chmod_file);
2781
2782 +/**
2783 + * sysfs_break_active_protection - break "active" protection
2784 + * @kobj: The kernel object @attr is associated with.
2785 + * @attr: The attribute to break the "active" protection for.
2786 + *
2787 + * With sysfs, just like kernfs, deletion of an attribute is postponed until
2788 + * all active .show() and .store() callbacks have finished unless this function
2789 + * is called. Hence this function is useful in methods that implement self
2790 + * deletion.
2791 + */
2792 +struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
2793 + const struct attribute *attr)
2794 +{
2795 + struct kernfs_node *kn;
2796 +
2797 + kobject_get(kobj);
2798 + kn = kernfs_find_and_get(kobj->sd, attr->name);
2799 + if (kn)
2800 + kernfs_break_active_protection(kn);
2801 + return kn;
2802 +}
2803 +EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
2804 +
2805 +/**
2806 + * sysfs_unbreak_active_protection - restore "active" protection
2807 + * @kn: Pointer returned by sysfs_break_active_protection().
2808 + *
2809 + * Undo the effects of sysfs_break_active_protection(). Since this function
2810 + * calls kernfs_put() on the kernfs node that corresponds to the 'attr'
2811 + * argument passed to sysfs_break_active_protection() that attribute may have
2812 + * been removed between the sysfs_break_active_protection() and
2813 + * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after
2814 + * this function has returned.
2815 + */
2816 +void sysfs_unbreak_active_protection(struct kernfs_node *kn)
2817 +{
2818 + struct kobject *kobj = kn->parent->priv;
2819 +
2820 + kernfs_unbreak_active_protection(kn);
2821 + kernfs_put(kn);
2822 + kobject_put(kobj);
2823 +}
2824 +EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
2825 +
2826 /**
2827 * sysfs_remove_file_ns - remove an object attribute with a custom ns tag
2828 * @kobj: object we're acting for
2829 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
2830 index 00a1f330f93a..d3c19f8c4564 100644
2831 --- a/include/linux/sysfs.h
2832 +++ b/include/linux/sysfs.h
2833 @@ -238,6 +238,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
2834 const struct attribute **attr);
2835 int __must_check sysfs_chmod_file(struct kobject *kobj,
2836 const struct attribute *attr, umode_t mode);
2837 +struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
2838 + const struct attribute *attr);
2839 +void sysfs_unbreak_active_protection(struct kernfs_node *kn);
2840 void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
2841 const void *ns);
2842 bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
2843 @@ -351,6 +354,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
2844 return 0;
2845 }
2846
2847 +static inline struct kernfs_node *
2848 +sysfs_break_active_protection(struct kobject *kobj,
2849 + const struct attribute *attr)
2850 +{
2851 + return NULL;
2852 +}
2853 +
2854 +static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
2855 +{
2856 +}
2857 +
2858 static inline void sysfs_remove_file_ns(struct kobject *kobj,
2859 const struct attribute *attr,
2860 const void *ns)
2861 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2862 index 69485183af79..b9e966bcdd20 100644
2863 --- a/kernel/kprobes.c
2864 +++ b/kernel/kprobes.c
2865 @@ -2441,7 +2441,7 @@ static int __init debugfs_kprobe_init(void)
2866 if (!dir)
2867 return -ENOMEM;
2868
2869 - file = debugfs_create_file("list", 0444, dir, NULL,
2870 + file = debugfs_create_file("list", 0400, dir, NULL,
2871 &debugfs_kprobes_operations);
2872 if (!file)
2873 goto error;
2874 @@ -2451,7 +2451,7 @@ static int __init debugfs_kprobe_init(void)
2875 if (!file)
2876 goto error;
2877
2878 - file = debugfs_create_file("blacklist", 0444, dir, NULL,
2879 + file = debugfs_create_file("blacklist", 0400, dir, NULL,
2880 &debugfs_kprobe_blacklist_ops);
2881 if (!file)
2882 goto error;
2883 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
2884 index 24d603d29512..7df6be31be36 100644
2885 --- a/kernel/sysctl.c
2886 +++ b/kernel/sysctl.c
2887 @@ -345,7 +345,8 @@ static struct ctl_table kern_table[] = {
2888 .data = &sysctl_sched_time_avg,
2889 .maxlen = sizeof(unsigned int),
2890 .mode = 0644,
2891 - .proc_handler = proc_dointvec,
2892 + .proc_handler = proc_dointvec_minmax,
2893 + .extra1 = &one,
2894 },
2895 {
2896 .procname = "sched_shares_window_ns",
2897 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2898 index 349f4a8e3c4f..86a6b331b964 100644
2899 --- a/mm/memcontrol.c
2900 +++ b/mm/memcontrol.c
2901 @@ -4072,6 +4072,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
2902
2903 static DEFINE_IDR(mem_cgroup_idr);
2904
2905 +static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
2906 +{
2907 + if (memcg->id.id > 0) {
2908 + idr_remove(&mem_cgroup_idr, memcg->id.id);
2909 + memcg->id.id = 0;
2910 + }
2911 +}
2912 +
2913 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
2914 {
2915 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
2916 @@ -4082,8 +4090,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
2917 {
2918 VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
2919 if (atomic_sub_and_test(n, &memcg->id.ref)) {
2920 - idr_remove(&mem_cgroup_idr, memcg->id.id);
2921 - memcg->id.id = 0;
2922 + mem_cgroup_id_remove(memcg);
2923
2924 /* Memcg ID pins CSS */
2925 css_put(&memcg->css);
2926 @@ -4208,8 +4215,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
2927 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
2928 return memcg;
2929 fail:
2930 - if (memcg->id.id > 0)
2931 - idr_remove(&mem_cgroup_idr, memcg->id.id);
2932 + mem_cgroup_id_remove(memcg);
2933 __mem_cgroup_free(memcg);
2934 return NULL;
2935 }
2936 @@ -4268,6 +4274,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
2937
2938 return &memcg->css;
2939 fail:
2940 + mem_cgroup_id_remove(memcg);
2941 mem_cgroup_free(memcg);
2942 return ERR_PTR(-ENOMEM);
2943 }
2944 diff --git a/mm/memory.c b/mm/memory.c
2945 index 88f8d6a2af05..0ff735601654 100644
2946 --- a/mm/memory.c
2947 +++ b/mm/memory.c
2948 @@ -3861,6 +3861,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2949 return -EINVAL;
2950
2951 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
2952 + if (!maddr)
2953 + return -ENOMEM;
2954 +
2955 if (write)
2956 memcpy_toio(maddr + offset, buf, len);
2957 else
2958 diff --git a/mm/zswap.c b/mm/zswap.c
2959 index ded051e3433d..c2b5435fe617 100644
2960 --- a/mm/zswap.c
2961 +++ b/mm/zswap.c
2962 @@ -1018,6 +1018,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
2963 ret = -ENOMEM;
2964 goto reject;
2965 }
2966 +
2967 + /* A second zswap_is_full() check after
2968 + * zswap_shrink() to make sure it's now
2969 + * under the max_pool_percent
2970 + */
2971 + if (zswap_is_full()) {
2972 + ret = -ENOMEM;
2973 + goto reject;
2974 + }
2975 }
2976
2977 /* allocate entry */
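
The zswap hunk re-checks the pool limit after shrinking, because a single shrink pass may not bring usage back under max_pool_percent. The shape of that check, as a tiny generic sketch with hypothetical helpers:

    #include <errno.h>

    extern int pool_is_full(void);        /* hypothetical */
    extern void shrink_pool_once(void);   /* hypothetical */

    static int reserve_pool_space(void)
    {
        if (pool_is_full()) {
            shrink_pool_once();
            /*
             * Second check after shrinking: if the pool is still over its
             * limit, reject the store instead of overshooting the limit.
             */
            if (pool_is_full())
                return -ENOMEM;
        }
        return 0;
    }
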
2978 diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
2979 index d730a0f68f46..a0443d40d677 100644
2980 --- a/net/caif/caif_dev.c
2981 +++ b/net/caif/caif_dev.c
2982 @@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
2983 caifd = caif_get(skb->dev);
2984
2985 WARN_ON(caifd == NULL);
2986 - if (caifd == NULL)
2987 + if (!caifd) {
2988 + rcu_read_unlock();
2989 return;
2990 + }
2991
2992 caifd_hold(caifd);
2993 rcu_read_unlock();
2994 diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
2995 index 972353cd1778..65a15889d432 100644
2996 --- a/net/ipv4/cipso_ipv4.c
2997 +++ b/net/ipv4/cipso_ipv4.c
2998 @@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
2999 int taglen;
3000
3001 for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
3002 - if (optptr[0] == IPOPT_CIPSO)
3003 + switch (optptr[0]) {
3004 + case IPOPT_CIPSO:
3005 return optptr;
3006 - taglen = optptr[1];
3007 + case IPOPT_END:
3008 + return NULL;
3009 + case IPOPT_NOOP:
3010 + taglen = 1;
3011 + break;
3012 + default:
3013 + taglen = optptr[1];
3014 + }
3015 optlen -= taglen;
3016 optptr += taglen;
3017 }
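
The CIPSO fix teaches the option walk about the two single-byte IPv4 options: END terminates the list and NOOP advances by exactly one byte; every other option carries a length byte. A generic sketch of such an options walk (userspace C; the target option number is a parameter rather than CIPSO specifically, and the extra bounds checks are an assumption beyond the kernel code):

    #include <stddef.h>
    #include <stdint.h>

    #define IPV4_OPT_END  0   /* end of option list: single byte      */
    #define IPV4_OPT_NOOP 1   /* padding: single byte, no length byte */

    /* Return a pointer to option 'wanted' inside opts[0..optlen), or NULL. */
    static const uint8_t *find_ip_option(const uint8_t *opts, size_t optlen,
                                         uint8_t wanted)
    {
        while (optlen > 0) {
            size_t taglen;

            if (opts[0] == wanted)
                return opts;
            switch (opts[0]) {
            case IPV4_OPT_END:
                return NULL;
            case IPV4_OPT_NOOP:
                taglen = 1;
                break;
            default:
                if (optlen < 2 || opts[1] < 2 || opts[1] > optlen)
                    return NULL;   /* malformed length */
                taglen = opts[1];
            }
            opts += taglen;
            optlen -= taglen;
        }
        return NULL;
    }
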
3018 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
3019 index beae93fd66d5..a5aeeb613fac 100644
3020 --- a/net/ipv6/ip6_vti.c
3021 +++ b/net/ipv6/ip6_vti.c
3022 @@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
3023 goto tx_err_dst_release;
3024 }
3025
3026 - skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
3027 - skb_dst_set(skb, dst);
3028 - skb->dev = skb_dst(skb)->dev;
3029 -
3030 mtu = dst_mtu(dst);
3031 if (!skb->ignore_df && skb->len > mtu) {
3032 skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
3033 @@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
3034 htonl(mtu));
3035 }
3036
3037 - return -EMSGSIZE;
3038 + err = -EMSGSIZE;
3039 + goto tx_err_dst_release;
3040 }
3041
3042 + skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
3043 + skb_dst_set(skb, dst);
3044 + skb->dev = skb_dst(skb)->dev;
3045 +
3046 err = dst_output(t->net, skb->sk, skb);
3047 if (net_xmit_eval(err) == 0) {
3048 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
3049 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
3050 index a2756096b94a..ca7de02e0a6e 100644
3051 --- a/net/mac80211/util.c
3052 +++ b/net/mac80211/util.c
3053 @@ -2061,7 +2061,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
3054 if (!sta->uploaded)
3055 continue;
3056
3057 - if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
3058 + if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
3059 + sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
3060 continue;
3061
3062 for (state = IEEE80211_STA_NOTEXIST;
3063 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3064 index 5b75468b5acd..146d83785b37 100644
3065 --- a/net/wireless/nl80211.c
3066 +++ b/net/wireless/nl80211.c
3067 @@ -4058,6 +4058,7 @@ static int parse_station_flags(struct genl_info *info,
3068 params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3069 BIT(NL80211_STA_FLAG_MFP) |
3070 BIT(NL80211_STA_FLAG_AUTHORIZED);
3071 + break;
3072 default:
3073 return -EINVAL;
3074 }
3075 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3076 index 5b8fa6832687..1f943d97dc29 100644
3077 --- a/net/xfrm/xfrm_policy.c
3078 +++ b/net/xfrm/xfrm_policy.c
3079 @@ -2354,6 +2354,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3080 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
3081 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3082
3083 + if (IS_ERR(dst))
3084 + dst_release(dst_orig);
3085 +
3086 return dst;
3087 }
3088 EXPORT_SYMBOL(xfrm_lookup_route);
3089 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
3090 index bb61956c0f9c..6e768093d7c8 100644
3091 --- a/net/xfrm/xfrm_user.c
3092 +++ b/net/xfrm/xfrm_user.c
3093 @@ -984,10 +984,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
3094 {
3095 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
3096
3097 - if (nlsk)
3098 - return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
3099 - else
3100 - return -1;
3101 + if (!nlsk) {
3102 + kfree_skb(skb);
3103 + return -EPIPE;
3104 + }
3105 +
3106 + return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
3107 }
3108
3109 static inline size_t xfrm_spdinfo_msgsize(void)
3110 diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
3111 index 45fc06c0e0e5..6b504f407079 100644
3112 --- a/sound/soc/sirf/sirf-usp.c
3113 +++ b/sound/soc/sirf/sirf-usp.c
3114 @@ -367,10 +367,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev)
3115 platform_set_drvdata(pdev, usp);
3116
3117 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3118 - base = devm_ioremap(&pdev->dev, mem_res->start,
3119 - resource_size(mem_res));
3120 - if (base == NULL)
3121 - return -ENOMEM;
3122 + base = devm_ioremap_resource(&pdev->dev, mem_res);
3123 + if (IS_ERR(base))
3124 + return PTR_ERR(base);
3125 usp->regmap = devm_regmap_init_mmio(&pdev->dev, base,
3126 &sirf_usp_regmap_config);
3127 if (IS_ERR(usp->regmap))
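Switching to devm_ioremap_resource() above returns an error pointer that the driver unwraps with IS_ERR()/PTR_ERR(). A self-contained sketch of how that idiom works, modeled on the kernel's include/linux/err.h convention of packing a small negative errno into the top of the pointer range (err_ptr()/ptr_err()/is_err() here are local stand-ins, and map_resource() is hypothetical):

/* Illustrative sketch: a single return value carries either a valid pointer
 * or an encoded negative errno, distinguished by landing in the last page
 * of the address range. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error) { return (void *)error; }
static long ptr_err(const void *ptr) { return (long)ptr; }
static int is_err(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for an ioremap-style helper: returns a mapping or an error pointer. */
static void *map_resource(int missing)
{
    static char fake_mapping[16];

    if (missing)
        return err_ptr(-ENOMEM);
    return fake_mapping;
}

int main(void)
{
    void *base = map_resource(1);

    if (is_err(base)) {
        printf("mapping failed: %ld\n", ptr_err(base));   /* -12 (-ENOMEM) */
        return 1;
    }
    return 0;
}
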
3128 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
3129 index 20680a490897..b111ecda6439 100644
3130 --- a/sound/soc/soc-pcm.c
3131 +++ b/sound/soc/soc-pcm.c
3132 @@ -1621,6 +1621,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream)
3133 int i;
3134
3135 for (i = 0; i < be->num_codecs; i++) {
3136 + /*
3137 + * Skip CODECs which don't support the current stream
3138 + * type. See soc_pcm_init_runtime_hw() for more details
3139 + */
3140 + if (!snd_soc_dai_stream_valid(be->codec_dais[i],
3141 + stream))
3142 + continue;
3143 +
3144 codec_dai_drv = be->codec_dais[i]->driver;
3145 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
3146 codec_stream = &codec_dai_drv->playback;
3147 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
3148 index 9664b1ff4285..5ec2de8f49b4 100644
3149 --- a/tools/power/x86/turbostat/turbostat.c
3150 +++ b/tools/power/x86/turbostat/turbostat.c
3151 @@ -733,9 +733,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
3152 if (!printed || !summary_only)
3153 print_header();
3154
3155 - if (topo.num_cpus > 1)
3156 - format_counters(&average.threads, &average.cores,
3157 - &average.packages);
3158 + format_counters(&average.threads, &average.cores, &average.packages);
3159
3160 printed = 1;
3161
3162 @@ -3202,7 +3200,9 @@ void process_cpuid()
3163 family = (fms >> 8) & 0xf;
3164 model = (fms >> 4) & 0xf;
3165 stepping = fms & 0xf;
3166 - if (family == 6 || family == 0xf)
3167 + if (family == 0xf)
3168 + family += (fms >> 20) & 0xff;
3169 + if (family >= 6)
3170 model += ((fms >> 16) & 0xf) << 4;
3171
3172 if (debug) {
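The turbostat change above folds in the extended family bits when the base family is 0xf and applies the extended model bits for any family of 6 or above. A standalone sketch of the same decode, using 0x000806EA purely as a sample CPUID leaf-1 EAX value:

/* Illustrative sketch: decoding the display family/model from the CPUID
 * leaf 1 EAX value ("fms"), following the arithmetic the hunk adds. */
#include <stdio.h>

static void decode_fms(unsigned int fms)
{
    unsigned int family = (fms >> 8) & 0xf;
    unsigned int model = (fms >> 4) & 0xf;
    unsigned int stepping = fms & 0xf;

    if (family == 0xf)
        family += (fms >> 20) & 0xff;          /* extended family */
    if (family >= 6)
        model += ((fms >> 16) & 0xf) << 4;     /* extended model */

    printf("family 0x%x model 0x%x stepping 0x%x\n", family, model, stepping);
}

int main(void)
{
    decode_fms(0x000806EA);   /* -> family 0x6 model 0x8e stepping 0xa */
    return 0;
}
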
3173 diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
3174 new file mode 100644
3175 index 000000000000..3b1f45e13a2e
3176 --- /dev/null
3177 +++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
3178 @@ -0,0 +1,28 @@
3179 +#!/bin/sh
3180 +# description: Snapshot and tracing setting
3181 +# flags: instance
3182 +
3183 +[ ! -f snapshot ] && exit_unsupported
3184 +
3185 +echo "Set tracing off"
3186 +echo 0 > tracing_on
3187 +
3188 +echo "Allocate and take a snapshot"
3189 +echo 1 > snapshot
3190 +
3191 +# Since trace buffer is empty, snapshot is also empty, but allocated
3192 +grep -q "Snapshot is allocated" snapshot
3193 +
3194 +echo "Ensure keep tracing off"
3195 +test `cat tracing_on` -eq 0
3196 +
3197 +echo "Set tracing on"
3198 +echo 1 > tracing_on
3199 +
3200 +echo "Take a snapshot again"
3201 +echo 1 > snapshot
3202 +
3203 +echo "Ensure keep tracing on"
3204 +test `cat tracing_on` -eq 1
3205 +
3206 +exit 0
3207 diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
3208 index 88d5e71be044..47dfa0b0fcd7 100644
3209 --- a/tools/usb/ffs-test.c
3210 +++ b/tools/usb/ffs-test.c
3211 @@ -44,12 +44,25 @@
3212
3213 /******************** Little Endian Handling ********************************/
3214
3215 -#define cpu_to_le16(x) htole16(x)
3216 -#define cpu_to_le32(x) htole32(x)
3217 +/*
3218 + * cpu_to_le16/32 are used when initializing structures, a context where a
3219 + * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way
3220 + * that allows them to be used when initializing structures.
3221 + */
3222 +
3223 +#if __BYTE_ORDER == __LITTLE_ENDIAN
3224 +#define cpu_to_le16(x) (x)
3225 +#define cpu_to_le32(x) (x)
3226 +#else
3227 +#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
3228 +#define cpu_to_le32(x) \
3229 + ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
3230 + (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
3231 +#endif
3232 +
3233 #define le32_to_cpu(x) le32toh(x)
3234 #define le16_to_cpu(x) le16toh(x)
3235
3236 -
3237 /******************** Messages and Errors ***********************************/
3238
3239 static const char argv0[] = "ffs-test";