Magellan Linux

Contents of /trunk/kernel-alx/patches-4.1/0108-4.1.9-all-fixes.patch

Revision 2748
Mon Jan 11 12:00:45 2016 UTC by niro
File size: 195023 bytes
-linux-4.1 patches up to 4.1.15
1 diff --git a/Makefile b/Makefile
2 index dbf3baa5fabb..e071176b2ce6 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 1
8 -SUBLEVEL = 8
9 +SUBLEVEL = 9
10 EXTRAVERSION =
11 NAME = Series 4800
12
13 diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
14 index bd245d34952d..a0765e7ed6c7 100644
15 --- a/arch/arm/boot/compressed/decompress.c
16 +++ b/arch/arm/boot/compressed/decompress.c
17 @@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2);
18
19 int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
20 {
21 - return decompress(input, len, NULL, NULL, output, NULL, error);
22 + return __decompress(input, len, NULL, NULL, output, 0, NULL, error);
23 }
24 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
25 index d9631ecddd56..d6223cbcb661 100644
26 --- a/arch/arm/kvm/arm.c
27 +++ b/arch/arm/kvm/arm.c
28 @@ -450,7 +450,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
29 * Map the VGIC hardware resources before running a vcpu the first
30 * time on this VM.
31 */
32 - if (unlikely(!vgic_ready(kvm))) {
33 + if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
34 ret = kvm_vgic_map_resources(kvm);
35 if (ret)
36 return ret;
37 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
38 index 7796af4b1d6f..6f0a3b41b009 100644
39 --- a/arch/arm64/Kconfig
40 +++ b/arch/arm64/Kconfig
41 @@ -101,6 +101,10 @@ config NO_IOPORT_MAP
42 config STACKTRACE_SUPPORT
43 def_bool y
44
45 +config ILLEGAL_POINTER_VALUE
46 + hex
47 + default 0xdead000000000000
48 +
49 config LOCKDEP_SUPPORT
50 def_bool y
51
52 @@ -409,6 +413,22 @@ config ARM64_ERRATUM_845719
53
54 If unsure, say Y.
55
56 +config ARM64_ERRATUM_843419
57 + bool "Cortex-A53: 843419: A load or store might access an incorrect address"
58 + depends on MODULES
59 + default y
60 + help
61 + This option builds kernel modules using the large memory model in
62 + order to avoid the use of the ADRP instruction, which can cause
63 + a subsequent memory access to use an incorrect address on Cortex-A53
64 + parts up to r0p4.
65 +
66 + Note that the kernel itself must be linked with a version of ld
67 + which fixes potentially affected ADRP instructions through the
68 + use of veneers.
69 +
70 + If unsure, say Y.
71 +
72 endmenu
73
74
75 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
76 index 4d2a925998f9..81151663ef38 100644
77 --- a/arch/arm64/Makefile
78 +++ b/arch/arm64/Makefile
79 @@ -30,6 +30,10 @@ endif
80
81 CHECKFLAGS += -D__aarch64__
82
83 +ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
84 +CFLAGS_MODULE += -mcmodel=large
85 +endif
86 +
87 # Default value
88 head-y := arch/arm64/kernel/head.o
89
90 diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
91 index f800d45ea226..44a59c20e773 100644
92 --- a/arch/arm64/include/asm/memory.h
93 +++ b/arch/arm64/include/asm/memory.h
94 @@ -114,6 +114,14 @@ extern phys_addr_t memstart_addr;
95 #define PHYS_OFFSET ({ memstart_addr; })
96
97 /*
98 + * The maximum physical address that the linear direct mapping
99 + * of system RAM can cover. (PAGE_OFFSET can be interpreted as
100 + * a 2's complement signed quantity and negated to derive the
101 + * maximum size of the linear mapping.)
102 + */
103 +#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; })
104 +
105 +/*
106 * PFNs are used to describe any physical page; this means
107 * PFN 0 == physical address 0.
108 *
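
A user-space sketch of the MAX_MEMBLOCK_ADDR arithmetic introduced above; the values are illustrative assumptions (a 48-bit VA split giving PAGE_OFFSET = 0xffff800000000000, and an invented memstart_addr), not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t page_offset = 0xffff800000000000ULL;   /* assumed VA split */
        uint64_t memstart_addr = 0x80000000ULL;         /* assumed RAM base */

        /*
         * Unsigned wrap-around performs the 2's complement negation the
         * comment describes: -PAGE_OFFSET is the size of the linear map.
         */
        uint64_t linear_size = 0 - page_offset;
        uint64_t max_memblock_addr = memstart_addr - page_offset - 1;

        printf("linear map size:   %#llx\n",
               (unsigned long long)linear_size);
        printf("MAX_MEMBLOCK_ADDR: %#llx\n",
               (unsigned long long)max_memblock_addr);
        return 0;
}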
109 diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
110 index 3dca15634e69..c31e59fe2cb8 100644
111 --- a/arch/arm64/kernel/fpsimd.c
112 +++ b/arch/arm64/kernel/fpsimd.c
113 @@ -157,6 +157,7 @@ void fpsimd_thread_switch(struct task_struct *next)
114 void fpsimd_flush_thread(void)
115 {
116 memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
117 + fpsimd_flush_task_state(current);
118 set_thread_flag(TIF_FOREIGN_FPSTATE);
119 }
120
121 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
122 index 19f915e8f6e0..36aa31ff2c06 100644
123 --- a/arch/arm64/kernel/head.S
124 +++ b/arch/arm64/kernel/head.S
125 @@ -565,6 +565,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
126 msr hstr_el2, xzr // Disable CP15 traps to EL2
127 #endif
128
129 + /* EL2 debug */
130 + mrs x0, pmcr_el0 // Disable debug access traps
131 + ubfx x0, x0, #11, #5 // to EL2 and allow access to
132 + msr mdcr_el2, x0 // all PMU counters from EL1
133 +
134 /* Stage-2 translation */
135 msr vttbr_el2, xzr
136
137 diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
138 index 67bf4107f6ef..876eb8df50bf 100644
139 --- a/arch/arm64/kernel/module.c
140 +++ b/arch/arm64/kernel/module.c
141 @@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
142 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
143 AARCH64_INSN_IMM_ADR);
144 break;
145 +#ifndef CONFIG_ARM64_ERRATUM_843419
146 case R_AARCH64_ADR_PREL_PG_HI21_NC:
147 overflow_check = false;
148 case R_AARCH64_ADR_PREL_PG_HI21:
149 ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
150 AARCH64_INSN_IMM_ADR);
151 break;
152 +#endif
153 case R_AARCH64_ADD_ABS_LO12_NC:
154 case R_AARCH64_LDST8_ABS_LO12_NC:
155 overflow_check = false;
156 diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
157 index c0cff3410166..c58aee062590 100644
158 --- a/arch/arm64/kernel/signal32.c
159 +++ b/arch/arm64/kernel/signal32.c
160 @@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
161
162 /*
163 * VFP save/restore code.
164 + *
165 + * We have to be careful with endianness, since the fpsimd context-switch
166 + * code operates on 128-bit (Q) register values whereas the compat ABI
167 + * uses an array of 64-bit (D) registers. Consequently, we need to swap
168 + * the two halves of each Q register when running on a big-endian CPU.
169 */
170 +union __fpsimd_vreg {
171 + __uint128_t raw;
172 + struct {
173 +#ifdef __AARCH64EB__
174 + u64 hi;
175 + u64 lo;
176 +#else
177 + u64 lo;
178 + u64 hi;
179 +#endif
180 + };
181 +};
182 +
183 static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
184 {
185 struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
186 compat_ulong_t magic = VFP_MAGIC;
187 compat_ulong_t size = VFP_STORAGE_SIZE;
188 compat_ulong_t fpscr, fpexc;
189 - int err = 0;
190 + int i, err = 0;
191
192 /*
193 * Save the hardware registers to the fpsimd_state structure.
194 @@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
195 /*
196 * Now copy the FP registers. Since the registers are packed,
197 * we can copy the prefix we want (V0-V15) as it is.
198 - * FIXME: Won't work if big endian.
199 */
200 - err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
201 - sizeof(frame->ufp.fpregs));
202 + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
203 + union __fpsimd_vreg vreg = {
204 + .raw = fpsimd->vregs[i >> 1],
205 + };
206 +
207 + __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
208 + __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
209 + }
210
211 /* Create an AArch32 fpscr from the fpsr and the fpcr. */
212 fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
213 @@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
214 compat_ulong_t magic = VFP_MAGIC;
215 compat_ulong_t size = VFP_STORAGE_SIZE;
216 compat_ulong_t fpscr;
217 - int err = 0;
218 + int i, err = 0;
219
220 __get_user_error(magic, &frame->magic, err);
221 __get_user_error(size, &frame->size, err);
222 @@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
223 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
224 return -EINVAL;
225
226 - /*
227 - * Copy the FP registers into the start of the fpsimd_state.
228 - * FIXME: Won't work if big endian.
229 - */
230 - err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
231 - sizeof(frame->ufp.fpregs));
232 + /* Copy the FP registers into the start of the fpsimd_state. */
233 + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
234 + union __fpsimd_vreg vreg;
235 +
236 + __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
237 + __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
238 + fpsimd.vregs[i >> 1] = vreg.raw;
239 + }
240
241 /* Extract the fpsr and the fpcr from the fpscr */
242 __get_user_error(fpscr, &frame->ufp.fpscr, err);
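
The union introduced in the signal32.c hunk above can be exercised in isolation: it reinterprets one 128-bit Q-register value as two 64-bit D halves, with member order flipped under __AARCH64EB__ so the halves land in the order the compat frame expects. A minimal sketch assuming a little-endian host and a GCC-style __uint128_t:

#include <stdint.h>
#include <stdio.h>

union fpsimd_vreg {
        __uint128_t raw;        /* one 128-bit Q register, as in the patch */
        struct {
                uint64_t lo;    /* little-endian member order assumed */
                uint64_t hi;
        };
};

int main(void)
{
        union fpsimd_vreg v;

        v.raw = ((__uint128_t)0x1122334455667788ULL << 64)
                | 0x99aabbccddeeff00ULL;

        /* The compat frame stores D registers: lo becomes D[2i], hi D[2i+1]. */
        printf("lo = %#llx\n", (unsigned long long)v.lo);
        printf("hi = %#llx\n", (unsigned long long)v.hi);
        return 0;
}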
243 diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
244 index 5befd010e232..64f9e60b31da 100644
245 --- a/arch/arm64/kvm/hyp.S
246 +++ b/arch/arm64/kvm/hyp.S
247 @@ -844,8 +844,6 @@
248 mrs x3, cntv_ctl_el0
249 and x3, x3, #3
250 str w3, [x0, #VCPU_TIMER_CNTV_CTL]
251 - bic x3, x3, #1 // Clear Enable
252 - msr cntv_ctl_el0, x3
253
254 isb
255
256 @@ -853,6 +851,9 @@
257 str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
258
259 1:
260 + // Disable the virtual timer
261 + msr cntv_ctl_el0, xzr
262 +
263 // Allow physical timer/counter access for the host
264 mrs x2, cnthctl_el2
265 orr x2, x2, #3
266 @@ -947,13 +948,15 @@ ENTRY(__kvm_vcpu_run)
267 // Guest context
268 add x2, x0, #VCPU_CONTEXT
269
270 + // We must restore the 32-bit state before the sysregs, thanks
271 + // to Cortex-A57 erratum #852523.
272 + restore_guest_32bit_state
273 bl __restore_sysregs
274 bl __restore_fpsimd
275
276 skip_debug_state x3, 1f
277 bl __restore_debug
278 1:
279 - restore_guest_32bit_state
280 restore_guest_regs
281
282 // That's it, no more messing around.
283 diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
284 index 28a09529f206..3a7692745868 100644
285 --- a/arch/m32r/boot/compressed/misc.c
286 +++ b/arch/m32r/boot/compressed/misc.c
287 @@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data,
288 free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
289
290 puts("\nDecompressing Linux... ");
291 - decompress(input_data, input_len, NULL, NULL, output_data, NULL, error);
292 + __decompress(input_data, input_len, NULL, NULL, output_data, 0,
293 + NULL, error);
294 puts("done.\nBooting the kernel.\n");
295 }
296 diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
297 index 54831069a206..080cd53bac36 100644
298 --- a/arch/mips/boot/compressed/decompress.c
299 +++ b/arch/mips/boot/compressed/decompress.c
300 @@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start)
301 puts("\n");
302
303 /* Decompress the kernel with according algorithm */
304 - decompress((char *)zimage_start, zimage_size, 0, 0,
305 - (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error);
306 + __decompress((char *)zimage_start, zimage_size, 0, 0,
307 + (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
308
309 /* FIXME: should we flush cache here? */
310 puts("Now, booting the kernel...\n");
311 diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
312 index 6983fcd48131..2b95e34fa9e8 100644
313 --- a/arch/mips/math-emu/cp1emu.c
314 +++ b/arch/mips/math-emu/cp1emu.c
315 @@ -1137,7 +1137,7 @@ emul:
316 break;
317
318 case mfhc_op:
319 - if (!cpu_has_mips_r2)
320 + if (!cpu_has_mips_r2_r6)
321 goto sigill;
322
323 /* copregister rd -> gpr[rt] */
324 @@ -1148,7 +1148,7 @@ emul:
325 break;
326
327 case mthc_op:
328 - if (!cpu_has_mips_r2)
329 + if (!cpu_has_mips_r2_r6)
330 goto sigill;
331
332 /* copregister rd <- gpr[rt] */
333 @@ -1181,6 +1181,24 @@ emul:
334 }
335 break;
336
337 + case bc1eqz_op:
338 + case bc1nez_op:
339 + if (!cpu_has_mips_r6 || delay_slot(xcp))
340 + return SIGILL;
341 +
342 + cond = likely = 0;
343 + switch (MIPSInst_RS(ir)) {
344 + case bc1eqz_op:
345 + if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
346 + cond = 1;
347 + break;
348 + case bc1nez_op:
349 + if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
350 + cond = 1;
351 + break;
352 + }
353 + goto branch_common;
354 +
355 case bc_op:
356 if (delay_slot(xcp))
357 return SIGILL;
358 @@ -1207,7 +1225,7 @@ emul:
359 case bct_op:
360 break;
361 }
362 -
363 +branch_common:
364 set_delay_slot(xcp);
365 if (cond) {
366 /*
367 diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
368 index f3191db6e2e9..c0eab24f6a9e 100644
369 --- a/arch/parisc/kernel/irq.c
370 +++ b/arch/parisc/kernel/irq.c
371 @@ -507,8 +507,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
372 struct pt_regs *old_regs;
373 unsigned long eirr_val;
374 int irq, cpu = smp_processor_id();
375 -#ifdef CONFIG_SMP
376 struct irq_desc *desc;
377 +#ifdef CONFIG_SMP
378 cpumask_t dest;
379 #endif
380
381 @@ -521,8 +521,12 @@ void do_cpu_irq_mask(struct pt_regs *regs)
382 goto set_out;
383 irq = eirr_to_irq(eirr_val);
384
385 -#ifdef CONFIG_SMP
386 + /* Filter out spurious interrupts, mostly from serial port at bootup */
387 desc = irq_to_desc(irq);
388 + if (unlikely(!desc->action))
389 + goto set_out;
390 +
391 +#ifdef CONFIG_SMP
392 cpumask_copy(&dest, desc->irq_data.affinity);
393 if (irqd_is_per_cpu(&desc->irq_data) &&
394 !cpumask_test_cpu(smp_processor_id(), &dest)) {
395 diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
396 index 7ef22e3387e0..0b8d26d3ba43 100644
397 --- a/arch/parisc/kernel/syscall.S
398 +++ b/arch/parisc/kernel/syscall.S
399 @@ -821,7 +821,7 @@ cas2_action:
400 /* 64bit CAS */
401 #ifdef CONFIG_64BIT
402 19: ldd,ma 0(%sr3,%r26), %r29
403 - sub,= %r29, %r25, %r0
404 + sub,*= %r29, %r25, %r0
405 b,n cas2_end
406 20: std,ma %r24, 0(%sr3,%r26)
407 copy %r0, %r28
408 diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
409 index 73eddda53b8e..4eec430d8fa8 100644
410 --- a/arch/powerpc/boot/Makefile
411 +++ b/arch/powerpc/boot/Makefile
412 @@ -28,6 +28,9 @@ BOOTCFLAGS += -m64
413 endif
414 ifdef CONFIG_CPU_BIG_ENDIAN
415 BOOTCFLAGS += -mbig-endian
416 +else
417 +BOOTCFLAGS += -mlittle-endian
418 +BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
419 endif
420
421 BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
422 diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
423 index 43e6ad424c7f..88d27e3258d2 100644
424 --- a/arch/powerpc/include/asm/pgtable-ppc64.h
425 +++ b/arch/powerpc/include/asm/pgtable-ppc64.h
426 @@ -135,7 +135,19 @@
427 #define pte_iterate_hashed_end() } while(0)
428
429 #ifdef CONFIG_PPC_HAS_HASH_64K
430 -#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
431 +/*
432 + * We expect this to be called only for user addresses or kernel virtual
433 + * addresses other than the linear mapping.
434 + */
435 +#define pte_pagesize_index(mm, addr, pte) \
436 + ({ \
437 + unsigned int psize; \
438 + if (is_kernel_addr(addr)) \
439 + psize = MMU_PAGE_4K; \
440 + else \
441 + psize = get_slice_psize(mm, addr); \
442 + psize; \
443 + })
444 #else
445 #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
446 #endif
447 diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
448 index 7a4ede16b283..b77ef369c0f0 100644
449 --- a/arch/powerpc/include/asm/rtas.h
450 +++ b/arch/powerpc/include/asm/rtas.h
451 @@ -343,6 +343,7 @@ extern void rtas_power_off(void);
452 extern void rtas_halt(void);
453 extern void rtas_os_term(char *str);
454 extern int rtas_get_sensor(int sensor, int index, int *state);
455 +extern int rtas_get_sensor_fast(int sensor, int index, int *state);
456 extern int rtas_get_power_level(int powerdomain, int *level);
457 extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
458 extern bool rtas_indicator_present(int token, int *maxindex);
459 diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
460 index 58abeda64cb7..15cca17cba4b 100644
461 --- a/arch/powerpc/include/asm/switch_to.h
462 +++ b/arch/powerpc/include/asm/switch_to.h
463 @@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}
464
465 extern void enable_kernel_fp(void);
466 extern void enable_kernel_altivec(void);
467 +extern void enable_kernel_vsx(void);
468 extern int emulate_altivec(struct pt_regs *);
469 extern void __giveup_vsx(struct task_struct *);
470 extern void giveup_vsx(struct task_struct *);
471 diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
472 index 9ee61d15653d..cb565ad0a5b6 100644
473 --- a/arch/powerpc/kernel/eeh.c
474 +++ b/arch/powerpc/kernel/eeh.c
475 @@ -310,11 +310,26 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
476 if (!(pe->type & EEH_PE_PHB)) {
477 if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
478 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
479 +
480 + /*
481 + * The config space of some PCI devices can't be accessed
482 + * when their PEs are in frozen state. Otherwise, fenced
483 + * PHB might be seen. Those PEs are identified with flag
484 + * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
485 + * is set automatically when the PE is put to EEH_PE_ISOLATED.
486 + *
487 + * Restoring BARs possibly triggers PCI config access in
488 + * (OPAL) firmware and then causes fenced PHB. If the
489 + * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
490 + * pointless to restore BARs and dump config space.
491 + */
492 eeh_ops->configure_bridge(pe);
493 - eeh_pe_restore_bars(pe);
494 + if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
495 + eeh_pe_restore_bars(pe);
496
497 - pci_regs_buf[0] = 0;
498 - eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
499 + pci_regs_buf[0] = 0;
500 + eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
501 + }
502 }
503
504 eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
505 @@ -1118,9 +1133,6 @@ void eeh_add_device_late(struct pci_dev *dev)
506 return;
507 }
508
509 - if (eeh_has_flag(EEH_PROBE_MODE_DEV))
510 - eeh_ops->probe(pdn, NULL);
511 -
512 /*
513 * The EEH cache might not be removed correctly because of
514 * unbalanced kref to the device during unplug time, which
515 @@ -1144,6 +1156,9 @@ void eeh_add_device_late(struct pci_dev *dev)
516 dev->dev.archdata.edev = NULL;
517 }
518
519 + if (eeh_has_flag(EEH_PROBE_MODE_DEV))
520 + eeh_ops->probe(pdn, NULL);
521 +
522 edev->pdev = dev;
523 dev->dev.archdata.edev = edev;
524
525 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
526 index febb50dd5328..0596373cd1c3 100644
527 --- a/arch/powerpc/kernel/process.c
528 +++ b/arch/powerpc/kernel/process.c
529 @@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
530 #endif /* CONFIG_ALTIVEC */
531
532 #ifdef CONFIG_VSX
533 -#if 0
534 -/* not currently used, but some crazy RAID module might want to later */
535 void enable_kernel_vsx(void)
536 {
537 WARN_ON(preemptible());
538 @@ -220,7 +218,6 @@ void enable_kernel_vsx(void)
539 #endif /* CONFIG_SMP */
540 }
541 EXPORT_SYMBOL(enable_kernel_vsx);
542 -#endif
543
544 void giveup_vsx(struct task_struct *tsk)
545 {
546 diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
547 index 7a488c108410..caffb10e7aa3 100644
548 --- a/arch/powerpc/kernel/rtas.c
549 +++ b/arch/powerpc/kernel/rtas.c
550 @@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state)
551 }
552 EXPORT_SYMBOL(rtas_get_sensor);
553
554 +int rtas_get_sensor_fast(int sensor, int index, int *state)
555 +{
556 + int token = rtas_token("get-sensor-state");
557 + int rc;
558 +
559 + if (token == RTAS_UNKNOWN_SERVICE)
560 + return -ENOENT;
561 +
562 + rc = rtas_call(token, 2, 2, state, sensor, index);
563 + WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
564 + rc <= RTAS_EXTENDED_DELAY_MAX));
565 +
566 + if (rc < 0)
567 + return rtas_error_rc(rc);
568 + return rc;
569 +}
570 +
571 bool rtas_indicator_present(int token, int *maxindex)
572 {
573 int proplen, count, i;
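
Context for the rtas_get_sensor_fast() addition above: plain rtas_get_sensor() re-issues the RTAS call while firmware reports a busy or extended-delay status, sleeping between attempts, so it must not be used from the EPOW interrupt handler patched below. A rough user-space sketch of the contrast (the RTAS_BUSY sentinel and the firmware_call() helper are invented for illustration):

#include <stdio.h>

#define RTAS_BUSY (-2)                  /* invented sentinel for the sketch */

static int firmware_call(int *state)    /* stand-in for rtas_call() */
{
        static int busy_left = 2;       /* pretend firmware is busy twice */

        if (busy_left-- > 0)
                return RTAS_BUSY;
        *state = 3;
        return 0;
}

/* Slow path: retry until firmware is ready (in the kernel this sleeps). */
static int get_sensor(int *state)
{
        int rc;

        do {
                rc = firmware_call(state);
        } while (rc == RTAS_BUSY);
        return rc;
}

/* Fast path: one shot, safe in interrupt context; busy is only reported. */
static int get_sensor_fast(int *state)
{
        int rc = firmware_call(state);

        if (rc == RTAS_BUSY)
                fprintf(stderr, "warning: firmware busy\n");
        return rc;
}

int main(void)
{
        int state = 0;
        int rc;

        rc = get_sensor_fast(&state);
        printf("fast: rc=%d\n", rc);
        rc = get_sensor(&state);
        printf("slow: rc=%d state=%d\n", rc, state);
        return 0;
}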
574 diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
575 index 43dafb9d6a46..4d87122cf6a7 100644
576 --- a/arch/powerpc/mm/hugepage-hash64.c
577 +++ b/arch/powerpc/mm/hugepage-hash64.c
578 @@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
579 BUG_ON(index >= 4096);
580
581 vpn = hpt_vpn(ea, vsid, ssize);
582 - hash = hpt_hash(vpn, shift, ssize);
583 hpte_slot_array = get_hpte_slot_array(pmdp);
584 if (psize == MMU_PAGE_4K) {
585 /*
586 @@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
587 valid = hpte_valid(hpte_slot_array, index);
588 if (valid) {
589 /* update the hpte bits */
590 + hash = hpt_hash(vpn, shift, ssize);
591 hidx = hpte_hash_index(hpte_slot_array, index);
592 if (hidx & _PTEIDX_SECONDARY)
593 hash = ~hash;
594 @@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
595 if (!valid) {
596 unsigned long hpte_group;
597
598 + hash = hpt_hash(vpn, shift, ssize);
599 /* insert new entry */
600 pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
601 new_pmd |= _PAGE_HASHPTE;
602 diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
603 index 02e4a1745516..3b6647e574b6 100644
604 --- a/arch/powerpc/platforms/pseries/ras.c
605 +++ b/arch/powerpc/platforms/pseries/ras.c
606 @@ -189,7 +189,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
607 int state;
608 int critical;
609
610 - status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
611 + status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
612 + &state);
613
614 if (state > 3)
615 critical = 1; /* Time Critical */
616 diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
617 index df6a7041922b..e6e8b241d717 100644
618 --- a/arch/powerpc/platforms/pseries/setup.c
619 +++ b/arch/powerpc/platforms/pseries/setup.c
620 @@ -268,6 +268,11 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
621 eeh_dev_init(PCI_DN(np), pci->phb);
622 }
623 break;
624 + case OF_RECONFIG_DETACH_NODE:
625 + pci = PCI_DN(np);
626 + if (pci)
627 + list_del(&pci->list);
628 + break;
629 default:
630 err = NOTIFY_DONE;
631 break;
632 diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
633 index 42506b371b74..4da604ebf6fd 100644
634 --- a/arch/s390/boot/compressed/misc.c
635 +++ b/arch/s390/boot/compressed/misc.c
636 @@ -167,7 +167,7 @@ unsigned long decompress_kernel(void)
637 #endif
638
639 puts("Uncompressing Linux... ");
640 - decompress(input_data, input_len, NULL, NULL, output, NULL, error);
641 + __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
642 puts("Ok, booting the kernel.\n");
643 return (unsigned long) output;
644 }
645 diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
646 index 95470a472d2c..208a9753ab38 100644
647 --- a/arch/sh/boot/compressed/misc.c
648 +++ b/arch/sh/boot/compressed/misc.c
649 @@ -132,7 +132,7 @@ void decompress_kernel(void)
650
651 puts("Uncompressing Linux... ");
652 cache_control(CACHE_ENABLE);
653 - decompress(input_data, input_len, NULL, NULL, output, NULL, error);
654 + __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
655 cache_control(CACHE_DISABLE);
656 puts("Ok, booting the kernel.\n");
657 }
658 diff --git a/arch/unicore32/boot/compressed/misc.c b/arch/unicore32/boot/compressed/misc.c
659 index 176d5bda3559..5c65dfee278c 100644
660 --- a/arch/unicore32/boot/compressed/misc.c
661 +++ b/arch/unicore32/boot/compressed/misc.c
662 @@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start,
663 output_ptr = get_unaligned_le32(tmp);
664
665 arch_decomp_puts("Uncompressing Linux...");
666 - decompress(input_data, input_data_end - input_data, NULL, NULL,
667 - output_data, NULL, error);
668 + __decompress(input_data, input_data_end - input_data, NULL, NULL,
669 + output_data, 0, NULL, error);
670 arch_decomp_puts(" done, booting the kernel.\n");
671 return output_ptr;
672 }
673 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
674 index a107b935e22f..e28437e0f708 100644
675 --- a/arch/x86/boot/compressed/misc.c
676 +++ b/arch/x86/boot/compressed/misc.c
677 @@ -424,7 +424,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
678 #endif
679
680 debug_putstr("\nDecompressing Linux... ");
681 - decompress(input_data, input_len, NULL, NULL, output, NULL, error);
682 + __decompress(input_data, input_len, NULL, NULL, output, output_len,
683 + NULL, error);
684 parse_elf(output);
685 /*
686 * 32-bit always performs relocations. 64-bit relocations are only
687 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
688 index c8140e12816a..c23ab1ee3a9a 100644
689 --- a/arch/x86/mm/init_32.c
690 +++ b/arch/x86/mm/init_32.c
691 @@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end)
692
693 vaddr = start;
694 pgd_idx = pgd_index(vaddr);
695 + pmd_idx = pmd_index(vaddr);
696
697 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
698 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
699 diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
700 index b79685e06b70..279c5d674edf 100644
701 --- a/block/blk-mq-sysfs.c
702 +++ b/block/blk-mq-sysfs.c
703 @@ -141,15 +141,26 @@ static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
704
705 static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
706 {
707 - char *start_page = page;
708 struct request *rq;
709 + int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);
710 +
711 + list_for_each_entry(rq, list, queuelist) {
712 + const int rq_len = 2 * sizeof(rq) + 2;
713 +
714 + /* if the output will be truncated */
715 + if (PAGE_SIZE - 1 < len + rq_len) {
716 + /* backspacing if it can't hold '\t...\n' */
717 + if (PAGE_SIZE - 1 < len + 5)
718 + len -= rq_len;
719 + len += snprintf(page + len, PAGE_SIZE - 1 - len,
720 + "\t...\n");
721 + break;
722 + }
723 + len += snprintf(page + len, PAGE_SIZE - 1 - len,
724 + "\t%p\n", rq);
725 + }
726
727 - page += sprintf(page, "%s:\n", msg);
728 -
729 - list_for_each_entry(rq, list, queuelist)
730 - page += sprintf(page, "\t%p\n", rq);
731 -
732 - return page - start_page;
733 + return len;
734 }
735
736 static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
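
The sysfs_list_show() rewrite above swaps unbounded sprintf() calls for length-tracked snprintf(), so a long request list can no longer overrun the page buffer; when space runs out it backs up and emits a "\t...\n" marker instead. A compact user-space sketch of the same accumulate-and-truncate pattern (buffer size and entries invented; all entries are assumed the same width, as with the kernel's fixed-width %p case):

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 48                     /* stands in for PAGE_SIZE */

static int list_show(char *page, const char **items, int n)
{
        int len = snprintf(page, BUF_SIZE - 1, "items:\n");
        int i;

        for (i = 0; i < n; i++) {
                const int item_len = (int)strlen(items[i]) + 2; /* '\t', '\n' */

                if (BUF_SIZE - 1 < len + item_len) {
                        /* back up over the previous (same-length) entry
                         * if the "\t...\n" marker would not fit as-is */
                        if (BUF_SIZE - 1 < len + 5)
                                len -= item_len;
                        len += snprintf(page + len, BUF_SIZE - 1 - len,
                                        "\t...\n");
                        break;
                }
                len += snprintf(page + len, BUF_SIZE - 1 - len,
                                "\t%s\n", items[i]);
        }
        return len;
}

int main(void)
{
        char page[BUF_SIZE];
        const char *items[] = {
                "ffff880012345678", "ffff8800deadbeef",
                "ffff8800cafebabe", "ffff880087654321",
        };
        int len = list_show(page, items, 4);

        fwrite(page, 1, (size_t)len, stdout);
        return 0;
}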
737 diff --git a/drivers/base/node.c b/drivers/base/node.c
738 index a2aa65b4215d..b10479c87357 100644
739 --- a/drivers/base/node.c
740 +++ b/drivers/base/node.c
741 @@ -388,6 +388,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
742 for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
743 int page_nid;
744
745 + /*
746 + * memory block could have several absent sections from start.
747 + * skip pfn range from absent section
748 + */
749 + if (!pfn_present(pfn)) {
750 + pfn = round_down(pfn + PAGES_PER_SECTION,
751 + PAGES_PER_SECTION) - 1;
752 + continue;
753 + }
754 +
755 page_nid = get_nid_for_pfn(pfn);
756 if (page_nid < 0)
757 continue;
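
The pfn_present() skip added above relies on a small round_down() trick: jump to the last pfn of the current (absent) section so the for-loop's pfn++ lands exactly on the first pfn of the next section. A worked example with an assumed PAGES_PER_SECTION of 0x8000:

#include <stdio.h>

#define PAGES_PER_SECTION 0x8000UL      /* assumed; must be a power of 2 */
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
        unsigned long pfn = 0x8123;     /* somewhere inside section 1 */
        unsigned long next = round_down(pfn + PAGES_PER_SECTION,
                                        PAGES_PER_SECTION) - 1;

        /* prints: pfn 0x8123 -> 0xffff, loop resumes at 0x10000 */
        printf("pfn %#lx -> %#lx, loop resumes at %#lx\n",
               pfn, next, next + 1);
        return 0;
}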
758 diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
759 index ab300ea19434..41f93334cc44 100644
760 --- a/drivers/crypto/vmx/aes.c
761 +++ b/drivers/crypto/vmx/aes.c
762 @@ -80,6 +80,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
763
764 pagefault_disable();
765 enable_kernel_altivec();
766 + enable_kernel_vsx();
767 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
768 ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
769 pagefault_enable();
770 @@ -97,6 +98,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
771 } else {
772 pagefault_disable();
773 enable_kernel_altivec();
774 + enable_kernel_vsx();
775 aes_p8_encrypt(src, dst, &ctx->enc_key);
776 pagefault_enable();
777 }
778 @@ -111,6 +113,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
779 } else {
780 pagefault_disable();
781 enable_kernel_altivec();
782 + enable_kernel_vsx();
783 aes_p8_decrypt(src, dst, &ctx->dec_key);
784 pagefault_enable();
785 }
786 diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
787 index 1a559b7dddb5..c8e7f653e5d3 100644
788 --- a/drivers/crypto/vmx/aes_cbc.c
789 +++ b/drivers/crypto/vmx/aes_cbc.c
790 @@ -81,6 +81,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
791
792 pagefault_disable();
793 enable_kernel_altivec();
794 + enable_kernel_vsx();
795 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
796 ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
797 pagefault_enable();
798 @@ -108,6 +109,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
799 } else {
800 pagefault_disable();
801 enable_kernel_altivec();
802 + enable_kernel_vsx();
803
804 blkcipher_walk_init(&walk, dst, src, nbytes);
805 ret = blkcipher_walk_virt(desc, &walk);
806 @@ -143,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
807 } else {
808 pagefault_disable();
809 enable_kernel_altivec();
810 + enable_kernel_vsx();
811
812 blkcipher_walk_init(&walk, dst, src, nbytes);
813 ret = blkcipher_walk_virt(desc, &walk);
814 diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
815 index 96dbee4bf4a6..266e708d63df 100644
816 --- a/drivers/crypto/vmx/aes_ctr.c
817 +++ b/drivers/crypto/vmx/aes_ctr.c
818 @@ -79,6 +79,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
819
820 pagefault_disable();
821 enable_kernel_altivec();
822 + enable_kernel_vsx();
823 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
824 pagefault_enable();
825
826 @@ -97,6 +98,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
827
828 pagefault_disable();
829 enable_kernel_altivec();
830 + enable_kernel_vsx();
831 aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
832 pagefault_enable();
833
834 @@ -127,6 +129,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
835 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
836 pagefault_disable();
837 enable_kernel_altivec();
838 + enable_kernel_vsx();
839 aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
840 (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv);
841 pagefault_enable();
842 diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
843 index d0ffe277af5c..917b3f09e724 100644
844 --- a/drivers/crypto/vmx/ghash.c
845 +++ b/drivers/crypto/vmx/ghash.c
846 @@ -116,6 +116,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
847
848 pagefault_disable();
849 enable_kernel_altivec();
850 + enable_kernel_vsx();
851 enable_kernel_fp();
852 gcm_init_p8(ctx->htable, (const u64 *) key);
853 pagefault_enable();
854 @@ -142,6 +143,7 @@ static int p8_ghash_update(struct shash_desc *desc,
855 GHASH_DIGEST_SIZE - dctx->bytes);
856 pagefault_disable();
857 enable_kernel_altivec();
858 + enable_kernel_vsx();
859 enable_kernel_fp();
860 gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
861 GHASH_DIGEST_SIZE);
862 @@ -154,6 +156,7 @@ static int p8_ghash_update(struct shash_desc *desc,
863 if (len) {
864 pagefault_disable();
865 enable_kernel_altivec();
866 + enable_kernel_vsx();
867 enable_kernel_fp();
868 gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
869 pagefault_enable();
870 @@ -182,6 +185,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
871 dctx->buffer[i] = 0;
872 pagefault_disable();
873 enable_kernel_altivec();
874 + enable_kernel_vsx();
875 enable_kernel_fp();
876 gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
877 GHASH_DIGEST_SIZE);
878 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
879 index c097d3a82bda..a9b01bcf7d0a 100644
880 --- a/drivers/gpu/drm/radeon/radeon_combios.c
881 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
882 @@ -3387,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
883 rdev->pdev->subsystem_device == 0x30ae)
884 return;
885
886 + /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
887 + * - it hangs on resume inside the dynclk 1 table.
888 + */
889 + if (rdev->family == CHIP_RS480 &&
890 + rdev->pdev->subsystem_vendor == 0x103c &&
891 + rdev->pdev->subsystem_device == 0x280a)
892 + return;
893 +
894 /* DYN CLK 1 */
895 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
896 if (table)
897 diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
898 index b716b0815644..bebf11a6622a 100644
899 --- a/drivers/infiniband/core/uverbs.h
900 +++ b/drivers/infiniband/core/uverbs.h
901 @@ -85,7 +85,7 @@
902 */
903
904 struct ib_uverbs_device {
905 - struct kref ref;
906 + atomic_t refcount;
907 int num_comp_vectors;
908 struct completion comp;
909 struct device *dev;
910 @@ -94,6 +94,7 @@ struct ib_uverbs_device {
911 struct cdev cdev;
912 struct rb_root xrcd_tree;
913 struct mutex xrcd_tree_mutex;
914 + struct kobject kobj;
915 };
916
917 struct ib_uverbs_event_file {
918 diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
919 index a9f048990dfc..ccc2494b4ea7 100644
920 --- a/drivers/infiniband/core/uverbs_cmd.c
921 +++ b/drivers/infiniband/core/uverbs_cmd.c
922 @@ -2244,6 +2244,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
923 next->send_flags = user_wr->send_flags;
924
925 if (is_ud) {
926 + if (next->opcode != IB_WR_SEND &&
927 + next->opcode != IB_WR_SEND_WITH_IMM) {
928 + ret = -EINVAL;
929 + goto out_put;
930 + }
931 +
932 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
933 file->ucontext);
934 if (!next->wr.ud.ah) {
935 @@ -2283,9 +2289,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
936 user_wr->wr.atomic.compare_add;
937 next->wr.atomic.swap = user_wr->wr.atomic.swap;
938 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
939 + case IB_WR_SEND:
940 break;
941 default:
942 - break;
943 + ret = -EINVAL;
944 + goto out_put;
945 }
946 }
947
948 diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
949 index 88cce9bb72fe..09686d49d4c1 100644
950 --- a/drivers/infiniband/core/uverbs_main.c
951 +++ b/drivers/infiniband/core/uverbs_main.c
952 @@ -129,14 +129,18 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
953 static void ib_uverbs_add_one(struct ib_device *device);
954 static void ib_uverbs_remove_one(struct ib_device *device);
955
956 -static void ib_uverbs_release_dev(struct kref *ref)
957 +static void ib_uverbs_release_dev(struct kobject *kobj)
958 {
959 struct ib_uverbs_device *dev =
960 - container_of(ref, struct ib_uverbs_device, ref);
961 + container_of(kobj, struct ib_uverbs_device, kobj);
962
963 - complete(&dev->comp);
964 + kfree(dev);
965 }
966
967 +static struct kobj_type ib_uverbs_dev_ktype = {
968 + .release = ib_uverbs_release_dev,
969 +};
970 +
971 static void ib_uverbs_release_event_file(struct kref *ref)
972 {
973 struct ib_uverbs_event_file *file =
974 @@ -302,13 +306,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
975 return context->device->dealloc_ucontext(context);
976 }
977
978 +static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
979 +{
980 + complete(&dev->comp);
981 +}
982 +
983 static void ib_uverbs_release_file(struct kref *ref)
984 {
985 struct ib_uverbs_file *file =
986 container_of(ref, struct ib_uverbs_file, ref);
987
988 module_put(file->device->ib_dev->owner);
989 - kref_put(&file->device->ref, ib_uverbs_release_dev);
990 + if (atomic_dec_and_test(&file->device->refcount))
991 + ib_uverbs_comp_dev(file->device);
992
993 kfree(file);
994 }
995 @@ -742,9 +752,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
996 int ret;
997
998 dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
999 - if (dev)
1000 - kref_get(&dev->ref);
1001 - else
1002 + if (!atomic_inc_not_zero(&dev->refcount))
1003 return -ENXIO;
1004
1005 if (!try_module_get(dev->ib_dev->owner)) {
1006 @@ -765,6 +773,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
1007 mutex_init(&file->mutex);
1008
1009 filp->private_data = file;
1010 + kobject_get(&dev->kobj);
1011
1012 return nonseekable_open(inode, filp);
1013
1014 @@ -772,13 +781,16 @@ err_module:
1015 module_put(dev->ib_dev->owner);
1016
1017 err:
1018 - kref_put(&dev->ref, ib_uverbs_release_dev);
1019 + if (atomic_dec_and_test(&dev->refcount))
1020 + ib_uverbs_comp_dev(dev);
1021 +
1022 return ret;
1023 }
1024
1025 static int ib_uverbs_close(struct inode *inode, struct file *filp)
1026 {
1027 struct ib_uverbs_file *file = filp->private_data;
1028 + struct ib_uverbs_device *dev = file->device;
1029
1030 ib_uverbs_cleanup_ucontext(file, file->ucontext);
1031
1032 @@ -786,6 +798,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
1033 kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
1034
1035 kref_put(&file->ref, ib_uverbs_release_file);
1036 + kobject_put(&dev->kobj);
1037
1038 return 0;
1039 }
1040 @@ -881,10 +894,11 @@ static void ib_uverbs_add_one(struct ib_device *device)
1041 if (!uverbs_dev)
1042 return;
1043
1044 - kref_init(&uverbs_dev->ref);
1045 + atomic_set(&uverbs_dev->refcount, 1);
1046 init_completion(&uverbs_dev->comp);
1047 uverbs_dev->xrcd_tree = RB_ROOT;
1048 mutex_init(&uverbs_dev->xrcd_tree_mutex);
1049 + kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
1050
1051 spin_lock(&map_lock);
1052 devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
1053 @@ -911,6 +925,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
1054 cdev_init(&uverbs_dev->cdev, NULL);
1055 uverbs_dev->cdev.owner = THIS_MODULE;
1056 uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
1057 + uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
1058 kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
1059 if (cdev_add(&uverbs_dev->cdev, base, 1))
1060 goto err_cdev;
1061 @@ -941,9 +956,10 @@ err_cdev:
1062 clear_bit(devnum, overflow_map);
1063
1064 err:
1065 - kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
1066 + if (atomic_dec_and_test(&uverbs_dev->refcount))
1067 + ib_uverbs_comp_dev(uverbs_dev);
1068 wait_for_completion(&uverbs_dev->comp);
1069 - kfree(uverbs_dev);
1070 + kobject_put(&uverbs_dev->kobj);
1071 return;
1072 }
1073
1074 @@ -963,9 +979,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
1075 else
1076 clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
1077
1078 - kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
1079 + if (atomic_dec_and_test(&uverbs_dev->refcount))
1080 + ib_uverbs_comp_dev(uverbs_dev);
1081 wait_for_completion(&uverbs_dev->comp);
1082 - kfree(uverbs_dev);
1083 + kobject_put(&uverbs_dev->kobj);
1084 }
1085
1086 static char *uverbs_devnode(struct device *dev, umode_t *mode)
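
The uverbs lifetime rework above replaces a kref with a bare atomic counter plus a kobject. The open path now uses atomic_inc_not_zero(), the standard look-up-time guard against reviving an object whose last reference is already gone. A stand-alone C11 sketch of that pattern (type and helper names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
        atomic_int refcount;            /* 1 at creation, 0 once dying */
};

static bool dev_try_get(struct dev *d)
{
        int old = atomic_load(&d->refcount);

        /* user-space equivalent of the kernel's atomic_inc_not_zero() */
        while (old != 0) {
                if (atomic_compare_exchange_weak(&d->refcount, &old, old + 1))
                        return true;
        }
        return false;                   /* object is dying; do not revive */
}

static void on_last_ref(struct dev *d)
{
        (void)d;
        puts("last reference dropped"); /* stands in for complete(&dev->comp) */
}

static void dev_put(struct dev *d)
{
        if (atomic_fetch_sub(&d->refcount, 1) == 1)
                on_last_ref(d);
}

int main(void)
{
        struct dev d = { .refcount = 1 };

        if (dev_try_get(&d))            /* open(): succeeds, count 1 -> 2 */
                dev_put(&d);            /* close(): count 2 -> 1 */
        dev_put(&d);                    /* teardown drops initial ref, fires */
        return 0;
}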
1087 diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
1088 index f50a546224ad..33fdd50123f7 100644
1089 --- a/drivers/infiniband/hw/mlx4/ah.c
1090 +++ b/drivers/infiniband/hw/mlx4/ah.c
1091 @@ -148,9 +148,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1092 enum rdma_link_layer ll;
1093
1094 memset(ah_attr, 0, sizeof *ah_attr);
1095 - ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
1096 ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
1097 ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
1098 + if (ll == IB_LINK_LAYER_ETHERNET)
1099 + ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
1100 + else
1101 + ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
1102 +
1103 ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
1104 if (ah->av.ib.stat_rate)
1105 ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
1106 diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
1107 index 0176caa5792c..2857ed89725e 100644
1108 --- a/drivers/infiniband/hw/mlx4/cq.c
1109 +++ b/drivers/infiniband/hw/mlx4/cq.c
1110 @@ -629,7 +629,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
1111 * simulated FLUSH_ERR completions
1112 */
1113 list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
1114 - mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
1115 + mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
1116 if (*npolled >= num_entries)
1117 goto out;
1118 }
1119 diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
1120 index ed327e6c8fdc..a0559a8af4f4 100644
1121 --- a/drivers/infiniband/hw/mlx4/mcg.c
1122 +++ b/drivers/infiniband/hw/mlx4/mcg.c
1123 @@ -206,15 +206,16 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
1124 {
1125 struct mlx4_ib_dev *dev = ctx->dev;
1126 struct ib_ah_attr ah_attr;
1127 + unsigned long flags;
1128
1129 - spin_lock(&dev->sm_lock);
1130 + spin_lock_irqsave(&dev->sm_lock, flags);
1131 if (!dev->sm_ah[ctx->port - 1]) {
1132 /* port is not yet Active, sm_ah not ready */
1133 - spin_unlock(&dev->sm_lock);
1134 + spin_unlock_irqrestore(&dev->sm_lock, flags);
1135 return -EAGAIN;
1136 }
1137 mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
1138 - spin_unlock(&dev->sm_lock);
1139 + spin_unlock_irqrestore(&dev->sm_lock, flags);
1140 return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
1141 ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
1142 &ah_attr, NULL, mad);
1143 diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
1144 index 6797108ce873..69fb5ba94d0f 100644
1145 --- a/drivers/infiniband/hw/mlx4/sysfs.c
1146 +++ b/drivers/infiniband/hw/mlx4/sysfs.c
1147 @@ -640,6 +640,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
1148 struct mlx4_port *p;
1149 int i;
1150 int ret;
1151 + int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
1152 + IB_LINK_LAYER_ETHERNET;
1153
1154 p = kzalloc(sizeof *p, GFP_KERNEL);
1155 if (!p)
1156 @@ -657,7 +659,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
1157
1158 p->pkey_group.name = "pkey_idx";
1159 p->pkey_group.attrs =
1160 - alloc_group_attrs(show_port_pkey, store_port_pkey,
1161 + alloc_group_attrs(show_port_pkey,
1162 + is_eth ? NULL : store_port_pkey,
1163 dev->dev->caps.pkey_table_len[port_num]);
1164 if (!p->pkey_group.attrs) {
1165 ret = -ENOMEM;
1166 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
1167 index 71c593583864..0c52f078759c 100644
1168 --- a/drivers/infiniband/hw/mlx5/mr.c
1169 +++ b/drivers/infiniband/hw/mlx5/mr.c
1170 @@ -1119,19 +1119,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1171 return &mr->ibmr;
1172
1173 error:
1174 - /*
1175 - * Destroy the umem *before* destroying the MR, to ensure we
1176 - * will not have any in-flight notifiers when destroying the
1177 - * MR.
1178 - *
1179 - * As the MR is completely invalid to begin with, and this
1180 - * error path is only taken if we can't push the mr entry into
1181 - * the pagefault tree, this is safe.
1182 - */
1183 -
1184 ib_umem_release(umem);
1185 - /* Kill the MR, and return an error code. */
1186 - clean_mr(mr);
1187 return ERR_PTR(err);
1188 }
1189
1190 diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
1191 index ad843c786e72..5afaa218508d 100644
1192 --- a/drivers/infiniband/hw/qib/qib_keys.c
1193 +++ b/drivers/infiniband/hw/qib/qib_keys.c
1194 @@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
1195 * unrestricted LKEY.
1196 */
1197 rkt->gen++;
1198 + /*
1199 + * bits are capped in qib_verbs.c to insure enough bits
1200 + * for generation number
1201 + */
1202 mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
1203 ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
1204 << 8);
1205 diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
1206 index 4a3599890ea5..9dd5d9a0556b 100644
1207 --- a/drivers/infiniband/hw/qib/qib_verbs.c
1208 +++ b/drivers/infiniband/hw/qib/qib_verbs.c
1209 @@ -40,6 +40,7 @@
1210 #include <linux/rculist.h>
1211 #include <linux/mm.h>
1212 #include <linux/random.h>
1213 +#include <linux/vmalloc.h>
1214
1215 #include "qib.h"
1216 #include "qib_common.h"
1217 @@ -2089,10 +2090,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
1218 * the LKEY). The remaining bits act as a generation number or tag.
1219 */
1220 spin_lock_init(&dev->lk_table.lock);
1221 + /* insure generation is at least 4 bits see keys.c */
1222 + if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
1223 + qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
1224 + ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
1225 + ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
1226 + }
1227 dev->lk_table.max = 1 << ib_qib_lkey_table_size;
1228 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
1229 dev->lk_table.table = (struct qib_mregion __rcu **)
1230 - __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
1231 + vmalloc(lk_tab_size);
1232 if (dev->lk_table.table == NULL) {
1233 ret = -ENOMEM;
1234 goto err_lk;
1235 @@ -2265,7 +2272,7 @@ err_tx:
1236 sizeof(struct qib_pio_header),
1237 dev->pio_hdrs, dev->pio_hdrs_phys);
1238 err_hdrs:
1239 - free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
1240 + vfree(dev->lk_table.table);
1241 err_lk:
1242 kfree(dev->qp_table);
1243 err_qpt:
1244 @@ -2319,8 +2326,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
1245 sizeof(struct qib_pio_header),
1246 dev->pio_hdrs, dev->pio_hdrs_phys);
1247 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
1248 - free_pages((unsigned long) dev->lk_table.table,
1249 - get_order(lk_tab_size));
1250 + vfree(dev->lk_table.table);
1251 kfree(dev->qp_table);
1252 }
1253
1254 diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
1255 index bfc8948fdd35..44ca28c83fe6 100644
1256 --- a/drivers/infiniband/hw/qib/qib_verbs.h
1257 +++ b/drivers/infiniband/hw/qib/qib_verbs.h
1258 @@ -647,6 +647,8 @@ struct qib_qpn_table {
1259 struct qpn_map map[QPNMAP_ENTRIES];
1260 };
1261
1262 +#define MAX_LKEY_TABLE_BITS 23
1263 +
1264 struct qib_lkey_table {
1265 spinlock_t lock; /* protect changes in this struct */
1266 u32 next; /* next unused index (speeds search) */
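
For scale on the vmalloc() switch above: with the new MAX_LKEY_TABLE_BITS cap of 23, the lkey table holds 2^23 pointers, which on a 64-bit host (assumed below) is 64 MiB. That is well past what the page allocator's physically contiguous allocations can typically satisfy, hence the move from __get_free_pages() to vmalloc(). A quick check:

#include <stdio.h>

int main(void)
{
        unsigned int bits = 23;                 /* MAX_LKEY_TABLE_BITS */
        unsigned long entries = 1UL << bits;
        unsigned long bytes = entries * sizeof(void *);

        /* prints: 8388608 entries x 8 bytes = 64 MiB (on a 64-bit host) */
        printf("%lu entries x %zu bytes = %lu MiB\n",
               entries, sizeof(void *), bytes >> 20);
        return 0;
}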
1267 diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
1268 index 6a594aac2290..c933d882c35c 100644
1269 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c
1270 +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
1271 @@ -201,6 +201,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
1272 goto out;
1273 }
1274
1275 + tx_desc->mapped = true;
1276 tx_desc->dma_addr = dma_addr;
1277 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
1278 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
1279 @@ -360,16 +361,19 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
1280 static void iscsi_iser_cleanup_task(struct iscsi_task *task)
1281 {
1282 struct iscsi_iser_task *iser_task = task->dd_data;
1283 - struct iser_tx_desc *tx_desc = &iser_task->desc;
1284 - struct iser_conn *iser_conn = task->conn->dd_data;
1285 + struct iser_tx_desc *tx_desc = &iser_task->desc;
1286 + struct iser_conn *iser_conn = task->conn->dd_data;
1287 struct iser_device *device = iser_conn->ib_conn.device;
1288
1289 /* DEVICE_REMOVAL event might have already released the device */
1290 if (!device)
1291 return;
1292
1293 - ib_dma_unmap_single(device->ib_device,
1294 - tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
1295 + if (likely(tx_desc->mapped)) {
1296 + ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
1297 + ISER_HEADERS_LEN, DMA_TO_DEVICE);
1298 + tx_desc->mapped = false;
1299 + }
1300
1301 /* mgmt tasks do not need special cleanup */
1302 if (!task->sc)
1303 diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
1304 index 262ba1f8ee50..d2b6caf7694d 100644
1305 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h
1306 +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
1307 @@ -270,6 +270,7 @@ enum iser_desc_type {
1308 * sg[1] optionally points to either of immediate data
1309 * unsolicited data-out or control
1310 * @num_sge: number sges used on this TX task
1311 + * @mapped: Is the task header mapped
1312 */
1313 struct iser_tx_desc {
1314 struct iser_hdr iser_header;
1315 @@ -278,6 +279,7 @@ struct iser_tx_desc {
1316 u64 dma_addr;
1317 struct ib_sge tx_sg[2];
1318 int num_sge;
1319 + bool mapped;
1320 };
1321
1322 #define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
1323 diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
1324 index 3e2118e8ed87..0a47f42fec24 100644
1325 --- a/drivers/infiniband/ulp/iser/iser_initiator.c
1326 +++ b/drivers/infiniband/ulp/iser/iser_initiator.c
1327 @@ -454,7 +454,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
1328 unsigned long buf_offset;
1329 unsigned long data_seg_len;
1330 uint32_t itt;
1331 - int err = 0;
1332 + int err;
1333 struct ib_sge *tx_dsg;
1334
1335 itt = (__force uint32_t)hdr->itt;
1336 @@ -475,7 +475,9 @@ int iser_send_data_out(struct iscsi_conn *conn,
1337 memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
1338
1339 /* build the tx desc */
1340 - iser_initialize_task_headers(task, tx_desc);
1341 + err = iser_initialize_task_headers(task, tx_desc);
1342 + if (err)
1343 + goto send_data_out_error;
1344
1345 mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
1346 tx_dsg = &tx_desc->tx_sg[1];
1347 @@ -502,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
1348
1349 send_data_out_error:
1350 kmem_cache_free(ig.desc_cache, tx_desc);
1351 - iser_err("conn %p failed err %d\n",conn, err);
1352 + iser_err("conn %p failed err %d\n", conn, err);
1353 return err;
1354 }
1355
1356 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1357 index 75c01b27bd0b..025f93105444 100644
1358 --- a/drivers/infiniband/ulp/srp/ib_srp.c
1359 +++ b/drivers/infiniband/ulp/srp/ib_srp.c
1360 @@ -2761,6 +2761,13 @@ static int srp_sdev_count(struct Scsi_Host *host)
1361 return c;
1362 }
1363
1364 +/*
1365 + * Return values:
1366 + * < 0 upon failure. Caller is responsible for SRP target port cleanup.
1367 + * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
1368 + * removal has been scheduled.
1369 + * 0 and target->state != SRP_TARGET_REMOVED upon success.
1370 + */
1371 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
1372 {
1373 struct srp_rport_identifiers ids;
1374 @@ -3266,7 +3273,7 @@ static ssize_t srp_create_target(struct device *dev,
1375 srp_free_ch_ib(target, ch);
1376 srp_free_req_data(target, ch);
1377 target->ch_count = ch - target->ch;
1378 - break;
1379 + goto connected;
1380 }
1381 }
1382
1383 @@ -3276,6 +3283,7 @@ static ssize_t srp_create_target(struct device *dev,
1384 node_idx++;
1385 }
1386
1387 +connected:
1388 target->scsi_host->nr_hw_queues = target->ch_count;
1389
1390 ret = srp_add_target(host, target);
1391 @@ -3298,6 +3306,8 @@ out:
1392 mutex_unlock(&host->add_target_mutex);
1393
1394 scsi_host_put(target->scsi_host);
1395 + if (ret < 0)
1396 + scsi_host_put(target->scsi_host);
1397
1398 return ret;
1399
1400 diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
1401 index a18f41b89b6a..2ae522f0d2b2 100644
1402 --- a/drivers/input/evdev.c
1403 +++ b/drivers/input/evdev.c
1404 @@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
1405 {
1406 struct evdev_client *client = file->private_data;
1407 struct evdev *evdev = client->evdev;
1408 - int retval;
1409
1410 - retval = mutex_lock_interruptible(&evdev->mutex);
1411 - if (retval)
1412 - return retval;
1413 + mutex_lock(&evdev->mutex);
1414
1415 - if (!evdev->exist || client->revoked)
1416 - retval = -ENODEV;
1417 - else
1418 - retval = input_flush_device(&evdev->handle, file);
1419 + if (evdev->exist && !client->revoked)
1420 + input_flush_device(&evdev->handle, file);
1421
1422 mutex_unlock(&evdev->mutex);
1423 - return retval;
1424 + return 0;
1425 }
1426
1427 static void evdev_free(struct device *dev)
1428 diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
1429 index abeedc9a78c2..2570f2a25dc4 100644
1430 --- a/drivers/iommu/fsl_pamu.c
1431 +++ b/drivers/iommu/fsl_pamu.c
1432 @@ -41,7 +41,6 @@ struct pamu_isr_data {
1433
1434 static struct paace *ppaact;
1435 static struct paace *spaact;
1436 -static struct ome *omt __initdata;
1437
1438 /*
1439 * Table for matching compatible strings, for device tree
1440 @@ -50,7 +49,7 @@ static struct ome *omt __initdata;
1441 * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
1442 * string would be used.
1443 */
1444 -static const struct of_device_id guts_device_ids[] __initconst = {
1445 +static const struct of_device_id guts_device_ids[] = {
1446 { .compatible = "fsl,qoriq-device-config-1.0", },
1447 { .compatible = "fsl,qoriq-device-config-2.0", },
1448 {}
1449 @@ -599,7 +598,7 @@ found_cpu_node:
1450 * Memory accesses to QMAN and BMAN private memory need not be coherent, so
1451 * clear the PAACE entry coherency attribute for them.
1452 */
1453 -static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
1454 +static void setup_qbman_paace(struct paace *ppaace, int paace_type)
1455 {
1456 switch (paace_type) {
1457 case QMAN_PAACE:
1458 @@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
1459 * this table to translate device transaction to appropriate corenet
1460 * transaction.
1461 */
1462 -static void __init setup_omt(struct ome *omt)
1463 +static void setup_omt(struct ome *omt)
1464 {
1465 struct ome *ome;
1466
1467 @@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
1468 * Get the maximum number of PAACT table entries
1469 * and subwindows supported by PAMU
1470 */
1471 -static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
1472 +static void get_pamu_cap_values(unsigned long pamu_reg_base)
1473 {
1474 u32 pc_val;
1475
1476 @@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
1477 }
1478
1479 /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
1480 -static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
1481 - phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
1482 - phys_addr_t omt_phys)
1483 +static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
1484 + phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
1485 + phys_addr_t omt_phys)
1486 {
1487 u32 *pc;
1488 struct pamu_mmap_regs *pamu_regs;
1489 @@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
1490 }
1491
1492 /* Enable all device LIODNS */
1493 -static void __init setup_liodns(void)
1494 +static void setup_liodns(void)
1495 {
1496 int i, len;
1497 struct paace *ppaace;
1498 @@ -846,7 +845,7 @@ struct ccsr_law {
1499 /*
1500 * Create a coherence subdomain for a given memory block.
1501 */
1502 -static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
1503 +static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
1504 {
1505 struct device_node *np;
1506 const __be32 *iprop;
1507 @@ -988,7 +987,7 @@ error:
1508 static const struct {
1509 u32 svr;
1510 u32 port_id;
1511 -} port_id_map[] __initconst = {
1512 +} port_id_map[] = {
1513 {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
1514 {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
1515 {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
1516 @@ -1006,7 +1005,7 @@ static const struct {
1517
1518 #define SVR_SECURITY 0x80000 /* The Security (E) bit */
1519
1520 -static int __init fsl_pamu_probe(struct platform_device *pdev)
1521 +static int fsl_pamu_probe(struct platform_device *pdev)
1522 {
1523 struct device *dev = &pdev->dev;
1524 void __iomem *pamu_regs = NULL;
1525 @@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1526 int irq;
1527 phys_addr_t ppaact_phys;
1528 phys_addr_t spaact_phys;
1529 + struct ome *omt;
1530 phys_addr_t omt_phys;
1531 size_t mem_size = 0;
1532 unsigned int order = 0;
1533 @@ -1200,7 +1200,7 @@ error:
1534 return ret;
1535 }
1536
1537 -static struct platform_driver fsl_of_pamu_driver __initdata = {
1538 +static struct platform_driver fsl_of_pamu_driver = {
1539 .driver = {
1540 .name = "fsl-of-pamu",
1541 },
1542 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1543 index c87c4b1bfc00..c23427951ec1 100644
1544 --- a/drivers/iommu/intel-iommu.c
1545 +++ b/drivers/iommu/intel-iommu.c
1546 @@ -681,6 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
1547 struct context_entry *context;
1548 u64 *entry;
1549
1550 + entry = &root->lo;
1551 if (ecs_enabled(iommu)) {
1552 if (devfn >= 0x80) {
1553 devfn -= 0x80;
1554 @@ -688,7 +689,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
1555 }
1556 devfn *= 2;
1557 }
1558 - entry = &root->lo;
1559 if (*entry & 1)
1560 context = phys_to_virt(*entry & VTD_PAGE_MASK);
1561 else {
1562 diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
1563 index 4e460216bd16..e29d5d7fe220 100644
1564 --- a/drivers/iommu/io-pgtable-arm.c
1565 +++ b/drivers/iommu/io-pgtable-arm.c
1566 @@ -200,6 +200,10 @@ typedef u64 arm_lpae_iopte;
1567
1568 static bool selftest_running = false;
1569
1570 +static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
1571 + unsigned long iova, size_t size, int lvl,
1572 + arm_lpae_iopte *ptep);
1573 +
1574 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
1575 unsigned long iova, phys_addr_t paddr,
1576 arm_lpae_iopte prot, int lvl,
1577 @@ -207,10 +211,21 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
1578 {
1579 arm_lpae_iopte pte = prot;
1580
1581 - /* We require an unmap first */
1582 if (iopte_leaf(*ptep, lvl)) {
1583 + /* We require an unmap first */
1584 WARN_ON(!selftest_running);
1585 return -EEXIST;
1586 + } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
1587 + /*
1588 + * We need to unmap and free the old table before
1589 + * overwriting it with a block entry.
1590 + */
1591 + arm_lpae_iopte *tblp;
1592 + size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
1593 +
1594 + tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
1595 + if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
1596 + return -EINVAL;
1597 }
1598
1599 if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
1600 diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
1601 index c845d99ecf6b..e0ff5f4d7fed 100644
1602 --- a/drivers/iommu/tegra-smmu.c
1603 +++ b/drivers/iommu/tegra-smmu.c
1604 @@ -26,6 +26,7 @@ struct tegra_smmu {
1605 const struct tegra_smmu_soc *soc;
1606
1607 unsigned long pfn_mask;
1608 + unsigned long tlb_mask;
1609
1610 unsigned long *asids;
1611 struct mutex lock;
1612 @@ -65,7 +66,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
1613 #define SMMU_TLB_CONFIG 0x14
1614 #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
1615 #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
1616 -#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
1617 +#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
1618 + ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
1619
1620 #define SMMU_PTC_CONFIG 0x18
1621 #define SMMU_PTC_CONFIG_ENABLE (1 << 29)
1622 @@ -716,6 +718,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
1623 smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
1624 dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
1625 mc->soc->num_address_bits, smmu->pfn_mask);
1626 + smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
1627 + dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
1628 + smmu->tlb_mask);
1629
1630 value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
1631
1632 @@ -725,7 +730,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
1633 smmu_writel(smmu, value, SMMU_PTC_CONFIG);
1634
1635 value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
1636 - SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
1637 + SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
1638
1639 if (soc->supports_round_robin_arbitration)
1640 value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
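The tegra-smmu hunk above stops hard-coding 0x20 active TLB lines (masked with a fixed 0x3f) and instead clamps a per-SoC count — added in the tegra114/124/30 hunks further down — with a mask derived from that count. A minimal userspace sketch of the arithmetic, assuming power-of-two line counts as in those SoC tables:

#include <stdio.h>

/* For a power-of-two line count n, (n << 1) - 1 is a mask just wide
 * enough to pass the value n itself: 32 -> 0x3f, 16 -> 0x1f. */
static unsigned long tlb_mask(unsigned int num_tlb_lines)
{
	return ((unsigned long)num_tlb_lines << 1) - 1;
}

int main(void)
{
	unsigned int lines[] = { 16, 32 };
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("lines=%u mask=%#lx active=%#lx\n",
		       lines[i], tlb_mask(lines[i]),
		       (unsigned long)lines[i] & tlb_mask(lines[i]));
	return 0;
}

With only 16 lines (Tegra30), the old constant 0x20 would have programmed more lines than the hardware has; the derived mask keeps the programmed value within what the SoC supports.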
1641 diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
1642 index 8c91fd5eb6fd..3ac9c4194814 100644
1643 --- a/drivers/isdn/gigaset/ser-gigaset.c
1644 +++ b/drivers/isdn/gigaset/ser-gigaset.c
1645 @@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
1646 cs->hw.ser->tty = tty;
1647 atomic_set(&cs->hw.ser->refcnt, 1);
1648 init_completion(&cs->hw.ser->dead_cmp);
1649 -
1650 tty->disc_data = cs;
1651
1652 + /* Set the amount of data we're willing to receive per call
1653 + * from the hardware driver to half of the input buffer size
1654 + * to leave some reserve.
1655 + * Note: We don't do flow control towards the hardware driver.
1656 + * If more data is received than will fit into the input buffer,
1657 + * it will be dropped and an error will be logged. This should
1658 + * never happen as the device is slow and the buffer size ample.
1659 + */
1660 + tty->receive_room = RBUFSIZE/2;
1661 +
1662 /* OK.. Initialization of the datastructures and the HW is done.. Now
1663 * startup system and notify the LL that we are ready to run
1664 */
1665 diff --git a/drivers/md/md.c b/drivers/md/md.c
1666 index e4621511d118..e8c44fcb1ad1 100644
1667 --- a/drivers/md/md.c
1668 +++ b/drivers/md/md.c
1669 @@ -5365,6 +5365,8 @@ static void __md_stop(struct mddev *mddev)
1670 {
1671 struct md_personality *pers = mddev->pers;
1672 mddev_detach(mddev);
1673 + /* Ensure ->event_work is done */
1674 + flush_workqueue(md_misc_wq);
1675 spin_lock(&mddev->lock);
1676 mddev->ready = 0;
1677 mddev->pers = NULL;
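The added flush_workqueue() call ensures any ->event_work still queued on md_misc_wq has finished before the personality data it touches is torn down. A sketch of the general ordering, with my_obj and my_wq as hypothetical stand-ins rather than md names:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_obj {
	struct work_struct event_work;
	/* ... state that the work handler dereferences ... */
};

static void my_obj_teardown(struct workqueue_struct *my_wq, struct my_obj *obj)
{
	/* Let any queued or running handler finish before its data goes away. */
	flush_workqueue(my_wq);		/* or flush_work(&obj->event_work) */
	kfree(obj);
}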
1678 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1679 index f55c3f35b746..fe0122771642 100644
1680 --- a/drivers/md/raid10.c
1681 +++ b/drivers/md/raid10.c
1682 @@ -3566,6 +3566,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
1683 /* far_copies must be 1 */
1684 conf->prev.stride = conf->dev_sectors;
1685 }
1686 + conf->reshape_safe = conf->reshape_progress;
1687 spin_lock_init(&conf->device_lock);
1688 INIT_LIST_HEAD(&conf->retry_list);
1689
1690 @@ -3770,7 +3771,6 @@ static int run(struct mddev *mddev)
1691 }
1692 conf->offset_diff = min_offset_diff;
1693
1694 - conf->reshape_safe = conf->reshape_progress;
1695 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
1696 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
1697 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
1698 @@ -4113,6 +4113,7 @@ static int raid10_start_reshape(struct mddev *mddev)
1699 conf->reshape_progress = size;
1700 } else
1701 conf->reshape_progress = 0;
1702 + conf->reshape_safe = conf->reshape_progress;
1703 spin_unlock_irq(&conf->device_lock);
1704
1705 if (mddev->delta_disks && mddev->bitmap) {
1706 @@ -4180,6 +4181,7 @@ abort:
1707 rdev->new_data_offset = rdev->data_offset;
1708 smp_wmb();
1709 conf->reshape_progress = MaxSector;
1710 + conf->reshape_safe = MaxSector;
1711 mddev->reshape_position = MaxSector;
1712 spin_unlock_irq(&conf->device_lock);
1713 return ret;
1714 @@ -4534,6 +4536,7 @@ static void end_reshape(struct r10conf *conf)
1715 md_finish_reshape(conf->mddev);
1716 smp_wmb();
1717 conf->reshape_progress = MaxSector;
1718 + conf->reshape_safe = MaxSector;
1719 spin_unlock_irq(&conf->device_lock);
1720
1721 /* read-ahead size must cover two whole stripes, which is
1722 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1723 index b6793d2e051f..23af6772f146 100644
1724 --- a/drivers/md/raid5.c
1725 +++ b/drivers/md/raid5.c
1726 @@ -2151,6 +2151,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1727 if (!sc)
1728 return -ENOMEM;
1729
1730 + /* Need to ensure auto-resizing doesn't interfere */
1731 + mutex_lock(&conf->cache_size_mutex);
1732 +
1733 for (i = conf->max_nr_stripes; i; i--) {
1734 nsh = alloc_stripe(sc, GFP_KERNEL);
1735 if (!nsh)
1736 @@ -2167,6 +2170,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1737 kmem_cache_free(sc, nsh);
1738 }
1739 kmem_cache_destroy(sc);
1740 + mutex_unlock(&conf->cache_size_mutex);
1741 return -ENOMEM;
1742 }
1743 /* Step 2 - Must use GFP_NOIO now.
1744 @@ -2213,6 +2217,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1745 } else
1746 err = -ENOMEM;
1747
1748 + mutex_unlock(&conf->cache_size_mutex);
1749 /* Step 4, return new stripes to service */
1750 while(!list_empty(&newstripes)) {
1751 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1752 @@ -2240,7 +2245,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1753 static int drop_one_stripe(struct r5conf *conf)
1754 {
1755 struct stripe_head *sh;
1756 - int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
1757 + int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
1758
1759 spin_lock_irq(conf->hash_locks + hash);
1760 sh = get_free_stripe(conf, hash);
1761 @@ -5846,12 +5851,14 @@ static void raid5d(struct md_thread *thread)
1762 pr_debug("%d stripes handled\n", handled);
1763
1764 spin_unlock_irq(&conf->device_lock);
1765 - if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
1766 + if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
1767 + mutex_trylock(&conf->cache_size_mutex)) {
1768 grow_one_stripe(conf, __GFP_NOWARN);
1769 /* Set flag even if allocation failed. This helps
1770 * slow down allocation requests when mem is short
1771 */
1772 set_bit(R5_DID_ALLOC, &conf->cache_state);
1773 + mutex_unlock(&conf->cache_size_mutex);
1774 }
1775
1776 async_tx_issue_pending_all();
1777 @@ -5883,18 +5890,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
1778 return -EINVAL;
1779
1780 conf->min_nr_stripes = size;
1781 + mutex_lock(&conf->cache_size_mutex);
1782 while (size < conf->max_nr_stripes &&
1783 drop_one_stripe(conf))
1784 ;
1785 + mutex_unlock(&conf->cache_size_mutex);
1786
1787
1788 err = md_allow_write(mddev);
1789 if (err)
1790 return err;
1791
1792 + mutex_lock(&conf->cache_size_mutex);
1793 while (size > conf->max_nr_stripes)
1794 if (!grow_one_stripe(conf, GFP_KERNEL))
1795 break;
1796 + mutex_unlock(&conf->cache_size_mutex);
1797
1798 return 0;
1799 }
1800 @@ -6360,11 +6371,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
1801 struct shrink_control *sc)
1802 {
1803 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
1804 - int ret = 0;
1805 - while (ret < sc->nr_to_scan) {
1806 - if (drop_one_stripe(conf) == 0)
1807 - return SHRINK_STOP;
1808 - ret++;
1809 + unsigned long ret = SHRINK_STOP;
1810 +
1811 + if (mutex_trylock(&conf->cache_size_mutex)) {
1812 +		ret = 0;
1813 + while (ret < sc->nr_to_scan &&
1814 + conf->max_nr_stripes > conf->min_nr_stripes) {
1815 + if (drop_one_stripe(conf) == 0) {
1816 + ret = SHRINK_STOP;
1817 + break;
1818 + }
1819 + ret++;
1820 + }
1821 + mutex_unlock(&conf->cache_size_mutex);
1822 }
1823 return ret;
1824 }
1825 @@ -6433,6 +6452,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
1826 goto abort;
1827 spin_lock_init(&conf->device_lock);
1828 seqcount_init(&conf->gen_lock);
1829 + mutex_init(&conf->cache_size_mutex);
1830 init_waitqueue_head(&conf->wait_for_stripe);
1831 init_waitqueue_head(&conf->wait_for_overlap);
1832 INIT_LIST_HEAD(&conf->handle_list);
1833 diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
1834 index 896d603ad0da..03472fbbd882 100644
1835 --- a/drivers/md/raid5.h
1836 +++ b/drivers/md/raid5.h
1837 @@ -482,7 +482,8 @@ struct r5conf {
1838 */
1839 int active_name;
1840 char cache_name[2][32];
1841 - struct kmem_cache *slab_cache; /* for allocating stripes */
1842 + struct kmem_cache *slab_cache; /* for allocating stripes */
1843 + struct mutex cache_size_mutex; /* Protect changes to cache size */
1844
1845 int seq_flush, seq_write;
1846 int quiesce;
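The raid5 hunks introduce cache_size_mutex so explicit resizes, the daemon's opportunistic grow and the shrinker cannot interleave; the reclaim-side paths take it with mutex_trylock() only, backing off instead of blocking (a shrinker that sleeps on a lock held by an allocating resize invites deadlock). A runnable userspace analogue of the trylock-in-reclaim shape, with simplified stand-ins for the raid5 fields:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
static int max_nr_stripes = 256, min_nr_stripes = 64;

/* Slow path: an explicit resize may block on the lock. */
static void set_cache_size(int size)
{
	pthread_mutex_lock(&cache_size_mutex);
	max_nr_stripes = size;
	pthread_mutex_unlock(&cache_size_mutex);
}

/* Reclaim path: must never block, so only trylock. */
static long cache_scan(long nr_to_scan)
{
	long freed = -1;	/* analogue of SHRINK_STOP */

	if (pthread_mutex_trylock(&cache_size_mutex) == 0) {
		freed = 0;
		while (freed < nr_to_scan && max_nr_stripes > min_nr_stripes) {
			max_nr_stripes--;	/* drop_one_stripe() stand-in */
			freed++;
		}
		pthread_mutex_unlock(&cache_size_mutex);
	}
	return freed;
}

int main(void)
{
	set_cache_size(128);
	printf("freed %ld, max now %d\n", cache_scan(16), max_nr_stripes);
	return 0;
}

Here -1 plays the role of SHRINK_STOP: the scan reports no progress when it cannot take the lock.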
1847 diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
1848 index a30cc2f7e4f1..ddf59ee5ca40 100644
1849 --- a/drivers/media/platform/am437x/am437x-vpfe.c
1850 +++ b/drivers/media/platform/am437x/am437x-vpfe.c
1851 @@ -1185,14 +1185,24 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe)
1852 static int vpfe_release(struct file *file)
1853 {
1854 struct vpfe_device *vpfe = video_drvdata(file);
1855 + bool fh_singular;
1856 int ret;
1857
1858 mutex_lock(&vpfe->lock);
1859
1860 - if (v4l2_fh_is_singular_file(file))
1861 - vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
1862 + /* Save the singular status before we call the clean-up helper */
1863 + fh_singular = v4l2_fh_is_singular_file(file);
1864 +
1865 +	/* the release helper will clean up any ongoing streaming */
1866 ret = _vb2_fop_release(file, NULL);
1867
1868 +	/*
1869 +	 * If this was the last open file,
1870 +	 * then de-initialize the hw module.
1871 +	 */
1872 + if (fh_singular)
1873 + vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
1874 +
1875 mutex_unlock(&vpfe->lock);
1876
1877 return ret;
1878 @@ -1577,7 +1587,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
1879 return -EBUSY;
1880 }
1881
1882 - ret = vpfe_try_fmt(file, priv, fmt);
1883 + ret = vpfe_try_fmt(file, priv, &format);
1884 if (ret)
1885 return ret;
1886
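The release fix above reads v4l2_fh_is_singular_file() into a local before calling _vb2_fop_release(), which releases the very file handle that test inspects; the s_fmt fix likewise passes the driver-validated &format rather than the caller's buffer. A compilable sketch of the capture-before-release ordering, with hypothetical types:

#include <stdlib.h>

/* Hypothetical types; destroy_handle() plays the role of _vb2_fop_release(). */
struct handle { int is_last; };

static int destroy_handle(struct handle *h) { free(h); return 0; }
static void power_down_hw(void) { /* stand-in for vpfe_ccdc_close() */ }

static int release(struct handle *h)
{
	int was_last = h->is_last;   /* capture while h is still valid */
	int ret = destroy_handle(h); /* h is gone; never touch it again */

	if (was_last)
		power_down_hw();
	return ret;
}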
1887 diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
1888 index 18d0a871747f..947d8be7b245 100644
1889 --- a/drivers/media/platform/omap3isp/isp.c
1890 +++ b/drivers/media/platform/omap3isp/isp.c
1891 @@ -829,14 +829,14 @@ static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
1892 int ret;
1893
1894 if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
1895 - !(link->flags & MEDIA_LNK_FL_ENABLED)) {
1896 + !(flags & MEDIA_LNK_FL_ENABLED)) {
1897 /* Powering off entities is assumed to never fail. */
1898 isp_pipeline_pm_power(source, -sink_use);
1899 isp_pipeline_pm_power(sink, -source_use);
1900 return 0;
1901 }
1902
1903 - if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
1904 + if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
1905 (flags & MEDIA_LNK_FL_ENABLED)) {
1906
1907 ret = isp_pipeline_pm_power(source, sink_use);
1908 diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
1909 index f8c5e47a30aa..0aba9ff92102 100644
1910 --- a/drivers/media/rc/rc-main.c
1911 +++ b/drivers/media/rc/rc-main.c
1912 @@ -1191,9 +1191,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1913 {
1914 struct rc_dev *dev = to_rc_dev(device);
1915
1916 - if (!dev || !dev->input_dev)
1917 - return -ENODEV;
1918 -
1919 if (dev->rc_map.name)
1920 ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
1921 if (dev->driver_name)
1922 diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
1923 index 511e9a25c151..16c4d26f51e7 100644
1924 --- a/drivers/memory/tegra/tegra114.c
1925 +++ b/drivers/memory/tegra/tegra114.c
1926 @@ -935,6 +935,7 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = {
1927 .num_swgroups = ARRAY_SIZE(tegra114_swgroups),
1928 .supports_round_robin_arbitration = false,
1929 .supports_request_limit = false,
1930 + .num_tlb_lines = 32,
1931 .num_asids = 4,
1932 .ops = &tegra114_smmu_ops,
1933 };
1934 diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
1935 index 278d40b854c1..b153d0b732cf 100644
1936 --- a/drivers/memory/tegra/tegra124.c
1937 +++ b/drivers/memory/tegra/tegra124.c
1938 @@ -981,6 +981,7 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = {
1939 .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
1940 .supports_round_robin_arbitration = true,
1941 .supports_request_limit = true,
1942 + .num_tlb_lines = 32,
1943 .num_asids = 128,
1944 .ops = &tegra124_smmu_ops,
1945 };
1946 diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
1947 index 71fe9376fe53..f422b18f45f3 100644
1948 --- a/drivers/memory/tegra/tegra30.c
1949 +++ b/drivers/memory/tegra/tegra30.c
1950 @@ -957,6 +957,7 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = {
1951 .num_swgroups = ARRAY_SIZE(tegra30_swgroups),
1952 .supports_round_robin_arbitration = false,
1953 .supports_request_limit = false,
1954 + .num_tlb_lines = 16,
1955 .num_asids = 4,
1956 .ops = &tegra30_smmu_ops,
1957 };
1958 diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
1959 index 1ef01647265f..4f1b0bdb9cf8 100644
1960 --- a/drivers/misc/cxl/pci.c
1961 +++ b/drivers/misc/cxl/pci.c
1962 @@ -778,14 +778,9 @@ int cxl_reset(struct cxl *adapter)
1963 {
1964 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
1965 int rc;
1966 - int i;
1967 - u32 val;
1968
1969 dev_info(&dev->dev, "CXL reset\n");
1970
1971 - for (i = 0; i < adapter->slices; i++)
1972 - cxl_remove_afu(adapter->afu[i]);
1973 -
1974 /* pcie_warm_reset requests a fundamental pci reset which includes a
1975 * PERST assert/deassert. PERST triggers a loading of the image
1976 * if "user" or "factory" is selected in sysfs */
1977 @@ -794,20 +789,6 @@ int cxl_reset(struct cxl *adapter)
1978 return rc;
1979 }
1980
1981 - /* the PERST done above fences the PHB. So, reset depends on EEH
1982 - * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
1983 -	 * the driver. Do an mmio read explicitly to ensure EEH notices the
1984 - * fenced PHB. Retry for a few seconds before giving up. */
1985 - i = 0;
1986 - while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
1987 - (i < 5)) {
1988 - msleep(500);
1989 - i++;
1990 - }
1991 -
1992 - if (val != 0xffffffff)
1993 - dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
1994 -
1995 return rc;
1996 }
1997
1998 @@ -1062,8 +1043,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
1999 int slice;
2000 int rc;
2001
2002 - pci_dev_get(dev);
2003 -
2004 if (cxl_verbose)
2005 dump_cxl_config_space(dev);
2006
2007 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
2008 index 92e7671426eb..588fb7908642 100644
2009 --- a/drivers/mmc/core/core.c
2010 +++ b/drivers/mmc/core/core.c
2011 @@ -330,8 +330,10 @@ EXPORT_SYMBOL(mmc_start_bkops);
2012 */
2013 static void mmc_wait_data_done(struct mmc_request *mrq)
2014 {
2015 - mrq->host->context_info.is_done_rcv = true;
2016 - wake_up_interruptible(&mrq->host->context_info.wait);
2017 + struct mmc_context_info *context_info = &mrq->host->context_info;
2018 +
2019 + context_info->is_done_rcv = true;
2020 + wake_up_interruptible(&context_info->wait);
2021 }
2022
2023 static void mmc_wait_done(struct mmc_request *mrq)
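The mmc change caches the context_info pointer — which lives in the long-lived host — before publishing completion: the moment is_done_rcv becomes visible, the waiting thread may free the request, so mrq must not be dereferenced again. A userspace analogue with pthreads, names simplified:

#include <pthread.h>
#include <stdbool.h>

struct context_info {		/* lives in the long-lived host */
	pthread_mutex_t lock;
	pthread_cond_t wait;
	bool is_done_rcv;
};

struct request {		/* short-lived; freed by the waiter */
	struct context_info *ctx;
};

static void request_done(struct request *req)
{
	struct context_info *ctx = req->ctx;	/* last touch of req */

	pthread_mutex_lock(&ctx->lock);
	ctx->is_done_rcv = true;	/* waiter may free req from here on */
	pthread_cond_signal(&ctx->wait);
	pthread_mutex_unlock(&ctx->lock);
}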
2024 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
2025 index 7a3fc16d0a6c..53cfc7cedefe 100644
2026 --- a/drivers/mmc/host/sdhci-pci.c
2027 +++ b/drivers/mmc/host/sdhci-pci.c
2028 @@ -549,6 +549,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
2029 static const struct sdhci_pci_fixes sdhci_o2 = {
2030 .probe = sdhci_pci_o2_probe,
2031 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
2032 + .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
2033 .probe_slot = sdhci_pci_o2_probe_slot,
2034 .resume = sdhci_pci_o2_resume,
2035 };
2036 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2037 index bec8a307f8cd..fd41b91436ec 100644
2038 --- a/drivers/mmc/host/sdhci.c
2039 +++ b/drivers/mmc/host/sdhci.c
2040 @@ -1146,6 +1146,7 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
2041 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
2042 break;
2043 case MMC_TIMING_UHS_DDR50:
2044 + case MMC_TIMING_MMC_DDR52:
2045 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
2046 break;
2047 case MMC_TIMING_MMC_HS400:
2048 @@ -1598,7 +1599,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
2049 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2050 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2051 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2052 - (ios->timing == MMC_TIMING_UHS_DDR50))) {
2053 + (ios->timing == MMC_TIMING_UHS_DDR50) ||
2054 + (ios->timing == MMC_TIMING_MMC_DDR52))) {
2055 u16 preset;
2056
2057 sdhci_enable_preset_value(host, true);
2058 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2059 index d5fe5d5f490f..16d87bf8ac3c 100644
2060 --- a/drivers/net/bonding/bond_main.c
2061 +++ b/drivers/net/bonding/bond_main.c
2062 @@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
2063 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
2064 }
2065
2066 +static struct slave *bond_get_old_active(struct bonding *bond,
2067 + struct slave *new_active)
2068 +{
2069 + struct slave *slave;
2070 + struct list_head *iter;
2071 +
2072 + bond_for_each_slave(bond, slave, iter) {
2073 + if (slave == new_active)
2074 + continue;
2075 +
2076 + if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
2077 + return slave;
2078 + }
2079 +
2080 + return NULL;
2081 +}
2082 +
2083 /* bond_do_fail_over_mac
2084 *
2085 * Perform special MAC address swapping for fail_over_mac settings
2086 @@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
2087 if (!new_active)
2088 return;
2089
2090 + if (!old_active)
2091 + old_active = bond_get_old_active(bond, new_active);
2092 +
2093 if (old_active) {
2094 ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
2095 ether_addr_copy(saddr.sa_data,
2096 @@ -1902,6 +1922,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
2097 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2098 netdev_info(bond_dev, "Destroying bond %s\n",
2099 bond_dev->name);
2100 + bond_remove_proc_entry(bond);
2101 unregister_netdevice(bond_dev);
2102 }
2103 return ret;
2104 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
2105 index 069952fa5d64..0d8af5bb5907 100644
2106 --- a/drivers/net/ethernet/broadcom/tg3.c
2107 +++ b/drivers/net/ethernet/broadcom/tg3.c
2108 @@ -10757,7 +10757,7 @@ static ssize_t tg3_show_temp(struct device *dev,
2109 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
2110 sizeof(temperature));
2111 spin_unlock_bh(&tp->lock);
2112 - return sprintf(buf, "%u\n", temperature);
2113 + return sprintf(buf, "%u\n", temperature * 1000);
2114 }
2115
2116
2117 diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
2118 index caae6cb2bc1a..a1c30ee60888 100644
2119 --- a/drivers/net/ethernet/brocade/bna/bnad.c
2120 +++ b/drivers/net/ethernet/brocade/bna/bnad.c
2121 @@ -675,6 +675,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
2122 if (!next_cmpl->valid)
2123 break;
2124 }
2125 + packets++;
2126
2127 /* TODO: BNA_CQ_EF_LOCAL ? */
2128 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
2129 @@ -691,7 +692,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
2130 else
2131 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
2132
2133 - packets++;
2134 rcb->rxq->rx_packets++;
2135 rcb->rxq->rx_bytes += totlen;
2136 ccb->bytes_per_intr += totlen;
2137 diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2138 index c754b2027281..c9da1b5d4804 100644
2139 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2140 +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2141 @@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
2142
2143 static inline bool fm10k_page_is_reserved(struct page *page)
2144 {
2145 - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
2146 + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2147 }
2148
2149 static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
2150 diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
2151 index c2bd4f98a837..212d668dabb3 100644
2152 --- a/drivers/net/ethernet/intel/igb/igb.h
2153 +++ b/drivers/net/ethernet/intel/igb/igb.h
2154 @@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
2155 struct sk_buff *skb);
2156 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
2157 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
2158 +void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
2159 #ifdef CONFIG_IGB_HWMON
2160 void igb_sysfs_exit(struct igb_adapter *adapter);
2161 int igb_sysfs_init(struct igb_adapter *adapter);
2162 diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
2163 index d5673eb90c54..0afc0913e5b9 100644
2164 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
2165 +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
2166 @@ -2991,6 +2991,7 @@ static int igb_set_channels(struct net_device *netdev,
2167 {
2168 struct igb_adapter *adapter = netdev_priv(netdev);
2169 unsigned int count = ch->combined_count;
2170 + unsigned int max_combined = 0;
2171
2172 /* Verify they are not requesting separate vectors */
2173 if (!count || ch->rx_count || ch->tx_count)
2174 @@ -3001,11 +3002,13 @@ static int igb_set_channels(struct net_device *netdev,
2175 return -EINVAL;
2176
2177 /* Verify the number of channels doesn't exceed hw limits */
2178 - if (count > igb_max_channels(adapter))
2179 + max_combined = igb_max_channels(adapter);
2180 + if (count > max_combined)
2181 return -EINVAL;
2182
2183 if (count != adapter->rss_queues) {
2184 adapter->rss_queues = count;
2185 + igb_set_flag_queue_pairs(adapter, max_combined);
2186
2187 /* Hardware has to reinitialize queues and interrupts to
2188 * match the new configuration.
2189 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2190 index a0a9b1fcb5e8..4f6bf996851e 100644
2191 --- a/drivers/net/ethernet/intel/igb/igb_main.c
2192 +++ b/drivers/net/ethernet/intel/igb/igb_main.c
2193 @@ -1205,10 +1205,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
2194
2195 /* allocate q_vector and rings */
2196 q_vector = adapter->q_vector[v_idx];
2197 - if (!q_vector)
2198 + if (!q_vector) {
2199 q_vector = kzalloc(size, GFP_KERNEL);
2200 - else
2201 + } else if (size > ksize(q_vector)) {
2202 + kfree_rcu(q_vector, rcu);
2203 + q_vector = kzalloc(size, GFP_KERNEL);
2204 + } else {
2205 memset(q_vector, 0, size);
2206 + }
2207 if (!q_vector)
2208 return -ENOMEM;
2209
2210 @@ -2901,6 +2905,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2211
2212 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2213
2214 + igb_set_flag_queue_pairs(adapter, max_rss_queues);
2215 +}
2216 +
2217 +void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
2218 + const u32 max_rss_queues)
2219 +{
2220 + struct e1000_hw *hw = &adapter->hw;
2221 +
2222 /* Determine if we need to pair queues. */
2223 switch (hw->mac.type) {
2224 case e1000_82575:
2225 @@ -6584,7 +6596,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
2226
2227 static inline bool igb_page_is_reserved(struct page *page)
2228 {
2229 - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
2230 + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2231 }
2232
2233 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
2234 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2235 index 5be12a00e1f4..463ff47200f1 100644
2236 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2237 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2238 @@ -1829,7 +1829,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
2239
2240 static inline bool ixgbe_page_is_reserved(struct page *page)
2241 {
2242 - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
2243 + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2244 }
2245
2246 /**
2247 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2248 index e71cdde9cb01..1d7b00b038a2 100644
2249 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2250 +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2251 @@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
2252
2253 static inline bool ixgbevf_page_is_reserved(struct page *page)
2254 {
2255 - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
2256 + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2257 }
2258
2259 /**
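The four Intel Ethernet hunks above switch from reading page->pfmemalloc directly to the page_is_pfmemalloc() accessor backported from mainline ("mm: make page pfmemalloc check more robust"); the old field shared union storage in struct page, so a leftover value from a previous user of that storage could read as a false positive. Roughly the shape of the helper, as a sketch rather than the verbatim backport:

/* Sketch of the accessor these hunks adopt; the real definition lives in
 * include/linux/mm.h. */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/* No valid page index can be -1UL, so -1UL marks pfmemalloc pages. */
	return page->index == -1UL;
}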
2260 diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
2261 index 2619c9fbf42d..983b1d51244d 100644
2262 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c
2263 +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
2264 @@ -573,7 +573,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
2265 continue;
2266 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
2267 __func__, i, port);
2268 - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
2269 + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
2270 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
2271 eqe->event.port_change.port =
2272 cpu_to_be32(
2273 @@ -608,7 +608,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
2274 continue;
2275 if (i == mlx4_master_func_num(dev))
2276 continue;
2277 - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
2278 + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
2279 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
2280 eqe->event.port_change.port =
2281 cpu_to_be32(
2282 diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
2283 index cf98cc9bbc8d..73b6fc21ea00 100644
2284 --- a/drivers/net/ethernet/rocker/rocker.c
2285 +++ b/drivers/net/ethernet/rocker/rocker.c
2286 @@ -4587,6 +4587,7 @@ static void rocker_remove_ports(struct rocker *rocker)
2287 rocker_port = rocker->ports[i];
2288 rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
2289 unregister_netdev(rocker_port->dev);
2290 + free_netdev(rocker_port->dev);
2291 }
2292 kfree(rocker->ports);
2293 }
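The rocker fix pairs unregister_netdev() with free_netdev(): unregistering only detaches the device from the stack, so without the second call every removed port leaked its net_device. Sketch of the pairing:

/* Teardown must mirror alloc_etherdev()/register_netdev(). */
static void port_destroy(struct net_device *dev)
{
	unregister_netdev(dev);	/* detach from the stack; memory still live */
	free_netdev(dev);	/* release dev and its private area */
}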
2294 diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
2295 index ad3996038018..799c2929c536 100644
2296 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h
2297 +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
2298 @@ -158,6 +158,8 @@ struct dma_desc {
2299 u32 buffer2_size:13;
2300 u32 reserved4:3;
2301 } etx; /* -- enhanced -- */
2302 +
2303 + u64 all_flags;
2304 } des01;
2305 unsigned int des2;
2306 unsigned int des3;
2307 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2308 index 6249a4ec08f0..573708123338 100644
2309 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2310 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2311 @@ -38,7 +38,6 @@ struct rk_priv_data {
2312 bool clock_input;
2313
2314 struct clk *clk_mac;
2315 - struct clk *clk_mac_pll;
2316 struct clk *gmac_clkin;
2317 struct clk *mac_clk_rx;
2318 struct clk *mac_clk_tx;
2319 @@ -208,7 +207,7 @@ static int gmac_clk_init(struct rk_priv_data *bsp_priv)
2320 dev_info(dev, "%s: clock input from PHY\n", __func__);
2321 } else {
2322 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
2323 - clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
2324 + clk_set_rate(bsp_priv->clk_mac, 50000000);
2325 }
2326
2327 return 0;
2328 diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2329 index 1e2bcf5f89e1..7d944449f5ef 100644
2330 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2331 +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2332 @@ -240,6 +240,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
2333 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
2334 int mode, int end)
2335 {
2336 + p->des01.all_flags = 0;
2337 p->des01.erx.own = 1;
2338 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
2339
2340 @@ -254,7 +255,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
2341
2342 static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
2343 {
2344 - p->des01.etx.own = 0;
2345 + p->des01.all_flags = 0;
2346 if (mode == STMMAC_CHAIN_MODE)
2347 ehn_desc_tx_set_on_chain(p, end);
2348 else
2349 diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2350 index 35ad4f427ae2..48c3456445b2 100644
2351 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2352 +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2353 @@ -123,6 +123,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
2354 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
2355 int end)
2356 {
2357 + p->des01.all_flags = 0;
2358 p->des01.rx.own = 1;
2359 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
2360
2361 @@ -137,7 +138,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
2362
2363 static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
2364 {
2365 - p->des01.tx.own = 0;
2366 + p->des01.all_flags = 0;
2367 if (mode == STMMAC_CHAIN_MODE)
2368 ndesc_tx_set_on_chain(p, end);
2369 else
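The descs.h/enh_desc/norm_desc hunks overlay a u64 all_flags member on the descriptor bitfields so the init paths can wipe every flag in one store; clearing only the own bit, as before, left stale bits from the descriptor's previous use. A runnable sketch of the union trick with a deliberately simplified layout:

#include <stdint.h>
#include <stdio.h>

/* Overlay a u64 on the bitfields so one store clears them all
 * (layout simplified from the stmmac descriptor). */
union desc_flags {
	struct { uint32_t own:1, err:1, len:13, rsvd:17; } rx;
	uint64_t all_flags;
};

int main(void)
{
	union desc_flags d;

	d.rx.err = 1;		/* stale state from the previous user */
	d.all_flags = 0;	/* reinit: wipes every field at once */
	d.rx.own = 1;
	printf("own=%u err=%u\n", d.rx.own, d.rx.err);	/* own=1 err=0 */
	return 0;
}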
2370 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2371 index 2c5ce2baca87..c274cdc5df1e 100644
2372 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2373 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2374 @@ -829,8 +829,11 @@ static int stmmac_init_phy(struct net_device *dev)
2375
2376 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
2377
2378 - if (IS_ERR(phydev)) {
2379 + if (IS_ERR_OR_NULL(phydev)) {
2380 pr_err("%s: Could not attach to PHY\n", dev->name);
2381 + if (!phydev)
2382 + return -ENODEV;
2383 +
2384 return PTR_ERR(phydev);
2385 }
2386
2387 @@ -1189,41 +1192,41 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2388 goto err_tx_skbuff;
2389
2390 if (priv->extend_desc) {
2391 - priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
2392 - sizeof(struct
2393 - dma_extended_desc),
2394 - &priv->dma_rx_phy,
2395 - GFP_KERNEL);
2396 + priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
2397 + sizeof(struct
2398 + dma_extended_desc),
2399 + &priv->dma_rx_phy,
2400 + GFP_KERNEL);
2401 if (!priv->dma_erx)
2402 goto err_dma;
2403
2404 - priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
2405 - sizeof(struct
2406 - dma_extended_desc),
2407 - &priv->dma_tx_phy,
2408 - GFP_KERNEL);
2409 + priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
2410 + sizeof(struct
2411 + dma_extended_desc),
2412 + &priv->dma_tx_phy,
2413 + GFP_KERNEL);
2414 if (!priv->dma_etx) {
2415 dma_free_coherent(priv->device, priv->dma_rx_size *
2416 - sizeof(struct dma_extended_desc),
2417 - priv->dma_erx, priv->dma_rx_phy);
2418 + sizeof(struct dma_extended_desc),
2419 + priv->dma_erx, priv->dma_rx_phy);
2420 goto err_dma;
2421 }
2422 } else {
2423 - priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
2424 - sizeof(struct dma_desc),
2425 - &priv->dma_rx_phy,
2426 - GFP_KERNEL);
2427 + priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
2428 + sizeof(struct dma_desc),
2429 + &priv->dma_rx_phy,
2430 + GFP_KERNEL);
2431 if (!priv->dma_rx)
2432 goto err_dma;
2433
2434 - priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
2435 - sizeof(struct dma_desc),
2436 - &priv->dma_tx_phy,
2437 - GFP_KERNEL);
2438 + priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
2439 + sizeof(struct dma_desc),
2440 + &priv->dma_tx_phy,
2441 + GFP_KERNEL);
2442 if (!priv->dma_tx) {
2443 dma_free_coherent(priv->device, priv->dma_rx_size *
2444 - sizeof(struct dma_desc),
2445 - priv->dma_rx, priv->dma_rx_phy);
2446 + sizeof(struct dma_desc),
2447 + priv->dma_rx, priv->dma_rx_phy);
2448 goto err_dma;
2449 }
2450 }
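Allocating the rings with dma_zalloc_coherent() guarantees the descriptors start zeroed, so the DMA engine can never observe uninitialized ownership or flag bits between allocation and descriptor init. The wrapper of this era is roughly the following (a sketch, not the verbatim header):

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}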
2451 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2452 index 63c7810e1545..7fbca37a1adf 100644
2453 --- a/drivers/net/virtio_net.c
2454 +++ b/drivers/net/virtio_net.c
2455 @@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
2456 else
2457 vi->hdr_len = sizeof(struct virtio_net_hdr);
2458
2459 - if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
2460 + if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
2461 + virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
2462 vi->any_header_sg = true;
2463
2464 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2465 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
2466 index 23806c243a53..fd4a5353d216 100644
2467 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
2468 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
2469 @@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
2470 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
2471 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
2472 {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
2473 + {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
2474 {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
2475 {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
2476 {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
2477 diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
2478 index 57966e3c8e8d..3fa2fb7c8e4e 100644
2479 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
2480 +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
2481 @@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
2482
2483 rtl_write_byte(rtlpriv, MSR, bt_msr);
2484 rtlpriv->cfg->ops->led_control(hw, ledaction);
2485 - if ((bt_msr & 0xfc) == MSR_AP)
2486 + if ((bt_msr & MSR_MASK) == MSR_AP)
2487 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
2488 else
2489 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
2490 diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
2491 index 53668fc8f23e..1d6110f9c1fb 100644
2492 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
2493 +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
2494 @@ -429,6 +429,7 @@
2495 #define MSR_ADHOC 0x01
2496 #define MSR_INFRA 0x02
2497 #define MSR_AP 0x03
2498 +#define MSR_MASK 0x03
2499
2500 #define RRSR_RSC_OFFSET 21
2501 #define RRSR_SHORT_OFFSET 23
2502 diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
2503 index 0d2594395ffb..0866c5dfdf87 100644
2504 --- a/drivers/net/xen-netback/netback.c
2505 +++ b/drivers/net/xen-netback/netback.c
2506 @@ -1571,13 +1571,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
2507 smp_rmb();
2508
2509 while (dc != dp) {
2510 - BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
2511 + BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
2512 pending_idx =
2513 queue->dealloc_ring[pending_index(dc++)];
2514
2515 - pending_idx_release[gop-queue->tx_unmap_ops] =
2516 + pending_idx_release[gop - queue->tx_unmap_ops] =
2517 pending_idx;
2518 - queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
2519 + queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
2520 queue->mmap_pages[pending_idx];
2521 gnttab_set_unmap_op(gop,
2522 idx_to_kaddr(queue, pending_idx),
2523 diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
2524 index d251f7229c4e..051286562fab 100644
2525 --- a/drivers/nfc/st21nfca/st21nfca.c
2526 +++ b/drivers/nfc/st21nfca/st21nfca.c
2527 @@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
2528 ST21NFCA_DEVICE_MGNT_GATE,
2529 ST21NFCA_DEVICE_MGNT_PIPE);
2530 if (r < 0)
2531 - goto free_info;
2532 + return r;
2533
2534 /* Get pipe list */
2535 r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
2536 ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
2537 &skb_pipe_list);
2538 if (r < 0)
2539 - goto free_info;
2540 + return r;
2541
2542 /* Complete the existing gate_pipe table */
2543 for (i = 0; i < skb_pipe_list->len; i++) {
2544 @@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
2545 info->src_host_id != ST21NFCA_ESE_HOST_ID) {
2546 pr_err("Unexpected apdu_reader pipe on host %x\n",
2547 info->src_host_id);
2548 + kfree_skb(skb_pipe_info);
2549 continue;
2550 }
2551
2552 @@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
2553 hdev->pipes[st21nfca_gates[j].pipe].dest_host =
2554 info->src_host_id;
2555 }
2556 + kfree_skb(skb_pipe_info);
2557 }
2558
2559 /*
2560 @@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
2561 st21nfca_gates[i].gate,
2562 st21nfca_gates[i].pipe);
2563 if (r < 0)
2564 - goto free_info;
2565 + goto free_list;
2566 }
2567 }
2568
2569 memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
2570 -free_info:
2571 - kfree_skb(skb_pipe_info);
2572 +free_list:
2573 kfree_skb(skb_pipe_list);
2574 return r;
2575 }
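The st21nfca fix frees skb_pipe_info inside the loop, once per iteration, and keeps only the function-lifetime skb_pipe_list at the exit label; previously every iteration but the last leaked its skb. A compilable sketch of the shape, with get_info() as a hypothetical stand-in for the per-pipe nfc_hci_send_cmd() call:

#include <stdlib.h>

static int get_info(int i, char **out)
{
	(void)i;
	*out = malloc(32);
	return *out ? 0 : -1;
}

static int walk(int n)
{
	int i, r = 0;

	for (i = 0; i < n; i++) {
		char *info = NULL;

		r = get_info(i, &info);
		if (r < 0)
			break;
		/* ... use info ... */
		free(info);	/* release the per-iteration buffer every
				 * pass, not just the last one at a label */
	}
	return r;
}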
2576 diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
2577 index cde35c5d0191..d91f721a05b6 100644
2578 --- a/drivers/of/fdt.c
2579 +++ b/drivers/of/fdt.c
2580 @@ -955,7 +955,9 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
2581 }
2582
2583 #ifdef CONFIG_HAVE_MEMBLOCK
2584 -#define MAX_PHYS_ADDR ((phys_addr_t)~0)
2585 +#ifndef MAX_MEMBLOCK_ADDR
2586 +#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
2587 +#endif
2588
2589 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
2590 {
2591 @@ -972,16 +974,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
2592 }
2593 size &= PAGE_MASK;
2594
2595 - if (base > MAX_PHYS_ADDR) {
2596 + if (base > MAX_MEMBLOCK_ADDR) {
2597 pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
2598 base, base + size);
2599 return;
2600 }
2601
2602 - if (base + size - 1 > MAX_PHYS_ADDR) {
2603 + if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
2604 pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
2605 - ((u64)MAX_PHYS_ADDR) + 1, base + size);
2606 - size = MAX_PHYS_ADDR - base + 1;
2607 + ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
2608 + size = MAX_MEMBLOCK_ADDR - base + 1;
2609 }
2610
2611 if (base + size < phys_offset) {
2612 diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
2613 index dceb9ddfd99a..a32c1f6c252c 100644
2614 --- a/drivers/parisc/lba_pci.c
2615 +++ b/drivers/parisc/lba_pci.c
2616 @@ -1556,8 +1556,11 @@ lba_driver_probe(struct parisc_device *dev)
2617 if (lba_dev->hba.lmmio_space.flags)
2618 pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
2619 lba_dev->hba.lmmio_space_offset);
2620 - if (lba_dev->hba.gmmio_space.flags)
2621 - pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
2622 + if (lba_dev->hba.gmmio_space.flags) {
2623 + /* pci_add_resource(&resources, &lba_dev->hba.gmmio_space); */
2624 + pr_warn("LBA: Not registering GMMIO space %pR\n",
2625 + &lba_dev->hba.gmmio_space);
2626 + }
2627
2628 pci_add_resource(&resources, &lba_dev->hba.bus_num);
2629
2630 diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
2631 index 944f50015ed0..73de4efcbe6e 100644
2632 --- a/drivers/pci/Kconfig
2633 +++ b/drivers/pci/Kconfig
2634 @@ -2,7 +2,7 @@
2635 # PCI configuration
2636 #
2637 config PCI_BUS_ADDR_T_64BIT
2638 - def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
2639 + def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
2640 depends on PCI
2641
2642 config PCI_MSI
2643 diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
2644 index 2f797cb7e205..774781450885 100644
2645 --- a/drivers/pinctrl/pinctrl-at91.c
2646 +++ b/drivers/pinctrl/pinctrl-at91.c
2647 @@ -320,6 +320,9 @@ static const struct pinctrl_ops at91_pctrl_ops = {
2648 static void __iomem *pin_to_controller(struct at91_pinctrl *info,
2649 unsigned int bank)
2650 {
2651 + if (!gpio_chips[bank])
2652 + return NULL;
2653 +
2654 return gpio_chips[bank]->regbase;
2655 }
2656
2657 @@ -729,6 +732,10 @@ static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
2658 pin = &pins_conf[i];
2659 at91_pin_dbg(info->dev, pin);
2660 pio = pin_to_controller(info, pin->bank);
2661 +
2662 + if (!pio)
2663 + continue;
2664 +
2665 mask = pin_to_mask(pin->pin);
2666 at91_mux_disable_interrupt(pio, mask);
2667 switch (pin->mux) {
2668 @@ -848,6 +855,10 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
2669 *config = 0;
2670 dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
2671 pio = pin_to_controller(info, pin_to_bank(pin_id));
2672 +
2673 + if (!pio)
2674 + return -EINVAL;
2675 +
2676 pin = pin_id % MAX_NB_GPIO_PER_BANK;
2677
2678 if (at91_mux_get_multidrive(pio, pin))
2679 @@ -889,6 +900,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
2680 "%s:%d, pin_id=%d, config=0x%lx",
2681 __func__, __LINE__, pin_id, config);
2682 pio = pin_to_controller(info, pin_to_bank(pin_id));
2683 +
2684 + if (!pio)
2685 + return -EINVAL;
2686 +
2687 pin = pin_id % MAX_NB_GPIO_PER_BANK;
2688 mask = pin_to_mask(pin);
2689
2690 diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
2691 index cb7cd8d79329..cd78f1166b33 100644
2692 --- a/drivers/platform/x86/ideapad-laptop.c
2693 +++ b/drivers/platform/x86/ideapad-laptop.c
2694 @@ -852,6 +852,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
2695 },
2696 },
2697 {
2698 + .ident = "Lenovo Yoga 3 14",
2699 + .matches = {
2700 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2701 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3 14"),
2702 + },
2703 + },
2704 + {
2705 .ident = "Lenovo Yoga 3 Pro 1370",
2706 .matches = {
2707 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2708 diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
2709 index 4337c3bc6ace..afea84c7a155 100644
2710 --- a/drivers/rtc/rtc-abx80x.c
2711 +++ b/drivers/rtc/rtc-abx80x.c
2712 @@ -28,7 +28,7 @@
2713 #define ABX8XX_REG_WD 0x07
2714
2715 #define ABX8XX_REG_CTRL1 0x10
2716 -#define ABX8XX_CTRL_WRITE BIT(1)
2717 +#define ABX8XX_CTRL_WRITE BIT(0)
2718 #define ABX8XX_CTRL_12_24 BIT(6)
2719
2720 #define ABX8XX_REG_CFG_KEY 0x1f
2721 diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
2722 index 76cbad7a99d3..c5a2523b0185 100644
2723 --- a/drivers/rtc/rtc-s3c.c
2724 +++ b/drivers/rtc/rtc-s3c.c
2725 @@ -39,6 +39,7 @@ struct s3c_rtc {
2726 void __iomem *base;
2727 struct clk *rtc_clk;
2728 struct clk *rtc_src_clk;
2729 + bool clk_disabled;
2730
2731 struct s3c_rtc_data *data;
2732
2733 @@ -71,9 +72,12 @@ static void s3c_rtc_enable_clk(struct s3c_rtc *info)
2734 unsigned long irq_flags;
2735
2736 spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
2737 - clk_enable(info->rtc_clk);
2738 - if (info->data->needs_src_clk)
2739 - clk_enable(info->rtc_src_clk);
2740 + if (info->clk_disabled) {
2741 + clk_enable(info->rtc_clk);
2742 + if (info->data->needs_src_clk)
2743 + clk_enable(info->rtc_src_clk);
2744 + info->clk_disabled = false;
2745 + }
2746 spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
2747 }
2748
2749 @@ -82,9 +86,12 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info)
2750 unsigned long irq_flags;
2751
2752 spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
2753 - if (info->data->needs_src_clk)
2754 - clk_disable(info->rtc_src_clk);
2755 - clk_disable(info->rtc_clk);
2756 + if (!info->clk_disabled) {
2757 + if (info->data->needs_src_clk)
2758 + clk_disable(info->rtc_src_clk);
2759 + clk_disable(info->rtc_clk);
2760 + info->clk_disabled = true;
2761 + }
2762 spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
2763 }
2764
2765 @@ -128,6 +135,11 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
2766
2767 s3c_rtc_disable_clk(info);
2768
2769 + if (enabled)
2770 + s3c_rtc_enable_clk(info);
2771 + else
2772 + s3c_rtc_disable_clk(info);
2773 +
2774 return 0;
2775 }
2776
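The clk_disabled flag makes the rtc-s3c enable/disable helpers idempotent under alarm_clk_lock, so call patterns like the setaie path above (disable, then enable-or-disable again) can no longer unbalance the clock reference counts. A userspace analogue using a pthread mutex, with a counter standing in for the clock framework's refcount:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool clk_disabled;
static int enable_count = 1;	/* probe leaves the clock enabled */

static void rtc_enable_clk(void)
{
	pthread_mutex_lock(&lock);
	if (clk_disabled) {		/* idempotent: only undo a disable */
		enable_count++;
		clk_disabled = false;
	}
	pthread_mutex_unlock(&lock);
}

static void rtc_disable_clk(void)
{
	pthread_mutex_lock(&lock);
	if (!clk_disabled) {		/* repeated disables are no-ops */
		enable_count--;
		clk_disabled = true;
	}
	pthread_mutex_unlock(&lock);
}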
2777 diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
2778 index 8c70d785ba73..ab60287ee72d 100644
2779 --- a/drivers/rtc/rtc-s5m.c
2780 +++ b/drivers/rtc/rtc-s5m.c
2781 @@ -635,6 +635,16 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
2782 case S2MPS13X:
2783 data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
2784 ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
2785 + if (ret < 0)
2786 + break;
2787 +
2788 +	/*
2789 +	 * The WUDR and (RUDR or AUDR) bits should be set high after writing
2790 +	 * the RTC_CTRL register, just as when writing the alarm registers.
2791 +	 * The datasheet does not describe this, but the vendor code really
2792 +	 * does it.
2793 +	 */
2794 + ret = s5m8767_rtc_set_alarm_reg(info);
2795 break;
2796
2797 default:
2798 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
2799 index 94e909c5a503..00d18c2bdb0f 100644
2800 --- a/fs/btrfs/transaction.c
2801 +++ b/fs/btrfs/transaction.c
2802 @@ -1875,8 +1875,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
2803 spin_unlock(&root->fs_info->trans_lock);
2804
2805 wait_for_commit(root, prev_trans);
2806 + ret = prev_trans->aborted;
2807
2808 btrfs_put_transaction(prev_trans);
2809 + if (ret)
2810 + goto cleanup_transaction;
2811 } else {
2812 spin_unlock(&root->fs_info->trans_lock);
2813 }
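The btrfs hunk reads prev_trans->aborted after wait_for_commit() but before btrfs_put_transaction() drops the reference keeping the transaction alive, then propagates the error. A sketch of the read-before-put ordering, with hypothetical stand-ins for the btrfs types and helpers:

struct txn { int aborted; };

/* Hypothetical stand-ins for wait_for_commit()/btrfs_put_transaction(). */
static void wait_commit(struct txn *t) { (void)t; }
static void put_txn(struct txn *t) { (void)t; /* may free t */ }

static int wait_prev_and_check(struct txn *prev)
{
	int ret;

	wait_commit(prev);
	ret = prev->aborted;	/* read while our reference still pins prev */
	put_txn(prev);		/* prev must not be touched after this */
	return ret;
}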
2814 diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
2815 index 8b7898b7670f..64a9bca976d0 100644
2816 --- a/fs/cifs/ioctl.c
2817 +++ b/fs/cifs/ioctl.c
2818 @@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
2819 goto out_drop_write;
2820 }
2821
2822 + if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
2823 + rc = -EBADF;
2824 + cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
2825 + goto out_fput;
2826 + }
2827 +
2828 if ((!src_file.file->private_data) || (!dst_file->private_data)) {
2829 rc = -EBADF;
2830 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
2831 diff --git a/fs/coredump.c b/fs/coredump.c
2832 index bbbe139ab280..8dd099dc5f9b 100644
2833 --- a/fs/coredump.c
2834 +++ b/fs/coredump.c
2835 @@ -506,10 +506,10 @@ void do_coredump(const siginfo_t *siginfo)
2836 const struct cred *old_cred;
2837 struct cred *cred;
2838 int retval = 0;
2839 - int flag = 0;
2840 int ispipe;
2841 struct files_struct *displaced;
2842 - bool need_nonrelative = false;
2843 + /* require nonrelative corefile path and be extra careful */
2844 + bool need_suid_safe = false;
2845 bool core_dumped = false;
2846 static atomic_t core_dump_count = ATOMIC_INIT(0);
2847 struct coredump_params cprm = {
2848 @@ -543,9 +543,8 @@ void do_coredump(const siginfo_t *siginfo)
2849 */
2850 if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
2851 /* Setuid core dump mode */
2852 - flag = O_EXCL; /* Stop rewrite attacks */
2853 cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
2854 - need_nonrelative = true;
2855 + need_suid_safe = true;
2856 }
2857
2858 retval = coredump_wait(siginfo->si_signo, &core_state);
2859 @@ -626,7 +625,7 @@ void do_coredump(const siginfo_t *siginfo)
2860 if (cprm.limit < binfmt->min_coredump)
2861 goto fail_unlock;
2862
2863 - if (need_nonrelative && cn.corename[0] != '/') {
2864 + if (need_suid_safe && cn.corename[0] != '/') {
2865 printk(KERN_WARNING "Pid %d(%s) can only dump core "\
2866 "to fully qualified path!\n",
2867 task_tgid_vnr(current), current->comm);
2868 @@ -634,8 +633,35 @@ void do_coredump(const siginfo_t *siginfo)
2869 goto fail_unlock;
2870 }
2871
2872 + /*
2873 + * Unlink the file if it exists unless this is a SUID
2874 + * binary - in that case, we're running around with root
2875 + * privs and don't want to unlink another user's coredump.
2876 + */
2877 + if (!need_suid_safe) {
2878 + mm_segment_t old_fs;
2879 +
2880 + old_fs = get_fs();
2881 + set_fs(KERNEL_DS);
2882 + /*
2883 + * If it doesn't exist, that's fine. If there's some
2884 + * other problem, we'll catch it at the filp_open().
2885 + */
2886 + (void) sys_unlink((const char __user *)cn.corename);
2887 + set_fs(old_fs);
2888 + }
2889 +
2890 + /*
2891 + * There is a race between unlinking and creating the
2892 + * file, but if that causes an EEXIST here, that's
2893 + * fine - another process raced with us while creating
2894 + * the corefile, and the other process won. To userspace,
2895 + * what matters is that at least one of the two processes
2896 + * writes its coredump successfully, not which one.
2897 + */
2898 cprm.file = filp_open(cn.corename,
2899 - O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
2900 + O_CREAT | 2 | O_NOFOLLOW |
2901 + O_LARGEFILE | O_EXCL,
2902 0600);
2903 if (IS_ERR(cprm.file))
2904 goto fail_unlock;
2905 @@ -652,11 +678,15 @@ void do_coredump(const siginfo_t *siginfo)
2906 if (!S_ISREG(inode->i_mode))
2907 goto close_fail;
2908 /*
2909 - * Dont allow local users get cute and trick others to coredump
2910 - * into their pre-created files.
2911 + * Don't dump core if the filesystem changed owner or mode
2912 + * of the file during file creation. This is an issue when
2913 + * a process dumps core while its cwd is e.g. on a vfat
2914 + * filesystem.
2915 */
2916 if (!uid_eq(inode->i_uid, current_fsuid()))
2917 goto close_fail;
2918 + if ((inode->i_mode & 0677) != 0600)
2919 + goto close_fail;
2920 if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
2921 goto close_fail;
2922 if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
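The reworked coredump open never writes through a pre-existing file: for non-suid dumps it first unlinks any old corefile, then always creates with O_CREAT | O_EXCL (losing the create race to a concurrent dumper is acceptable), and afterwards rejects files whose owner or mode changed underneath, which is what the 0677 check catches on filesystems like vfat. A compilable userspace sketch of the unlink-then-exclusive-create step:

#include <fcntl.h>
#include <unistd.h>

/* Never reuse an existing (possibly attacker-placed) file: drop any old
 * one, then insist on creating a fresh file; EEXIST means another dumper
 * won the race, which is fine. */
static int open_corefile(const char *path)
{
	(void)unlink(path);	/* ENOENT is fine */
	return open(path, O_CREAT | O_EXCL | O_WRONLY | O_NOFOLLOW, 0600);
}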
2923 diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
2924 index 8db0b464483f..63cd2c147221 100644
2925 --- a/fs/ecryptfs/dentry.c
2926 +++ b/fs/ecryptfs/dentry.c
2927 @@ -45,20 +45,20 @@
2928 static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
2929 {
2930 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
2931 - int rc;
2932 -
2933 - if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE))
2934 - return 1;
2935 + int rc = 1;
2936
2937 if (flags & LOOKUP_RCU)
2938 return -ECHILD;
2939
2940 - rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
2941 + if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE)
2942 + rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
2943 +
2944 if (d_really_is_positive(dentry)) {
2945 - struct inode *lower_inode =
2946 - ecryptfs_inode_to_lower(d_inode(dentry));
2947 + struct inode *inode = d_inode(dentry);
2948
2949 - fsstack_copy_attr_all(d_inode(dentry), lower_inode);
2950 + fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode));
2951 + if (!inode->i_nlink)
2952 + return 0;
2953 }
2954 return rc;
2955 }
2956 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2957 index 6b4eb94b04a5..ff89971e3ee0 100644
2958 --- a/fs/ext4/super.c
2959 +++ b/fs/ext4/super.c
2960 @@ -324,6 +324,22 @@ static void save_error_info(struct super_block *sb, const char *func,
2961 ext4_commit_super(sb, 1);
2962 }
2963
2964 +/*
2965 + * The del_gendisk() function uninitializes the disk-specific data
2966 + * structures, including the bdi structure, without telling anyone
2967 + * else. Once this happens, any attempt to call mark_buffer_dirty()
2968 + * (for example, by ext4_commit_super), will cause a kernel OOPS.
2969 + * This is a kludge to prevent these oops until we can put in a proper
2970 + * hook in del_gendisk() to inform the VFS and file system layers.
2971 + */
2972 +static int block_device_ejected(struct super_block *sb)
2973 +{
2974 + struct inode *bd_inode = sb->s_bdev->bd_inode;
2975 + struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
2976 +
2977 + return bdi->dev == NULL;
2978 +}
2979 +
2980 static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
2981 {
2982 struct super_block *sb = journal->j_private;
2983 @@ -4591,7 +4607,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
2984 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
2985 int error = 0;
2986
2987 - if (!sbh)
2988 + if (!sbh || block_device_ejected(sb))
2989 return error;
2990 if (buffer_write_io_error(sbh)) {
2991 /*
2992 @@ -4807,10 +4823,11 @@ static int ext4_freeze(struct super_block *sb)
2993 error = jbd2_journal_flush(journal);
2994 if (error < 0)
2995 goto out;
2996 +
2997 + /* Journal blocked and flushed, clear needs_recovery flag. */
2998 + EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
2999 }
3000
3001 - /* Journal blocked and flushed, clear needs_recovery flag. */
3002 - EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3003 error = ext4_commit_super(sb, 1);
3004 out:
3005 if (journal)
3006 @@ -4828,8 +4845,11 @@ static int ext4_unfreeze(struct super_block *sb)
3007 if (sb->s_flags & MS_RDONLY)
3008 return 0;
3009
3010 - /* Reset the needs_recovery flag before the fs is unlocked. */
3011 - EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3012 + if (EXT4_SB(sb)->s_journal) {
3013 + /* Reset the needs_recovery flag before the fs is unlocked. */
3014 + EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3015 + }
3016 +
3017 ext4_commit_super(sb, 1);
3018 return 0;
3019 }
3020 diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
3021 index d3fa6bd9503e..221719eac5de 100644
3022 --- a/fs/hfs/bnode.c
3023 +++ b/fs/hfs/bnode.c
3024 @@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
3025 page_cache_release(page);
3026 goto fail;
3027 }
3028 - page_cache_release(page);
3029 node->page[i] = page;
3030 }
3031
3032 @@ -398,11 +397,11 @@ node_error:
3033
3034 void hfs_bnode_free(struct hfs_bnode *node)
3035 {
3036 - //int i;
3037 + int i;
3038
3039 - //for (i = 0; i < node->tree->pages_per_bnode; i++)
3040 - // if (node->page[i])
3041 - // page_cache_release(node->page[i]);
3042 + for (i = 0; i < node->tree->pages_per_bnode; i++)
3043 + if (node->page[i])
3044 + page_cache_release(node->page[i]);
3045 kfree(node);
3046 }
3047
3048 diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
3049 index 9f4ee7f52026..6fc766df0461 100644
3050 --- a/fs/hfs/brec.c
3051 +++ b/fs/hfs/brec.c
3052 @@ -131,13 +131,16 @@ skip:
3053 hfs_bnode_write(node, entry, data_off + key_len, entry_len);
3054 hfs_bnode_dump(node);
3055
3056 - if (new_node) {
3057 - /* update parent key if we inserted a key
3058 - * at the start of the first node
3059 - */
3060 - if (!rec && new_node != node)
3061 - hfs_brec_update_parent(fd);
3062 + /*
3063 + * update parent key if we inserted a key
3064 + * at the start of the node and it is not the new node
3065 + */
3066 + if (!rec && new_node != node) {
3067 + hfs_bnode_read_key(node, fd->search_key, data_off + size);
3068 + hfs_brec_update_parent(fd);
3069 + }
3070
3071 + if (new_node) {
3072 hfs_bnode_put(fd->bnode);
3073 if (!new_node->parent) {
3074 hfs_btree_inc_height(tree);
3075 @@ -166,9 +169,6 @@ skip:
3076 goto again;
3077 }
3078
3079 - if (!rec)
3080 - hfs_brec_update_parent(fd);
3081 -
3082 return 0;
3083 }
3084
3085 @@ -366,6 +366,8 @@ again:
3086 if (IS_ERR(parent))
3087 return PTR_ERR(parent);
3088 __hfs_brec_find(parent, fd);
3089 + if (fd->record < 0)
3090 + return -ENOENT;
3091 hfs_bnode_dump(parent);
3092 rec = fd->record;
3093
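The hfs_brec_insert() rework above restores a B-tree invariant: a parent's
separator key must equal the first key of its child, so an insert at record 0
forces a parent update whether or not a split produced a new node. A small
array-based model of that invariant (hypothetical names, not hfs code):

    /* Insert 'key' at position 'rec'; assumes the node has room. */
    struct bnode_model {
        int nkeys;
        int keys[16];
    };

    static void insert_key(struct bnode_model *node, int rec, int key,
                           void (*update_parent)(int first_key))
    {
        for (int i = node->nkeys; i > rec; i--)
            node->keys[i] = node->keys[i - 1];
        node->keys[rec] = key;
        node->nkeys++;
        if (rec == 0)               /* first key changed: fix the parent */
            update_parent(node->keys[0]);
    }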
3094 diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
3095 index 759708fd9331..63924662aaf3 100644
3096 --- a/fs/hfsplus/bnode.c
3097 +++ b/fs/hfsplus/bnode.c
3098 @@ -454,7 +454,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
3099 page_cache_release(page);
3100 goto fail;
3101 }
3102 - page_cache_release(page);
3103 node->page[i] = page;
3104 }
3105
3106 @@ -566,13 +565,11 @@ node_error:
3107
3108 void hfs_bnode_free(struct hfs_bnode *node)
3109 {
3110 -#if 0
3111 int i;
3112
3113 for (i = 0; i < node->tree->pages_per_bnode; i++)
3114 if (node->page[i])
3115 page_cache_release(node->page[i]);
3116 -#endif
3117 kfree(node);
3118 }
3119
3120 diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
3121 index 4227dc4f7437..8c44654ce274 100644
3122 --- a/fs/jbd2/checkpoint.c
3123 +++ b/fs/jbd2/checkpoint.c
3124 @@ -417,12 +417,12 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
3125 * journal_clean_one_cp_list
3126 *
3127 * Find all the written-back checkpoint buffers in the given list and
3128 - * release them.
3129 + * release them. If 'destroy' is set, clean all buffers unconditionally.
3130 *
3131 * Called with j_list_lock held.
3132 * Returns 1 if we freed the transaction, 0 otherwise.
3133 */
3134 -static int journal_clean_one_cp_list(struct journal_head *jh)
3135 +static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
3136 {
3137 struct journal_head *last_jh;
3138 struct journal_head *next_jh = jh;
3139 @@ -436,7 +436,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
3140 do {
3141 jh = next_jh;
3142 next_jh = jh->b_cpnext;
3143 - ret = __try_to_free_cp_buf(jh);
3144 + if (!destroy)
3145 + ret = __try_to_free_cp_buf(jh);
3146 + else
3147 + ret = __jbd2_journal_remove_checkpoint(jh) + 1;
3148 if (!ret)
3149 return freed;
3150 if (ret == 2)
3151 @@ -459,10 +462,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
3152 * journal_clean_checkpoint_list
3153 *
3154 * Find all the written-back checkpoint buffers in the journal and release them.
3155 + * If 'destroy' is set, release all buffers unconditionally.
3156 *
3157 * Called with j_list_lock held.
3158 */
3159 -void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3160 +void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
3161 {
3162 transaction_t *transaction, *last_transaction, *next_transaction;
3163 int ret;
3164 @@ -476,7 +480,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3165 do {
3166 transaction = next_transaction;
3167 next_transaction = transaction->t_cpnext;
3168 - ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
3169 + ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
3170 + destroy);
3171 /*
3172 * This function only frees up some memory if possible so we
3173 * dont have an obligation to finish processing. Bail out if
3174 @@ -492,7 +497,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3175 * we can possibly see not yet submitted buffers on io_list
3176 */
3177 ret = journal_clean_one_cp_list(transaction->
3178 - t_checkpoint_io_list);
3179 + t_checkpoint_io_list, destroy);
3180 if (need_resched())
3181 return;
3182 /*
3183 @@ -506,6 +511,28 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3184 }
3185
3186 /*
3187 + * Remove buffers from all checkpoint lists as the journal is aborted and
3188 + * we just need to free the memory.
3189 + */
3190 +void jbd2_journal_destroy_checkpoint(journal_t *journal)
3191 +{
3192 + /*
3193 + * We loop because __jbd2_journal_clean_checkpoint_list() may abort
3194 + * early due to a need to reschedule.
3195 + */
3196 + while (1) {
3197 + spin_lock(&journal->j_list_lock);
3198 + if (!journal->j_checkpoint_transactions) {
3199 + spin_unlock(&journal->j_list_lock);
3200 + break;
3201 + }
3202 + __jbd2_journal_clean_checkpoint_list(journal, true);
3203 + spin_unlock(&journal->j_list_lock);
3204 + cond_resched();
3205 + }
3206 +}
3207 +
3208 +/*
3209 * journal_remove_checkpoint: called after a buffer has been committed
3210 * to disk (either by being write-back flushed to disk, or being
3211 * committed to the log).
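jbd2_journal_destroy_checkpoint() above is a drain loop that tolerates early
bailout: one cleaning pass may stop to allow rescheduling, so the caller keeps
retaking the lock until the list is truly empty. A pthread-based sketch of the
same shape (illustrative types, not jbd2's):

    #include <pthread.h>
    #include <sched.h>

    struct node { struct node *next; };
    struct journal {
        pthread_mutex_t list_lock;
        struct node *checkpoint_transactions;
    };

    /* One bounded pass; may leave work behind, like need_resched(). */
    static void drain_some(struct journal *j)
    {
        int budget = 16;
        while (j->checkpoint_transactions && budget--)
            j->checkpoint_transactions = j->checkpoint_transactions->next;
    }

    static void destroy_checkpoint(struct journal *j)
    {
        for (;;) {
            pthread_mutex_lock(&j->list_lock);
            if (!j->checkpoint_transactions) {
                pthread_mutex_unlock(&j->list_lock);
                break;
            }
            drain_some(j);
            pthread_mutex_unlock(&j->list_lock);
            sched_yield();          /* the cond_resched() analogue */
        }
    }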
3212 diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
3213 index b73e0215baa7..362e5f614450 100644
3214 --- a/fs/jbd2/commit.c
3215 +++ b/fs/jbd2/commit.c
3216 @@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
3217 * frees some memory
3218 */
3219 spin_lock(&journal->j_list_lock);
3220 - __jbd2_journal_clean_checkpoint_list(journal);
3221 + __jbd2_journal_clean_checkpoint_list(journal, false);
3222 spin_unlock(&journal->j_list_lock);
3223
3224 jbd_debug(3, "JBD2: commit phase 1\n");
3225 diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
3226 index 112fad9e1e20..7003c0925760 100644
3227 --- a/fs/jbd2/journal.c
3228 +++ b/fs/jbd2/journal.c
3229 @@ -1708,8 +1708,17 @@ int jbd2_journal_destroy(journal_t *journal)
3230 while (journal->j_checkpoint_transactions != NULL) {
3231 spin_unlock(&journal->j_list_lock);
3232 mutex_lock(&journal->j_checkpoint_mutex);
3233 - jbd2_log_do_checkpoint(journal);
3234 + err = jbd2_log_do_checkpoint(journal);
3235 mutex_unlock(&journal->j_checkpoint_mutex);
3236 + /*
3237 + * If checkpointing failed, just free the buffers to avoid
3238 + * looping forever
3239 + */
3240 + if (err) {
3241 + jbd2_journal_destroy_checkpoint(journal);
3242 + spin_lock(&journal->j_list_lock);
3243 + break;
3244 + }
3245 spin_lock(&journal->j_list_lock);
3246 }
3247
3248 diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
3249 index 6f5f0f425e86..fecd9201dbad 100644
3250 --- a/fs/nfs/flexfilelayout/flexfilelayout.c
3251 +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
3252 @@ -1039,6 +1039,11 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
3253 hdr->res.verf->committed == NFS_DATA_SYNC)
3254 ff_layout_set_layoutcommit(hdr);
3255
3256 + /* zero out fattr since we don't care about DS attrs at all */
3257 + hdr->fattr.valid = 0;
3258 + if (task->tk_status >= 0)
3259 + nfs_writeback_update_inode(hdr);
3260 +
3261 return 0;
3262 }
3263
3264 diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
3265 index f13e1969eedd..b28fa4cbea52 100644
3266 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
3267 +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
3268 @@ -500,16 +500,19 @@ int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
3269 range->offset, range->length))
3270 continue;
3271 /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
3272 - * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4)
3273 + * + array length + deviceid(NFS4_DEVICEID4_SIZE)
3274 + * + status(4) + opnum(4)
3275 */
3276 p = xdr_reserve_space(xdr,
3277 - 24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
3278 + 28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
3279 if (unlikely(!p))
3280 return -ENOBUFS;
3281 p = xdr_encode_hyper(p, err->offset);
3282 p = xdr_encode_hyper(p, err->length);
3283 p = xdr_encode_opaque_fixed(p, &err->stateid,
3284 NFS4_STATEID_SIZE);
3285 + /* Encode 1 error */
3286 + *p++ = cpu_to_be32(1);
3287 p = xdr_encode_opaque_fixed(p, &err->deviceid,
3288 NFS4_DEVICEID4_SIZE);
3289 *p++ = cpu_to_be32(err->status);
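The 24 -> 28 change above makes the reserved XDR space match what is actually
encoded once the one-element array length is written before the deviceid. The
arithmetic, checked in a standalone C snippet (the two opaque sizes are the
usual NFSv4 wire sizes; treat them as assumptions here):

    #include <assert.h>
    #include <stdint.h>

    #define NFS4_STATEID_SIZE    16  /* seqid(4) + other(12) */
    #define NFS4_DEVICEID4_SIZE  16

    int main(void)
    {
        /* offset(8) + length(8) + array len(4) + status(4) + opnum(4) */
        uint32_t fixed = 8 + 8 + 4 + 4 + 4;

        assert(fixed == 28);        /* was 24 before the array length */
        assert(fixed + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE == 60);
        return 0;
    }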
3290 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
3291 index 5d25b9d97c29..976ba792fbc6 100644
3292 --- a/fs/nfs/inode.c
3293 +++ b/fs/nfs/inode.c
3294 @@ -1270,13 +1270,6 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
3295 return 0;
3296 }
3297
3298 -static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
3299 -{
3300 - if (!(fattr->valid & NFS_ATTR_FATTR_CTIME))
3301 - return 0;
3302 - return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
3303 -}
3304 -
3305 static atomic_long_t nfs_attr_generation_counter;
3306
3307 static unsigned long nfs_read_attr_generation_counter(void)
3308 @@ -1425,7 +1418,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n
3309 const struct nfs_inode *nfsi = NFS_I(inode);
3310
3311 return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
3312 - nfs_ctime_need_update(inode, fattr) ||
3313 ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
3314 }
3315
3316 @@ -1488,6 +1480,13 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr
3317 {
3318 unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
3319
3320 + /*
3321 + * Don't revalidate the pagecache if we hold a delegation, but do
3322 + * force an attribute update
3323 + */
3324 + if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
3325 + invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED;
3326 +
3327 if (S_ISDIR(inode->i_mode))
3328 invalid |= NFS_INO_INVALID_DATA;
3329 nfs_set_cache_invalid(inode, invalid);
3330 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3331 index d3f205126609..c245874d7e9d 100644
3332 --- a/fs/nfs/nfs4proc.c
3333 +++ b/fs/nfs/nfs4proc.c
3334 @@ -1152,6 +1152,8 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
3335 return 0;
3336 if ((delegation->type & fmode) != fmode)
3337 return 0;
3338 + if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
3339 + return 0;
3340 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
3341 return 0;
3342 nfs_mark_delegation_referenced(delegation);
3343 @@ -1216,6 +1218,7 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
3344 }
3345
3346 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
3347 + nfs4_stateid *arg_stateid,
3348 nfs4_stateid *stateid, fmode_t fmode)
3349 {
3350 clear_bit(NFS_O_RDWR_STATE, &state->flags);
3351 @@ -1234,8 +1237,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
3352 if (stateid == NULL)
3353 return;
3354 /* Handle races with OPEN */
3355 - if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
3356 - !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
3357 + if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
3358 + (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
3359 + !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
3360 nfs_resync_open_stateid_locked(state);
3361 return;
3362 }
3363 @@ -1244,10 +1248,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
3364 nfs4_stateid_copy(&state->open_stateid, stateid);
3365 }
3366
3367 -static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
3368 +static void nfs_clear_open_stateid(struct nfs4_state *state,
3369 + nfs4_stateid *arg_stateid,
3370 + nfs4_stateid *stateid, fmode_t fmode)
3371 {
3372 write_seqlock(&state->seqlock);
3373 - nfs_clear_open_stateid_locked(state, stateid, fmode);
3374 + nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
3375 write_sequnlock(&state->seqlock);
3376 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
3377 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
3378 @@ -2413,7 +2419,7 @@ static int _nfs4_do_open(struct inode *dir,
3379 goto err_free_label;
3380 state = ctx->state;
3381
3382 - if ((opendata->o_arg.open_flags & O_EXCL) &&
3383 + if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3384 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3385 nfs4_exclusive_attrset(opendata, sattr);
3386
3387 @@ -2672,7 +2678,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
3388 goto out_release;
3389 }
3390 }
3391 - nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
3392 + nfs_clear_open_stateid(state, &calldata->arg.stateid,
3393 + res_stateid, calldata->arg.fmode);
3394 out_release:
3395 nfs_release_seqid(calldata->arg.seqid);
3396 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
3397 @@ -8571,6 +8578,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
3398 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
3399 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
3400 .state_renewal_ops = &nfs41_state_renewal_ops,
3401 + .mig_recovery_ops = &nfs41_mig_recovery_ops,
3402 };
3403 #endif
3404
3405 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
3406 index 7b4552678536..069914ce7641 100644
3407 --- a/fs/nfs/pagelist.c
3408 +++ b/fs/nfs/pagelist.c
3409 @@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init);
3410 void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
3411 {
3412 spin_lock(&hdr->lock);
3413 - if (pos < hdr->io_start + hdr->good_bytes) {
3414 - set_bit(NFS_IOHDR_ERROR, &hdr->flags);
3415 + if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
3416 + || pos < hdr->io_start + hdr->good_bytes) {
3417 clear_bit(NFS_IOHDR_EOF, &hdr->flags);
3418 hdr->good_bytes = pos - hdr->io_start;
3419 hdr->error = error;
3420 diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
3421 index f37e25b6311c..1705c78ee2d8 100644
3422 --- a/fs/nfs/pnfs_nfs.c
3423 +++ b/fs/nfs/pnfs_nfs.c
3424 @@ -359,26 +359,31 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
3425 return false;
3426 }
3427
3428 +/*
3429 + * Check whether 'dsaddrs1' is a subset of 'dsaddrs2'. If it is,
3430 + * declare a match.
3431 + */
3432 static bool
3433 _same_data_server_addrs_locked(const struct list_head *dsaddrs1,
3434 const struct list_head *dsaddrs2)
3435 {
3436 struct nfs4_pnfs_ds_addr *da1, *da2;
3437 -
3438 - /* step through both lists, comparing as we go */
3439 - for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
3440 - da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
3441 - da1 != NULL && da2 != NULL;
3442 - da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
3443 - da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
3444 - if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
3445 - (struct sockaddr *)&da2->da_addr))
3446 - return false;
3447 + struct sockaddr *sa1, *sa2;
3448 + bool match = false;
3449 +
3450 + list_for_each_entry(da1, dsaddrs1, da_node) {
3451 + sa1 = (struct sockaddr *)&da1->da_addr;
3452 + match = false;
3453 + list_for_each_entry(da2, dsaddrs2, da_node) {
3454 + sa2 = (struct sockaddr *)&da2->da_addr;
3455 + match = same_sockaddr(sa1, sa2);
3456 + if (match)
3457 + break;
3458 + }
3459 + if (!match)
3460 + break;
3461 }
3462 - if (da1 == NULL && da2 == NULL)
3463 - return true;
3464 -
3465 - return false;
3466 + return match;
3467 }
3468
3469 /*
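The rewritten comparison above is a plain subset test: every address in the
first list must appear somewhere in the second. Modeled here with integer
arrays instead of kernel list_heads (names illustrative), including the
detail that an empty first list does not match:

    #include <stdbool.h>
    #include <stddef.h>

    static bool is_subset(const int *set1, size_t n1,
                          const int *set2, size_t n2)
    {
        bool match = false;

        for (size_t i = 0; i < n1; i++) {
            match = false;
            for (size_t j = 0; j < n2; j++) {
                match = (set1[i] == set2[j]);
                if (match)
                    break;
            }
            if (!match)
                break;              /* one miss sinks the whole match */
        }
        return match;               /* false for an empty set1, as above */
    }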
3470 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
3471 index daf355642845..07115b9b1ad2 100644
3472 --- a/fs/nfs/write.c
3473 +++ b/fs/nfs/write.c
3474 @@ -1383,24 +1383,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
3475 {
3476 struct nfs_pgio_args *argp = &hdr->args;
3477 struct nfs_pgio_res *resp = &hdr->res;
3478 + u64 size = argp->offset + resp->count;
3479
3480 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
3481 + fattr->size = size;
3482 + if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
3483 + fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
3484 return;
3485 - if (argp->offset + resp->count != fattr->size)
3486 - return;
3487 - if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
3488 + }
3489 + if (size != fattr->size)
3490 return;
3491 /* Set attribute barrier */
3492 nfs_fattr_set_barrier(fattr);
3493 + /* ...and update size */
3494 + fattr->valid |= NFS_ATTR_FATTR_SIZE;
3495 }
3496
3497 void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
3498 {
3499 - struct nfs_fattr *fattr = hdr->res.fattr;
3500 + struct nfs_fattr *fattr = &hdr->fattr;
3501 struct inode *inode = hdr->inode;
3502
3503 - if (fattr == NULL)
3504 - return;
3505 spin_lock(&inode->i_lock);
3506 nfs_writeback_check_extend(hdr, fattr);
3507 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
3508 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3509 index 6e13504f736e..397798368b1a 100644
3510 --- a/fs/nfsd/nfs4state.c
3511 +++ b/fs/nfsd/nfs4state.c
3512 @@ -777,13 +777,16 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
3513 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
3514 }
3515
3516 -static void
3517 +static bool
3518 unhash_delegation_locked(struct nfs4_delegation *dp)
3519 {
3520 struct nfs4_file *fp = dp->dl_stid.sc_file;
3521
3522 lockdep_assert_held(&state_lock);
3523
3524 + if (list_empty(&dp->dl_perfile))
3525 + return false;
3526 +
3527 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
3528 /* Ensure that deleg break won't try to requeue it */
3529 ++dp->dl_time;
3530 @@ -792,16 +795,21 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
3531 list_del_init(&dp->dl_recall_lru);
3532 list_del_init(&dp->dl_perfile);
3533 spin_unlock(&fp->fi_lock);
3534 + return true;
3535 }
3536
3537 static void destroy_delegation(struct nfs4_delegation *dp)
3538 {
3539 + bool unhashed;
3540 +
3541 spin_lock(&state_lock);
3542 - unhash_delegation_locked(dp);
3543 + unhashed = unhash_delegation_locked(dp);
3544 spin_unlock(&state_lock);
3545 - put_clnt_odstate(dp->dl_clnt_odstate);
3546 - nfs4_put_deleg_lease(dp->dl_stid.sc_file);
3547 - nfs4_put_stid(&dp->dl_stid);
3548 + if (unhashed) {
3549 + put_clnt_odstate(dp->dl_clnt_odstate);
3550 + nfs4_put_deleg_lease(dp->dl_stid.sc_file);
3551 + nfs4_put_stid(&dp->dl_stid);
3552 + }
3553 }
3554
3555 static void revoke_delegation(struct nfs4_delegation *dp)
3556 @@ -1004,16 +1012,20 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
3557 sop->so_ops->so_free(sop);
3558 }
3559
3560 -static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
3561 +static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
3562 {
3563 struct nfs4_file *fp = stp->st_stid.sc_file;
3564
3565 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
3566
3567 + if (list_empty(&stp->st_perfile))
3568 + return false;
3569 +
3570 spin_lock(&fp->fi_lock);
3571 - list_del(&stp->st_perfile);
3572 + list_del_init(&stp->st_perfile);
3573 spin_unlock(&fp->fi_lock);
3574 list_del(&stp->st_perstateowner);
3575 + return true;
3576 }
3577
3578 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
3579 @@ -1063,25 +1075,27 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
3580 list_add(&stp->st_locks, reaplist);
3581 }
3582
3583 -static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
3584 +static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
3585 {
3586 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
3587
3588 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
3589
3590 list_del_init(&stp->st_locks);
3591 - unhash_ol_stateid(stp);
3592 nfs4_unhash_stid(&stp->st_stid);
3593 + return unhash_ol_stateid(stp);
3594 }
3595
3596 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
3597 {
3598 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
3599 + bool unhashed;
3600
3601 spin_lock(&oo->oo_owner.so_client->cl_lock);
3602 - unhash_lock_stateid(stp);
3603 + unhashed = unhash_lock_stateid(stp);
3604 spin_unlock(&oo->oo_owner.so_client->cl_lock);
3605 - nfs4_put_stid(&stp->st_stid);
3606 + if (unhashed)
3607 + nfs4_put_stid(&stp->st_stid);
3608 }
3609
3610 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
3611 @@ -1129,7 +1143,7 @@ static void release_lockowner(struct nfs4_lockowner *lo)
3612 while (!list_empty(&lo->lo_owner.so_stateids)) {
3613 stp = list_first_entry(&lo->lo_owner.so_stateids,
3614 struct nfs4_ol_stateid, st_perstateowner);
3615 - unhash_lock_stateid(stp);
3616 + WARN_ON(!unhash_lock_stateid(stp));
3617 put_ol_stateid_locked(stp, &reaplist);
3618 }
3619 spin_unlock(&clp->cl_lock);
3620 @@ -1142,21 +1156,26 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
3621 {
3622 struct nfs4_ol_stateid *stp;
3623
3624 + lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
3625 +
3626 while (!list_empty(&open_stp->st_locks)) {
3627 stp = list_entry(open_stp->st_locks.next,
3628 struct nfs4_ol_stateid, st_locks);
3629 - unhash_lock_stateid(stp);
3630 + WARN_ON(!unhash_lock_stateid(stp));
3631 put_ol_stateid_locked(stp, reaplist);
3632 }
3633 }
3634
3635 -static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
3636 +static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
3637 struct list_head *reaplist)
3638 {
3639 + bool unhashed;
3640 +
3641 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
3642
3643 - unhash_ol_stateid(stp);
3644 + unhashed = unhash_ol_stateid(stp);
3645 release_open_stateid_locks(stp, reaplist);
3646 + return unhashed;
3647 }
3648
3649 static void release_open_stateid(struct nfs4_ol_stateid *stp)
3650 @@ -1164,8 +1183,8 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp)
3651 LIST_HEAD(reaplist);
3652
3653 spin_lock(&stp->st_stid.sc_client->cl_lock);
3654 - unhash_open_stateid(stp, &reaplist);
3655 - put_ol_stateid_locked(stp, &reaplist);
3656 + if (unhash_open_stateid(stp, &reaplist))
3657 + put_ol_stateid_locked(stp, &reaplist);
3658 spin_unlock(&stp->st_stid.sc_client->cl_lock);
3659 free_ol_stateid_reaplist(&reaplist);
3660 }
3661 @@ -1210,8 +1229,8 @@ static void release_openowner(struct nfs4_openowner *oo)
3662 while (!list_empty(&oo->oo_owner.so_stateids)) {
3663 stp = list_first_entry(&oo->oo_owner.so_stateids,
3664 struct nfs4_ol_stateid, st_perstateowner);
3665 - unhash_open_stateid(stp, &reaplist);
3666 - put_ol_stateid_locked(stp, &reaplist);
3667 + if (unhash_open_stateid(stp, &reaplist))
3668 + put_ol_stateid_locked(stp, &reaplist);
3669 }
3670 spin_unlock(&clp->cl_lock);
3671 free_ol_stateid_reaplist(&reaplist);
3672 @@ -1714,7 +1733,7 @@ __destroy_client(struct nfs4_client *clp)
3673 spin_lock(&state_lock);
3674 while (!list_empty(&clp->cl_delegations)) {
3675 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
3676 - unhash_delegation_locked(dp);
3677 + WARN_ON(!unhash_delegation_locked(dp));
3678 list_add(&dp->dl_recall_lru, &reaplist);
3679 }
3680 spin_unlock(&state_lock);
3681 @@ -4346,7 +4365,7 @@ nfs4_laundromat(struct nfsd_net *nn)
3682 new_timeo = min(new_timeo, t);
3683 break;
3684 }
3685 - unhash_delegation_locked(dp);
3686 + WARN_ON(!unhash_delegation_locked(dp));
3687 list_add(&dp->dl_recall_lru, &reaplist);
3688 }
3689 spin_unlock(&state_lock);
3690 @@ -4714,7 +4733,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3691 if (check_for_locks(stp->st_stid.sc_file,
3692 lockowner(stp->st_stateowner)))
3693 break;
3694 - unhash_lock_stateid(stp);
3695 + WARN_ON(!unhash_lock_stateid(stp));
3696 spin_unlock(&cl->cl_lock);
3697 nfs4_put_stid(s);
3698 ret = nfs_ok;
3699 @@ -4930,20 +4949,23 @@ out:
3700 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
3701 {
3702 struct nfs4_client *clp = s->st_stid.sc_client;
3703 + bool unhashed;
3704 LIST_HEAD(reaplist);
3705
3706 s->st_stid.sc_type = NFS4_CLOSED_STID;
3707 spin_lock(&clp->cl_lock);
3708 - unhash_open_stateid(s, &reaplist);
3709 + unhashed = unhash_open_stateid(s, &reaplist);
3710
3711 if (clp->cl_minorversion) {
3712 - put_ol_stateid_locked(s, &reaplist);
3713 + if (unhashed)
3714 + put_ol_stateid_locked(s, &reaplist);
3715 spin_unlock(&clp->cl_lock);
3716 free_ol_stateid_reaplist(&reaplist);
3717 } else {
3718 spin_unlock(&clp->cl_lock);
3719 free_ol_stateid_reaplist(&reaplist);
3720 - move_to_close_lru(s, clp->net);
3721 + if (unhashed)
3722 + move_to_close_lru(s, clp->net);
3723 }
3724 }
3725
3726 @@ -5982,7 +6004,7 @@ nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
3727
3728 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
3729 struct list_head *collect,
3730 - void (*func)(struct nfs4_ol_stateid *))
3731 + bool (*func)(struct nfs4_ol_stateid *))
3732 {
3733 struct nfs4_openowner *oop;
3734 struct nfs4_ol_stateid *stp, *st_next;
3735 @@ -5996,9 +6018,9 @@ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
3736 list_for_each_entry_safe(lst, lst_next,
3737 &stp->st_locks, st_locks) {
3738 if (func) {
3739 - func(lst);
3740 - nfsd_inject_add_lock_to_list(lst,
3741 - collect);
3742 + if (func(lst))
3743 + nfsd_inject_add_lock_to_list(lst,
3744 + collect);
3745 }
3746 ++count;
3747 /*
3748 @@ -6268,7 +6290,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
3749 continue;
3750
3751 atomic_inc(&clp->cl_refcount);
3752 - unhash_delegation_locked(dp);
3753 + WARN_ON(!unhash_delegation_locked(dp));
3754 list_add(&dp->dl_recall_lru, victims);
3755 }
3756 ++count;
3757 @@ -6598,7 +6620,7 @@ nfs4_state_shutdown_net(struct net *net)
3758 spin_lock(&state_lock);
3759 list_for_each_safe(pos, next, &nn->del_recall_lru) {
3760 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3761 - unhash_delegation_locked(dp);
3762 + WARN_ON(!unhash_delegation_locked(dp));
3763 list_add(&dp->dl_recall_lru, &reaplist);
3764 }
3765 spin_unlock(&state_lock);
3766 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
3767 index d4d84451e0e6..3dd1b616b92b 100644
3768 --- a/fs/nfsd/nfs4xdr.c
3769 +++ b/fs/nfsd/nfs4xdr.c
3770 @@ -2139,6 +2139,27 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
3771 return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
3772 }
3773
3774 +static inline __be32
3775 +nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type)
3776 +{
3777 + __be32 *p;
3778 +
3779 + if (layout_type) {
3780 + p = xdr_reserve_space(xdr, 8);
3781 + if (!p)
3782 + return nfserr_resource;
3783 + *p++ = cpu_to_be32(1);
3784 + *p++ = cpu_to_be32(layout_type);
3785 + } else {
3786 + p = xdr_reserve_space(xdr, 4);
3787 + if (!p)
3788 + return nfserr_resource;
3789 + *p++ = cpu_to_be32(0);
3790 + }
3791 +
3792 + return 0;
3793 +}
3794 +
3795 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
3796 FATTR4_WORD0_RDATTR_ERROR)
3797 #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
3798 @@ -2692,20 +2713,16 @@ out_acl:
3799 p = xdr_encode_hyper(p, stat.ino);
3800 }
3801 #ifdef CONFIG_NFSD_PNFS
3802 - if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) ||
3803 - (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) {
3804 - if (exp->ex_layout_type) {
3805 - p = xdr_reserve_space(xdr, 8);
3806 - if (!p)
3807 - goto out_resource;
3808 - *p++ = cpu_to_be32(1);
3809 - *p++ = cpu_to_be32(exp->ex_layout_type);
3810 - } else {
3811 - p = xdr_reserve_space(xdr, 4);
3812 - if (!p)
3813 - goto out_resource;
3814 - *p++ = cpu_to_be32(0);
3815 - }
3816 + if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
3817 + status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
3818 + if (status)
3819 + goto out;
3820 + }
3821 +
3822 + if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
3823 + status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
3824 + if (status)
3825 + goto out;
3826 }
3827
3828 if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
3829 diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
3830 index edb640ae9a94..eb1cebed3f36 100644
3831 --- a/include/linux/jbd2.h
3832 +++ b/include/linux/jbd2.h
3833 @@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
3834 extern void jbd2_journal_commit_transaction(journal_t *);
3835
3836 /* Checkpoint list management */
3837 -void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
3838 +void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
3839 int __jbd2_journal_remove_checkpoint(struct journal_head *);
3840 +void jbd2_journal_destroy_checkpoint(journal_t *journal);
3841 void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
3842
3843
3844 diff --git a/include/linux/mm.h b/include/linux/mm.h
3845 index 0755b9fd03a7..b2085582d44e 100644
3846 --- a/include/linux/mm.h
3847 +++ b/include/linux/mm.h
3848 @@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page)
3849 }
3850
3851 /*
3852 + * Return true only if the page has been allocated with
3853 + * ALLOC_NO_WATERMARKS and the low watermark was not
3854 + * met, implying that the system is under some pressure.
3855 + */
3856 +static inline bool page_is_pfmemalloc(struct page *page)
3857 +{
3858 + /*
3859 + * Page index cannot be this large so this must be
3860 + * a pfmemalloc page.
3861 + */
3862 + return page->index == -1UL;
3863 +}
3864 +
3865 +/*
3866 + * Only to be called by the page allocator on a freshly allocated
3867 + * page.
3868 + */
3869 +static inline void set_page_pfmemalloc(struct page *page)
3870 +{
3871 + page->index = -1UL;
3872 +}
3873 +
3874 +static inline void clear_page_pfmemalloc(struct page *page)
3875 +{
3876 + page->index = 0;
3877 +}
3878 +
3879 +/*
3880 * Different kinds of faults, as returned by handle_mm_fault().
3881 * Used to decide whether a process gets delivered SIGBUS or
3882 * just gets major/minor fault counters bumped up.
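The three helpers above replace the old page->pfmemalloc bool with a sentinel
encoding: since no page in a mapping can have an index of -1UL, that value can
double as the pfmemalloc marker without widening struct page. The trick in
isolation (a toy struct, not the real struct page):

    #include <stdbool.h>

    struct page { unsigned long index; };

    static void set_page_pfmemalloc(struct page *page)   { page->index = -1UL; }
    static void clear_page_pfmemalloc(struct page *page) { page->index = 0; }

    static bool page_is_pfmemalloc(const struct page *page)
    {
        /* No file offset can be this large, so -1UL is unambiguous. */
        return page->index == -1UL;
    }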
3883 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
3884 index 8d37e26a1007..c0c6b33535fb 100644
3885 --- a/include/linux/mm_types.h
3886 +++ b/include/linux/mm_types.h
3887 @@ -63,15 +63,6 @@ struct page {
3888 union {
3889 pgoff_t index; /* Our offset within mapping. */
3890 void *freelist; /* sl[aou]b first free object */
3891 - bool pfmemalloc; /* If set by the page allocator,
3892 - * ALLOC_NO_WATERMARKS was set
3893 - * and the low watermark was not
3894 - * met implying that the system
3895 - * is under some pressure. The
3896 - * caller should try ensure
3897 - * this page is only used to
3898 - * free other pages.
3899 - */
3900 };
3901
3902 union {
3903 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3904 index f15154a879c7..eb1c55b8255a 100644
3905 --- a/include/linux/skbuff.h
3906 +++ b/include/linux/skbuff.h
3907 @@ -1590,20 +1590,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
3908 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3909
3910 /*
3911 - * Propagate page->pfmemalloc to the skb if we can. The problem is
3912 - * that not all callers have unique ownership of the page. If
3913 - * pfmemalloc is set, we check the mapping as a mapping implies
3914 - * page->index is set (index and pfmemalloc share space).
3915 - * If it's a valid mapping, we cannot use page->pfmemalloc but we
3916 - * do not lose pfmemalloc information as the pages would not be
3917 - * allocated using __GFP_MEMALLOC.
3918 + * Propagate page pfmemalloc to the skb if we can. The problem is
3919 + * that not all callers have unique ownership of the page but rely
3920 + * on page_is_pfmemalloc doing the right thing(tm).
3921 */
3922 frag->page.p = page;
3923 frag->page_offset = off;
3924 skb_frag_size_set(frag, size);
3925
3926 page = compound_head(page);
3927 - if (page->pfmemalloc && !page->mapping)
3928 + if (page_is_pfmemalloc(page))
3929 skb->pfmemalloc = true;
3930 }
3931
3932 @@ -2250,7 +2246,7 @@ static inline struct page *dev_alloc_page(void)
3933 static inline void skb_propagate_pfmemalloc(struct page *page,
3934 struct sk_buff *skb)
3935 {
3936 - if (page && page->pfmemalloc)
3937 + if (page_is_pfmemalloc(page))
3938 skb->pfmemalloc = true;
3939 }
3940
3941 diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
3942 index 7591788e9fbf..357e44c1a46b 100644
3943 --- a/include/linux/sunrpc/xprtsock.h
3944 +++ b/include/linux/sunrpc/xprtsock.h
3945 @@ -42,6 +42,7 @@ struct sock_xprt {
3946 /*
3947 * Connection of transports
3948 */
3949 + unsigned long sock_state;
3950 struct delayed_work connect_worker;
3951 struct sockaddr_storage srcaddr;
3952 unsigned short srcport;
3953 @@ -76,6 +77,8 @@ struct sock_xprt {
3954 */
3955 #define TCP_RPC_REPLY (1UL << 6)
3956
3957 +#define XPRT_SOCK_CONNECTING 1U
3958 +
3959 #endif /* __KERNEL__ */
3960
3961 #endif /* _LINUX_SUNRPC_XPRTSOCK_H */
3962 diff --git a/include/net/act_api.h b/include/net/act_api.h
3963 index 3ee4c92afd1b..931738bc5bba 100644
3964 --- a/include/net/act_api.h
3965 +++ b/include/net/act_api.h
3966 @@ -99,7 +99,6 @@ struct tc_action_ops {
3967
3968 int tcf_hash_search(struct tc_action *a, u32 index);
3969 void tcf_hash_destroy(struct tc_action *a);
3970 -int tcf_hash_release(struct tc_action *a, int bind);
3971 u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
3972 int tcf_hash_check(u32 index, struct tc_action *a, int bind);
3973 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
3974 @@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
3975 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
3976 void tcf_hash_insert(struct tc_action *a);
3977
3978 +int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
3979 +
3980 +static inline int tcf_hash_release(struct tc_action *a, bool bind)
3981 +{
3982 + return __tcf_hash_release(a, bind, false);
3983 +}
3984 +
3985 int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
3986 int tcf_unregister_action(struct tc_action_ops *a);
3987 int tcf_action_destroy(struct list_head *actions, int bind);
3988 diff --git a/include/net/ip.h b/include/net/ip.h
3989 index d14af7edd197..f41fc497b21b 100644
3990 --- a/include/net/ip.h
3991 +++ b/include/net/ip.h
3992 @@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
3993 }
3994
3995 /* datagram.c */
3996 +int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
3997 int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
3998
3999 void ip4_datagram_release_cb(struct sock *sk);
4000 diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
4001 index 63deb8d9f82a..d298857cd845 100644
4002 --- a/include/soc/tegra/mc.h
4003 +++ b/include/soc/tegra/mc.h
4004 @@ -59,6 +59,7 @@ struct tegra_smmu_soc {
4005 bool supports_round_robin_arbitration;
4006 bool supports_request_limit;
4007
4008 + unsigned int num_tlb_lines;
4009 unsigned int num_asids;
4010
4011 const struct tegra_smmu_ops *ops;
4012 diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
4013 index fd1a02cb3c82..003dca933803 100644
4014 --- a/include/trace/events/sunrpc.h
4015 +++ b/include/trace/events/sunrpc.h
4016 @@ -529,18 +529,21 @@ TRACE_EVENT(svc_xprt_do_enqueue,
4017
4018 TP_STRUCT__entry(
4019 __field(struct svc_xprt *, xprt)
4020 - __field(struct svc_rqst *, rqst)
4021 + __field_struct(struct sockaddr_storage, ss)
4022 + __field(int, pid)
4023 + __field(unsigned long, flags)
4024 ),
4025
4026 TP_fast_assign(
4027 __entry->xprt = xprt;
4028 - __entry->rqst = rqst;
4029 + xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
4030 + __entry->pid = rqst ? rqst->rq_task->pid : 0;
4031 + __entry->flags = xprt ? xprt->xpt_flags : 0;
4032 ),
4033
4034 TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
4035 - (struct sockaddr *)&__entry->xprt->xpt_remote,
4036 - __entry->rqst ? __entry->rqst->rq_task->pid : 0,
4037 - show_svc_xprt_flags(__entry->xprt->xpt_flags))
4038 + (struct sockaddr *)&__entry->ss,
4039 + __entry->pid, show_svc_xprt_flags(__entry->flags))
4040 );
4041
4042 TRACE_EVENT(svc_xprt_dequeue,
4043 @@ -589,16 +592,20 @@ TRACE_EVENT(svc_handle_xprt,
4044 TP_STRUCT__entry(
4045 __field(struct svc_xprt *, xprt)
4046 __field(int, len)
4047 + __field_struct(struct sockaddr_storage, ss)
4048 + __field(unsigned long, flags)
4049 ),
4050
4051 TP_fast_assign(
4052 __entry->xprt = xprt;
4053 + xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
4054 __entry->len = len;
4055 + __entry->flags = xprt ? xprt->xpt_flags : 0;
4056 ),
4057
4058 TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
4059 - (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
4060 - show_svc_xprt_flags(__entry->xprt->xpt_flags))
4061 + (struct sockaddr *)&__entry->ss,
4062 + __entry->len, show_svc_xprt_flags(__entry->flags))
4063 );
4064 #endif /* _TRACE_SUNRPC_H */
4065
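Both tracepoint fixes above follow the same rule: a trace record must snapshot
the bytes it will print, not keep pointers into objects that may be freed
before the record is read. A minimal model of snapshot-by-value (struct names
illustrative):

    #include <string.h>

    struct addr_model { char bytes[128]; };

    struct xprt_model {
        struct addr_model remote;
        unsigned long flags;
    };

    struct trace_entry {
        struct addr_model ss;       /* a copy, never a pointer */
        int pid;
        unsigned long flags;
    };

    static void record_event(struct trace_entry *e,
                             const struct xprt_model *xprt, int pid)
    {
        if (xprt) {
            memcpy(&e->ss, &xprt->remote, sizeof(e->ss));
            e->flags = xprt->flags;
        } else {
            memset(&e->ss, 0, sizeof(e->ss));
            e->flags = 0;
        }
        e->pid = pid;
    }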
4066 diff --git a/kernel/fork.c b/kernel/fork.c
4067 index 03c1eaaa6ef5..8209fa2d36ef 100644
4068 --- a/kernel/fork.c
4069 +++ b/kernel/fork.c
4070 @@ -1854,13 +1854,21 @@ static int check_unshare_flags(unsigned long unshare_flags)
4071 CLONE_NEWUSER|CLONE_NEWPID))
4072 return -EINVAL;
4073 /*
4074 - * Not implemented, but pretend it works if there is nothing to
4075 - * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
4076 - * needs to unshare vm.
4077 + * Not implemented, but pretend it works if there is nothing
4078 + * to unshare. Note that unsharing the address space or the
4079 + * signal handlers also needs to unshare the signal queues (aka
4080 + * CLONE_THREAD).
4081 */
4082 if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
4083 - /* FIXME: get_task_mm() increments ->mm_users */
4084 - if (atomic_read(&current->mm->mm_users) > 1)
4085 + if (!thread_group_empty(current))
4086 + return -EINVAL;
4087 + }
4088 + if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
4089 + if (atomic_read(&current->sighand->count) > 1)
4090 + return -EINVAL;
4091 + }
4092 + if (unshare_flags & CLONE_VM) {
4093 + if (!current_is_single_threaded())
4094 return -EINVAL;
4095 }
4096
4097 @@ -1929,16 +1937,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
4098 if (unshare_flags & CLONE_NEWUSER)
4099 unshare_flags |= CLONE_THREAD | CLONE_FS;
4100 /*
4101 - * If unsharing a thread from a thread group, must also unshare vm.
4102 - */
4103 - if (unshare_flags & CLONE_THREAD)
4104 - unshare_flags |= CLONE_VM;
4105 - /*
4106 * If unsharing vm, must also unshare signal handlers.
4107 */
4108 if (unshare_flags & CLONE_VM)
4109 unshare_flags |= CLONE_SIGHAND;
4110 /*
4111 + * If unsharing signal handlers, must also unshare the signal queues.
4112 + */
4113 + if (unshare_flags & CLONE_SIGHAND)
4114 + unshare_flags |= CLONE_THREAD;
4115 + /*
4116 * If unsharing namespace, must also unshare filesystem information.
4117 */
4118 if (unshare_flags & CLONE_NEWNS)
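The check_unshare_flags()/sys_unshare() changes above tighten an implication
chain between flags: a user namespace implies a new thread and fs state,
unsharing the address space implies unsharing signal handlers, and unsharing
signal handlers implies unsharing the signal queues. As a pure function on the
flag word (flag values here are arbitrary stand-ins, not the kernel's):

    #define F_VM        0x01
    #define F_SIGHAND   0x02
    #define F_THREAD    0x04
    #define F_FS        0x08
    #define F_NEWNS     0x10
    #define F_NEWUSER   0x20

    static unsigned long expand_unshare_flags(unsigned long flags)
    {
        if (flags & F_NEWUSER)
            flags |= F_THREAD | F_FS;
        if (flags & F_VM)           /* vm => signal handlers */
            flags |= F_SIGHAND;
        if (flags & F_SIGHAND)      /* signal handlers => signal queues */
            flags |= F_THREAD;
        if (flags & F_NEWNS)        /* mount namespace => fs info */
            flags |= F_FS;
        return flags;
    }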
4119 diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
4120 index 6dd0335ea61b..0234361b24b8 100644
4121 --- a/lib/decompress_bunzip2.c
4122 +++ b/lib/decompress_bunzip2.c
4123 @@ -743,12 +743,12 @@ exit_0:
4124 }
4125
4126 #ifdef PREBOOT
4127 -STATIC int INIT decompress(unsigned char *buf, long len,
4128 +STATIC int INIT __decompress(unsigned char *buf, long len,
4129 long (*fill)(void*, unsigned long),
4130 long (*flush)(void*, unsigned long),
4131 - unsigned char *outbuf,
4132 + unsigned char *outbuf, long olen,
4133 long *pos,
4134 - void(*error)(char *x))
4135 + void (*error)(char *x))
4136 {
4137 return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
4138 }
4139 diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
4140 index d4c7891635ec..555c06bf20da 100644
4141 --- a/lib/decompress_inflate.c
4142 +++ b/lib/decompress_inflate.c
4143 @@ -1,4 +1,5 @@
4144 #ifdef STATIC
4145 +#define PREBOOT
4146 /* Pre-boot environment: included */
4147
4148 /* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
4149 @@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len)
4150 }
4151
4152 /* Included from initramfs et al code */
4153 -STATIC int INIT gunzip(unsigned char *buf, long len,
4154 +STATIC int INIT __gunzip(unsigned char *buf, long len,
4155 long (*fill)(void*, unsigned long),
4156 long (*flush)(void*, unsigned long),
4157 - unsigned char *out_buf,
4158 + unsigned char *out_buf, long out_len,
4159 long *pos,
4160 void(*error)(char *x)) {
4161 u8 *zbuf;
4162 struct z_stream_s *strm;
4163 int rc;
4164 - size_t out_len;
4165
4166 rc = -1;
4167 if (flush) {
4168 out_len = 0x8000; /* 32 K */
4169 out_buf = malloc(out_len);
4170 } else {
4171 - out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
4172 + if (!out_len)
4173 + out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
4174 }
4175 if (!out_buf) {
4176 error("Out of memory while allocating output buffer");
4177 @@ -181,4 +182,24 @@ gunzip_nomem1:
4178 return rc; /* returns Z_OK (0) if successful */
4179 }
4180
4181 -#define decompress gunzip
4182 +#ifndef PREBOOT
4183 +STATIC int INIT gunzip(unsigned char *buf, long len,
4184 + long (*fill)(void*, unsigned long),
4185 + long (*flush)(void*, unsigned long),
4186 + unsigned char *out_buf,
4187 + long *pos,
4188 + void (*error)(char *x))
4189 +{
4190 + return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);
4191 +}
4192 +#else
4193 +STATIC int INIT __decompress(unsigned char *buf, long len,
4194 + long (*fill)(void*, unsigned long),
4195 + long (*flush)(void*, unsigned long),
4196 + unsigned char *out_buf, long out_len,
4197 + long *pos,
4198 + void (*error)(char *x))
4199 +{
4200 + return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error);
4201 +}
4202 +#endif
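The #ifdef PREBOOT split above is an adapter: pre-boot callers get a uniform
__decompress() that carries an output length, while in-kernel callers keep the
old entry point, and a length of 0 still means "no limit". The shape of that
adapter, reduced to a sketch (hypothetical names):

    static int do_inflate(unsigned char *in, long len,
                          unsigned char *out, long out_len)
    {
        if (!out_len)
            out_len = (long)(~0UL >> 1);    /* 0 means "no limit" */
        (void)in; (void)len; (void)out;
        /* ... inflate at most out_len bytes into out ... */
        return 0;
    }

    /* Legacy entry point keeps its signature and delegates with 0. */
    static int gunzip_compat(unsigned char *in, long len, unsigned char *out)
    {
        return do_inflate(in, len, out, 0);
    }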
4203 diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
4204 index 40f66ebe57b7..036fc882cd72 100644
4205 --- a/lib/decompress_unlz4.c
4206 +++ b/lib/decompress_unlz4.c
4207 @@ -196,12 +196,12 @@ exit_0:
4208 }
4209
4210 #ifdef PREBOOT
4211 -STATIC int INIT decompress(unsigned char *buf, long in_len,
4212 +STATIC int INIT __decompress(unsigned char *buf, long in_len,
4213 long (*fill)(void*, unsigned long),
4214 long (*flush)(void*, unsigned long),
4215 - unsigned char *output,
4216 + unsigned char *output, long out_len,
4217 long *posp,
4218 - void(*error)(char *x)
4219 + void (*error)(char *x)
4220 )
4221 {
4222 return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
4223 diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
4224 index 0be83af62b88..decb64629c14 100644
4225 --- a/lib/decompress_unlzma.c
4226 +++ b/lib/decompress_unlzma.c
4227 @@ -667,13 +667,12 @@ exit_0:
4228 }
4229
4230 #ifdef PREBOOT
4231 -STATIC int INIT decompress(unsigned char *buf, long in_len,
4232 +STATIC int INIT __decompress(unsigned char *buf, long in_len,
4233 long (*fill)(void*, unsigned long),
4234 long (*flush)(void*, unsigned long),
4235 - unsigned char *output,
4236 + unsigned char *output, long out_len,
4237 long *posp,
4238 - void(*error)(char *x)
4239 - )
4240 + void (*error)(char *x))
4241 {
4242 return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
4243 }
4244 diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
4245 index b94a31bdd87d..f4c158e3a022 100644
4246 --- a/lib/decompress_unlzo.c
4247 +++ b/lib/decompress_unlzo.c
4248 @@ -31,6 +31,7 @@
4249 */
4250
4251 #ifdef STATIC
4252 +#define PREBOOT
4253 #include "lzo/lzo1x_decompress_safe.c"
4254 #else
4255 #include <linux/decompress/unlzo.h>
4256 @@ -287,4 +288,14 @@ exit:
4257 return ret;
4258 }
4259
4260 -#define decompress unlzo
4261 +#ifdef PREBOOT
4262 +STATIC int INIT __decompress(unsigned char *buf, long len,
4263 + long (*fill)(void*, unsigned long),
4264 + long (*flush)(void*, unsigned long),
4265 + unsigned char *out_buf, long olen,
4266 + long *pos,
4267 + void (*error)(char *x))
4268 +{
4269 + return unlzo(buf, len, fill, flush, out_buf, pos, error);
4270 +}
4271 +#endif
4272 diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
4273 index b07a78340e9d..25d59a95bd66 100644
4274 --- a/lib/decompress_unxz.c
4275 +++ b/lib/decompress_unxz.c
4276 @@ -394,4 +394,14 @@ error_alloc_state:
4277 * This macro is used by architecture-specific files to decompress
4278 * the kernel image.
4279 */
4280 -#define decompress unxz
4281 +#ifdef XZ_PREBOOT
4282 +STATIC int INIT __decompress(unsigned char *buf, long len,
4283 + long (*fill)(void*, unsigned long),
4284 + long (*flush)(void*, unsigned long),
4285 + unsigned char *out_buf, long olen,
4286 + long *pos,
4287 + void (*error)(char *x))
4288 +{
4289 + return unxz(buf, len, fill, flush, out_buf, pos, error);
4290 +}
4291 +#endif
4292 diff --git a/lib/rhashtable.c b/lib/rhashtable.c
4293 index 8609378e6505..cf910e48f8f2 100644
4294 --- a/lib/rhashtable.c
4295 +++ b/lib/rhashtable.c
4296 @@ -612,6 +612,8 @@ next:
4297 iter->skip = 0;
4298 }
4299
4300 + iter->p = NULL;
4301 +
4302 /* Ensure we see any new tables. */
4303 smp_rmb();
4304
4305 @@ -622,8 +624,6 @@ next:
4306 return ERR_PTR(-EAGAIN);
4307 }
4308
4309 - iter->p = NULL;
4310 -
4311 out:
4312
4313 return obj;
4314 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4315 index ebffa0e4a9c0..18490f3bd7f1 100644
4316 --- a/mm/page_alloc.c
4317 +++ b/mm/page_alloc.c
4318 @@ -983,12 +983,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
4319 set_page_owner(page, order, gfp_flags);
4320
4321 /*
4322 - * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
4323 + * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
4324 * allocate the page. The expectation is that the caller is taking
4325 * steps that will free more memory. The caller should avoid the page
4326 * being used for !PFMEMALLOC purposes.
4327 */
4328 - page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
4329 + if (alloc_flags & ALLOC_NO_WATERMARKS)
4330 + set_page_pfmemalloc(page);
4331 + else
4332 + clear_page_pfmemalloc(page);
4333
4334 return 0;
4335 }
4336 diff --git a/mm/slab.c b/mm/slab.c
4337 index 7eb38dd1cefa..3dd2d1ff9d5d 100644
4338 --- a/mm/slab.c
4339 +++ b/mm/slab.c
4340 @@ -1602,7 +1602,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
4341 }
4342
4343 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
4344 - if (unlikely(page->pfmemalloc))
4345 + if (page_is_pfmemalloc(page))
4346 pfmemalloc_active = true;
4347
4348 nr_pages = (1 << cachep->gfporder);
4349 @@ -1613,7 +1613,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
4350 add_zone_page_state(page_zone(page),
4351 NR_SLAB_UNRECLAIMABLE, nr_pages);
4352 __SetPageSlab(page);
4353 - if (page->pfmemalloc)
4354 + if (page_is_pfmemalloc(page))
4355 SetPageSlabPfmemalloc(page);
4356
4357 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
4358 diff --git a/mm/slub.c b/mm/slub.c
4359 index 54c0876b43d5..08342c523a85 100644
4360 --- a/mm/slub.c
4361 +++ b/mm/slub.c
4362 @@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
4363 inc_slabs_node(s, page_to_nid(page), page->objects);
4364 page->slab_cache = s;
4365 __SetPageSlab(page);
4366 - if (page->pfmemalloc)
4367 + if (page_is_pfmemalloc(page))
4368 SetPageSlabPfmemalloc(page);
4369
4370 start = page_address(page);
4371 diff --git a/mm/vmscan.c b/mm/vmscan.c
4372 index 0d024fc8aa8e..1a17bd7c0ce5 100644
4373 --- a/mm/vmscan.c
4374 +++ b/mm/vmscan.c
4375 @@ -1153,7 +1153,7 @@ cull_mlocked:
4376 if (PageSwapCache(page))
4377 try_to_free_swap(page);
4378 unlock_page(page);
4379 - putback_lru_page(page);
4380 + list_add(&page->lru, &ret_pages);
4381 continue;
4382
4383 activate_locked:
4384 diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
4385 index e97572b5d2cc..0ff6e1bbca91 100644
4386 --- a/net/bridge/br_forward.c
4387 +++ b/net/bridge/br_forward.c
4388 @@ -42,6 +42,7 @@ int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
4389 } else {
4390 skb_push(skb, ETH_HLEN);
4391 br_drop_fake_rtable(skb);
4392 + skb_sender_cpu_clear(skb);
4393 dev_queue_xmit(skb);
4394 }
4395
4396 diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
4397 index e29ad70b3000..d1f910c0d586 100644
4398 --- a/net/bridge/br_mdb.c
4399 +++ b/net/bridge/br_mdb.c
4400 @@ -348,7 +348,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
4401 return -ENOMEM;
4402 rcu_assign_pointer(*pp, p);
4403
4404 - br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
4405 return 0;
4406 }
4407
4408 @@ -371,6 +370,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
4409 if (!p || p->br != br || p->state == BR_STATE_DISABLED)
4410 return -EINVAL;
4411
4412 + memset(&ip, 0, sizeof(ip));
4413 ip.proto = entry->addr.proto;
4414 if (ip.proto == htons(ETH_P_IP))
4415 ip.u.ip4 = entry->addr.u.ip4;
4416 @@ -417,6 +417,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
4417 if (!netif_running(br->dev) || br->multicast_disabled)
4418 return -EINVAL;
4419
4420 + memset(&ip, 0, sizeof(ip));
4421 ip.proto = entry->addr.proto;
4422 if (ip.proto == htons(ETH_P_IP)) {
4423 if (timer_pending(&br->ip4_other_query.timer))
4424 diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
4425 index 4b5c236998ff..a7559ef312bd 100644
4426 --- a/net/bridge/br_netlink.c
4427 +++ b/net/bridge/br_netlink.c
4428 @@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
4429 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
4430 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
4431 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
4432 + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
4433 + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
4434 + 0;
4435 }
4436
4437 @@ -504,6 +506,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
4438 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
4439 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
4440 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
4441 + [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
4442 + [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
4443 };
4444
4445 /* Change the state of the port and notify spanning tree */
4446 @@ -711,9 +715,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
4447 struct nlattr *tb[],
4448 struct nlattr *data[])
4449 {
4450 + struct net_bridge *br = netdev_priv(brdev);
4451 + int ret;
4452 +
4453 if (!data)
4454 return 0;
4455 - return br_setport(br_port_get_rtnl(dev), data);
4456 +
4457 + spin_lock_bh(&br->lock);
4458 + ret = br_setport(br_port_get_rtnl(dev), data);
4459 + spin_unlock_bh(&br->lock);
4460 +
4461 + return ret;
4462 }
4463
4464 static int br_port_fill_slave_info(struct sk_buff *skb,
4465 diff --git a/net/core/datagram.c b/net/core/datagram.c
4466 index b80fb91bb3f7..617088aee21d 100644
4467 --- a/net/core/datagram.c
4468 +++ b/net/core/datagram.c
4469 @@ -131,6 +131,35 @@ out_noerr:
4470 goto out;
4471 }
4472
4473 +static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
4474 +{
4475 + struct sk_buff *nskb;
4476 +
4477 + if (skb->peeked)
4478 + return skb;
4479 +
4480 + /* We have to unshare an skb before modifying it. */
4481 + if (!skb_shared(skb))
4482 + goto done;
4483 +
4484 + nskb = skb_clone(skb, GFP_ATOMIC);
4485 + if (!nskb)
4486 + return ERR_PTR(-ENOMEM);
4487 +
4488 + skb->prev->next = nskb;
4489 + skb->next->prev = nskb;
4490 + nskb->prev = skb->prev;
4491 + nskb->next = skb->next;
4492 +
4493 + consume_skb(skb);
4494 + skb = nskb;
4495 +
4496 +done:
4497 + skb->peeked = 1;
4498 +
4499 + return skb;
4500 +}
4501 +
4502 /**
4503 * __skb_recv_datagram - Receive a datagram skbuff
4504 * @sk: socket
4505 @@ -165,7 +194,9 @@ out_noerr:
4506 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
4507 int *peeked, int *off, int *err)
4508 {
4509 + struct sk_buff_head *queue = &sk->sk_receive_queue;
4510 struct sk_buff *skb, *last;
4511 + unsigned long cpu_flags;
4512 long timeo;
4513 /*
4514 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
4515 @@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
4516 * Look at current nfs client by the way...
4517 * However, this function was correct in any case. 8)
4518 */
4519 - unsigned long cpu_flags;
4520 - struct sk_buff_head *queue = &sk->sk_receive_queue;
4521 int _off = *off;
4522
4523 last = (struct sk_buff *)queue;
4524 @@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
4525 _off -= skb->len;
4526 continue;
4527 }
4528 - skb->peeked = 1;
4529 +
4530 + skb = skb_set_peeked(skb);
4531 + error = PTR_ERR(skb);
4532 + if (IS_ERR(skb))
4533 + goto unlock_err;
4534 +
4535 atomic_inc(&skb->users);
4536 } else
4537 __skb_unlink(skb, queue);
4538 @@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
4539
4540 return NULL;
4541
4542 +unlock_err:
4543 + spin_unlock_irqrestore(&queue->lock, cpu_flags);
4544 no_packet:
4545 *err = error;
4546 return NULL;
4547 @@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
4548 !skb->csum_complete_sw)
4549 netdev_rx_csum_fault(skb->dev);
4550 }
4551 - skb->csum_valid = !sum;
4552 + if (!skb_shared(skb))
4553 + skb->csum_valid = !sum;
4554 return sum;
4555 }
4556 EXPORT_SYMBOL(__skb_checksum_complete_head);
4557 @@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
4558 netdev_rx_csum_fault(skb->dev);
4559 }
4560
4561 - /* Save full packet checksum */
4562 - skb->csum = csum;
4563 - skb->ip_summed = CHECKSUM_COMPLETE;
4564 - skb->csum_complete_sw = 1;
4565 - skb->csum_valid = !sum;
4566 + if (!skb_shared(skb)) {
4567 + /* Save full packet checksum */
4568 + skb->csum = csum;
4569 + skb->ip_summed = CHECKSUM_COMPLETE;
4570 + skb->csum_complete_sw = 1;
4571 + skb->csum_valid = !sum;
4572 + }
4573
4574 return sum;
4575 }
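skb_set_peeked() above applies a copy-before-write rule: a buffer that is
still shared must be cloned before its 'peeked' flag is set, and only the
unshared copy is mutated. The same pattern on a refcounted user-space buffer
(illustrative struct, not an skb):

    #include <stdlib.h>
    #include <string.h>

    struct buf {
        int refcnt;
        int peeked;
        char data[64];
    };

    static struct buf *buf_set_peeked(struct buf *b)
    {
        struct buf *nb;

        if (b->peeked)
            return b;               /* already marked, nothing to do */
        if (b->refcnt == 1)
            goto done;              /* sole owner: mutate in place */

        nb = malloc(sizeof(*nb));   /* shared: clone first */
        if (!nb)
            return NULL;            /* caller turns this into -ENOMEM */
        memcpy(nb, b, sizeof(*nb));
        nb->refcnt = 1;
        b->refcnt--;                /* drop our reference on the original */
        b = nb;
    done:
        b->peeked = 1;
        return b;
    }

The same rule explains the __skb_checksum_complete*() hunks in this file: a
shared skb must not have its checksum fields rewritten either.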
4576 diff --git a/net/core/dev.c b/net/core/dev.c
4577 index aa82f9ab6a36..a42b232805a5 100644
4578 --- a/net/core/dev.c
4579 +++ b/net/core/dev.c
4580 @@ -672,10 +672,6 @@ int dev_get_iflink(const struct net_device *dev)
4581 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
4582 return dev->netdev_ops->ndo_get_iflink(dev);
4583
4584 - /* If dev->rtnl_link_ops is set, it's a virtual interface. */
4585 - if (dev->rtnl_link_ops)
4586 - return 0;
4587 -
4588 return dev->ifindex;
4589 }
4590 EXPORT_SYMBOL(dev_get_iflink);
4591 @@ -3341,6 +3337,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4592 local_irq_save(flags);
4593
4594 rps_lock(sd);
4595 + if (!netif_running(skb->dev))
4596 + goto drop;
4597 qlen = skb_queue_len(&sd->input_pkt_queue);
4598 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4599 if (qlen) {
4600 @@ -3362,6 +3360,7 @@ enqueue:
4601 goto enqueue;
4602 }
4603
4604 +drop:
4605 sd->dropped++;
4606 rps_unlock(sd);
4607
4608 @@ -3667,8 +3666,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
4609
4610 pt_prev = NULL;
4611
4612 - rcu_read_lock();
4613 -
4614 another_round:
4615 skb->skb_iif = skb->dev->ifindex;
4616
4617 @@ -3678,7 +3675,7 @@ another_round:
4618 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4619 skb = skb_vlan_untag(skb);
4620 if (unlikely(!skb))
4621 - goto unlock;
4622 + goto out;
4623 }
4624
4625 #ifdef CONFIG_NET_CLS_ACT
4626 @@ -3708,7 +3705,7 @@ skip_taps:
4627 if (static_key_false(&ingress_needed)) {
4628 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
4629 if (!skb)
4630 - goto unlock;
4631 + goto out;
4632 }
4633
4634 skb->tc_verd = 0;
4635 @@ -3725,7 +3722,7 @@ ncls:
4636 if (vlan_do_receive(&skb))
4637 goto another_round;
4638 else if (unlikely(!skb))
4639 - goto unlock;
4640 + goto out;
4641 }
4642
4643 rx_handler = rcu_dereference(skb->dev->rx_handler);
4644 @@ -3737,7 +3734,7 @@ ncls:
4645 switch (rx_handler(&skb)) {
4646 case RX_HANDLER_CONSUMED:
4647 ret = NET_RX_SUCCESS;
4648 - goto unlock;
4649 + goto out;
4650 case RX_HANDLER_ANOTHER:
4651 goto another_round;
4652 case RX_HANDLER_EXACT:
4653 @@ -3791,8 +3788,7 @@ drop:
4654 ret = NET_RX_DROP;
4655 }
4656
4657 -unlock:
4658 - rcu_read_unlock();
4659 +out:
4660 return ret;
4661 }
4662
4663 @@ -3823,29 +3819,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
4664
4665 static int netif_receive_skb_internal(struct sk_buff *skb)
4666 {
4667 + int ret;
4668 +
4669 net_timestamp_check(netdev_tstamp_prequeue, skb);
4670
4671 if (skb_defer_rx_timestamp(skb))
4672 return NET_RX_SUCCESS;
4673
4674 + rcu_read_lock();
4675 +
4676 #ifdef CONFIG_RPS
4677 if (static_key_false(&rps_needed)) {
4678 struct rps_dev_flow voidflow, *rflow = &voidflow;
4679 - int cpu, ret;
4680 -
4681 - rcu_read_lock();
4682 -
4683 - cpu = get_rps_cpu(skb->dev, skb, &rflow);
4684 + int cpu = get_rps_cpu(skb->dev, skb, &rflow);
4685
4686 if (cpu >= 0) {
4687 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4688 rcu_read_unlock();
4689 return ret;
4690 }
4691 - rcu_read_unlock();
4692 }
4693 #endif
4694 - return __netif_receive_skb(skb);
4695 + ret = __netif_receive_skb(skb);
4696 + rcu_read_unlock();
4697 + return ret;
4698 }
4699
4700 /**
4701 @@ -4390,8 +4387,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
4702 struct sk_buff *skb;
4703
4704 while ((skb = __skb_dequeue(&sd->process_queue))) {
4705 + rcu_read_lock();
4706 local_irq_enable();
4707 __netif_receive_skb(skb);
4708 + rcu_read_unlock();
4709 local_irq_disable();
4710 input_queue_head_incr(sd);
4711 if (++work >= quota) {
4712 @@ -6027,6 +6026,7 @@ static void rollback_registered_many(struct list_head *head)
4713 unlist_netdevice(dev);
4714
4715 dev->reg_state = NETREG_UNREGISTERING;
4716 + on_each_cpu(flush_backlog, dev, 1);
4717 }
4718
4719 synchronize_net();
4720 @@ -6297,7 +6297,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
4721 struct netdev_queue *tx;
4722 size_t sz = count * sizeof(*tx);
4723
4724 - BUG_ON(count < 1 || count > 0xffff);
4725 + if (count < 1 || count > 0xffff)
4726 + return -EINVAL;
4727
4728 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
4729 if (!tx) {
4730 @@ -6650,8 +6651,6 @@ void netdev_run_todo(void)
4731
4732 dev->reg_state = NETREG_UNREGISTERED;
4733
4734 - on_each_cpu(flush_backlog, dev, 1);
4735 -
4736 netdev_wait_allrefs(dev);
4737
4738 /* paranoia */
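
The net/core/dev.c hunks above cooperate: the RCU read-side critical section is hoisted out of __netif_receive_skb_core() into netif_receive_skb_internal() and process_backlog(), enqueue_to_backlog() now drops packets for devices that are no longer running, flush_backlog() runs at unregistration time so stale queued skbs cannot pin a doomed device, and the queue-count BUG_ON() becomes a recoverable -EINVAL. A rough sketch of the lock-hoisting pattern, where deliver() is a hypothetical stand-in for the real receive path:

        int receive_internal(struct sk_buff *skb)
        {
                int ret;

                rcu_read_lock();        /* device state stays stable for the    */
                ret = deliver(skb);     /* whole decision *and* delivery window */
                rcu_read_unlock();
                return ret;
        }
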
4739 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
4740 index 508155b283dd..043ea1867d0f 100644
4741 --- a/net/core/pktgen.c
4742 +++ b/net/core/pktgen.c
4743 @@ -3490,8 +3490,10 @@ static int pktgen_thread_worker(void *arg)
4744 pktgen_rem_thread(t);
4745
4746 /* Wait for kthread_stop */
4747 - while (!kthread_should_stop()) {
4748 + for (;;) {
4749 set_current_state(TASK_INTERRUPTIBLE);
4750 + if (kthread_should_stop())
4751 + break;
4752 schedule();
4753 }
4754 __set_current_state(TASK_RUNNING);
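
The pktgen change above is the canonical fix for a lost-wakeup race: the old loop tested kthread_should_stop() while still TASK_RUNNING, so a kthread_stop() arriving between the test and schedule() could leave the thread asleep forever. Setting the task state before re-testing the condition closes the window; a wakeup landing in between resets the state to TASK_RUNNING and schedule() returns immediately. The safe shape, as in the patch:

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE); /* 1: publish "about to sleep"  */
                if (kthread_should_stop())             /* 2: re-check under that state */
                        break;
                schedule();                            /* 3: wakeups since step 1 are not lost */
        }
        __set_current_state(TASK_RUNNING);
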
4755 diff --git a/net/core/request_sock.c b/net/core/request_sock.c
4756 index 87b22c0bc08c..b42f0e26f89e 100644
4757 --- a/net/core/request_sock.c
4758 +++ b/net/core/request_sock.c
4759 @@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
4760 spin_lock_bh(&queue->syn_wait_lock);
4761 while ((req = lopt->syn_table[i]) != NULL) {
4762 lopt->syn_table[i] = req->dl_next;
4763 + /* Because of the following del_timer_sync(),
4764 + * we must release the spinlock here
4765 + * or risk a deadlock.
4766 + */
4767 + spin_unlock_bh(&queue->syn_wait_lock);
4768 atomic_inc(&lopt->qlen_dec);
4769 - if (del_timer(&req->rsk_timer))
4770 + if (del_timer_sync(&req->rsk_timer))
4771 reqsk_put(req);
4772 reqsk_put(req);
4773 + spin_lock_bh(&queue->syn_wait_lock);
4774 }
4775 spin_unlock_bh(&queue->syn_wait_lock);
4776 }
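
del_timer_sync() waits for a concurrently running timer handler to finish; if that handler takes the same lock the caller is holding, the two spin on each other forever, which is why the hunk above drops syn_wait_lock around the call and re-takes it afterwards. In miniature, with pick_next(), obj and obj_put() as illustrative placeholders rather than kernel symbols:

        spin_lock_bh(&lock);
        while ((obj = pick_next()) != NULL) {
                spin_unlock_bh(&lock);          /* handler may need this lock */
                if (del_timer_sync(&obj->timer))
                        obj_put(obj);           /* drop the timer's reference */
                obj_put(obj);                   /* drop the table's reference */
                spin_lock_bh(&lock);
        }
        spin_unlock_bh(&lock);
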
4777 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4778 index 8de36824018d..fe95cb704aaa 100644
4779 --- a/net/core/rtnetlink.c
4780 +++ b/net/core/rtnetlink.c
4781 @@ -1287,10 +1287,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
4782 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
4783 };
4784
4785 -static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
4786 - [IFLA_VF_INFO] = { .type = NLA_NESTED },
4787 -};
4788 -
4789 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
4790 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
4791 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
4792 @@ -1437,96 +1433,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
4793 return 0;
4794 }
4795
4796 -static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
4797 +static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4798 {
4799 - int rem, err = -EINVAL;
4800 - struct nlattr *vf;
4801 const struct net_device_ops *ops = dev->netdev_ops;
4802 + int err = -EINVAL;
4803
4804 - nla_for_each_nested(vf, attr, rem) {
4805 - switch (nla_type(vf)) {
4806 - case IFLA_VF_MAC: {
4807 - struct ifla_vf_mac *ivm;
4808 - ivm = nla_data(vf);
4809 - err = -EOPNOTSUPP;
4810 - if (ops->ndo_set_vf_mac)
4811 - err = ops->ndo_set_vf_mac(dev, ivm->vf,
4812 - ivm->mac);
4813 - break;
4814 - }
4815 - case IFLA_VF_VLAN: {
4816 - struct ifla_vf_vlan *ivv;
4817 - ivv = nla_data(vf);
4818 - err = -EOPNOTSUPP;
4819 - if (ops->ndo_set_vf_vlan)
4820 - err = ops->ndo_set_vf_vlan(dev, ivv->vf,
4821 - ivv->vlan,
4822 - ivv->qos);
4823 - break;
4824 - }
4825 - case IFLA_VF_TX_RATE: {
4826 - struct ifla_vf_tx_rate *ivt;
4827 - struct ifla_vf_info ivf;
4828 - ivt = nla_data(vf);
4829 - err = -EOPNOTSUPP;
4830 - if (ops->ndo_get_vf_config)
4831 - err = ops->ndo_get_vf_config(dev, ivt->vf,
4832 - &ivf);
4833 - if (err)
4834 - break;
4835 - err = -EOPNOTSUPP;
4836 - if (ops->ndo_set_vf_rate)
4837 - err = ops->ndo_set_vf_rate(dev, ivt->vf,
4838 - ivf.min_tx_rate,
4839 - ivt->rate);
4840 - break;
4841 - }
4842 - case IFLA_VF_RATE: {
4843 - struct ifla_vf_rate *ivt;
4844 - ivt = nla_data(vf);
4845 - err = -EOPNOTSUPP;
4846 - if (ops->ndo_set_vf_rate)
4847 - err = ops->ndo_set_vf_rate(dev, ivt->vf,
4848 - ivt->min_tx_rate,
4849 - ivt->max_tx_rate);
4850 - break;
4851 - }
4852 - case IFLA_VF_SPOOFCHK: {
4853 - struct ifla_vf_spoofchk *ivs;
4854 - ivs = nla_data(vf);
4855 - err = -EOPNOTSUPP;
4856 - if (ops->ndo_set_vf_spoofchk)
4857 - err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
4858 - ivs->setting);
4859 - break;
4860 - }
4861 - case IFLA_VF_LINK_STATE: {
4862 - struct ifla_vf_link_state *ivl;
4863 - ivl = nla_data(vf);
4864 - err = -EOPNOTSUPP;
4865 - if (ops->ndo_set_vf_link_state)
4866 - err = ops->ndo_set_vf_link_state(dev, ivl->vf,
4867 - ivl->link_state);
4868 - break;
4869 - }
4870 - case IFLA_VF_RSS_QUERY_EN: {
4871 - struct ifla_vf_rss_query_en *ivrssq_en;
4872 + if (tb[IFLA_VF_MAC]) {
4873 + struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
4874
4875 - ivrssq_en = nla_data(vf);
4876 - err = -EOPNOTSUPP;
4877 - if (ops->ndo_set_vf_rss_query_en)
4878 - err = ops->ndo_set_vf_rss_query_en(dev,
4879 - ivrssq_en->vf,
4880 - ivrssq_en->setting);
4881 - break;
4882 - }
4883 - default:
4884 - err = -EINVAL;
4885 - break;
4886 - }
4887 - if (err)
4888 - break;
4889 + err = -EOPNOTSUPP;
4890 + if (ops->ndo_set_vf_mac)
4891 + err = ops->ndo_set_vf_mac(dev, ivm->vf,
4892 + ivm->mac);
4893 + if (err < 0)
4894 + return err;
4895 + }
4896 +
4897 + if (tb[IFLA_VF_VLAN]) {
4898 + struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
4899 +
4900 + err = -EOPNOTSUPP;
4901 + if (ops->ndo_set_vf_vlan)
4902 + err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
4903 + ivv->qos);
4904 + if (err < 0)
4905 + return err;
4906 + }
4907 +
4908 + if (tb[IFLA_VF_TX_RATE]) {
4909 + struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
4910 + struct ifla_vf_info ivf;
4911 +
4912 + err = -EOPNOTSUPP;
4913 + if (ops->ndo_get_vf_config)
4914 + err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
4915 + if (err < 0)
4916 + return err;
4917 +
4918 + err = -EOPNOTSUPP;
4919 + if (ops->ndo_set_vf_rate)
4920 + err = ops->ndo_set_vf_rate(dev, ivt->vf,
4921 + ivf.min_tx_rate,
4922 + ivt->rate);
4923 + if (err < 0)
4924 + return err;
4925 + }
4926 +
4927 + if (tb[IFLA_VF_RATE]) {
4928 + struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
4929 +
4930 + err = -EOPNOTSUPP;
4931 + if (ops->ndo_set_vf_rate)
4932 + err = ops->ndo_set_vf_rate(dev, ivt->vf,
4933 + ivt->min_tx_rate,
4934 + ivt->max_tx_rate);
4935 + if (err < 0)
4936 + return err;
4937 }
4938 +
4939 + if (tb[IFLA_VF_SPOOFCHK]) {
4940 + struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
4941 +
4942 + err = -EOPNOTSUPP;
4943 + if (ops->ndo_set_vf_spoofchk)
4944 + err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
4945 + ivs->setting);
4946 + if (err < 0)
4947 + return err;
4948 + }
4949 +
4950 + if (tb[IFLA_VF_LINK_STATE]) {
4951 + struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
4952 +
4953 + err = -EOPNOTSUPP;
4954 + if (ops->ndo_set_vf_link_state)
4955 + err = ops->ndo_set_vf_link_state(dev, ivl->vf,
4956 + ivl->link_state);
4957 + if (err < 0)
4958 + return err;
4959 + }
4960 +
4961 + if (tb[IFLA_VF_RSS_QUERY_EN]) {
4962 + struct ifla_vf_rss_query_en *ivrssq_en;
4963 +
4964 + err = -EOPNOTSUPP;
4965 + ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
4966 + if (ops->ndo_set_vf_rss_query_en)
4967 + err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
4968 + ivrssq_en->setting);
4969 + if (err < 0)
4970 + return err;
4971 + }
4972 +
4973 return err;
4974 }
4975
4976 @@ -1722,14 +1720,21 @@ static int do_setlink(const struct sk_buff *skb,
4977 }
4978
4979 if (tb[IFLA_VFINFO_LIST]) {
4980 + struct nlattr *vfinfo[IFLA_VF_MAX + 1];
4981 struct nlattr *attr;
4982 int rem;
4983 +
4984 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
4985 - if (nla_type(attr) != IFLA_VF_INFO) {
4986 + if (nla_type(attr) != IFLA_VF_INFO ||
4987 + nla_len(attr) < NLA_HDRLEN) {
4988 err = -EINVAL;
4989 goto errout;
4990 }
4991 - err = do_setvfinfo(dev, attr);
4992 + err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
4993 + ifla_vf_policy);
4994 + if (err < 0)
4995 + goto errout;
4996 + err = do_setvfinfo(dev, vfinfo);
4997 if (err < 0)
4998 goto errout;
4999 status |= DO_SETLINK_NOTIFY;
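
The rewritten do_setvfinfo() works from a policy-validated attribute table instead of walking raw nested attributes, so every nla_data() dereference is preceded by a length check against ifla_vf_policy; the old loop could read past attributes shorter than the struct it cast them to. The consumer side reduces to a sketch like this:

        struct nlattr *vf[IFLA_VF_MAX + 1];
        int err;

        err = nla_parse_nested(vf, IFLA_VF_MAX, attr, ifla_vf_policy);
        if (err < 0)
                return err;     /* malformed or under-sized attribute */

        if (vf[IFLA_VF_MAC]) {
                /* safe: policy guarantees at least sizeof(struct ifla_vf_mac) */
                struct ifla_vf_mac *ivm = nla_data(vf[IFLA_VF_MAC]);
                /* act on ivm->vf and ivm->mac here */
        }
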
5000 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
5001 index 41ec02242ea7..a2e4e47b2839 100644
5002 --- a/net/core/skbuff.c
5003 +++ b/net/core/skbuff.c
5004 @@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
5005
5006 if (skb && frag_size) {
5007 skb->head_frag = 1;
5008 - if (virt_to_head_page(data)->pfmemalloc)
5009 + if (page_is_pfmemalloc(virt_to_head_page(data)))
5010 skb->pfmemalloc = 1;
5011 }
5012 return skb;
5013 diff --git a/net/dsa/slave.c b/net/dsa/slave.c
5014 index 827cda560a55..57978c5b2c91 100644
5015 --- a/net/dsa/slave.c
5016 +++ b/net/dsa/slave.c
5017 @@ -732,7 +732,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
5018 return -ENODEV;
5019
5020 /* Use already configured phy mode */
5021 - p->phy_interface = p->phy->interface;
5022 + if (p->phy_interface == PHY_INTERFACE_MODE_NA)
5023 + p->phy_interface = p->phy->interface;
5024 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
5025 p->phy_interface);
5026
5027 diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
5028 index 90c0e8386116..574fad9cca05 100644
5029 --- a/net/ipv4/datagram.c
5030 +++ b/net/ipv4/datagram.c
5031 @@ -20,7 +20,7 @@
5032 #include <net/route.h>
5033 #include <net/tcp_states.h>
5034
5035 -int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5036 +int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5037 {
5038 struct inet_sock *inet = inet_sk(sk);
5039 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
5040 @@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5041
5042 sk_dst_reset(sk);
5043
5044 - lock_sock(sk);
5045 -
5046 oif = sk->sk_bound_dev_if;
5047 saddr = inet->inet_saddr;
5048 if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
5049 @@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5050 sk_dst_set(sk, &rt->dst);
5051 err = 0;
5052 out:
5053 - release_sock(sk);
5054 return err;
5055 }
5056 +EXPORT_SYMBOL(__ip4_datagram_connect);
5057 +
5058 +int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5059 +{
5060 + int res;
5061 +
5062 + lock_sock(sk);
5063 + res = __ip4_datagram_connect(sk, uaddr, addr_len);
5064 + release_sock(sk);
5065 + return res;
5066 +}
5067 EXPORT_SYMBOL(ip4_datagram_connect);
5068
5069 /* Because UDP xmit path can manipulate sk_dst_cache without holding
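
Splitting ip4_datagram_connect() into a lockless __ip4_datagram_connect() plus a locking wrapper exists so the IPv6 side (next hunks) can invoke the v4 logic while already holding the socket lock, instead of deadlocking on a recursive lock_sock(). The general shape, with ip4_op/__ip4_op as hypothetical names for the pattern:

        int ip4_op(struct sock *sk)
        {
                int res;

                lock_sock(sk);
                res = __ip4_op(sk);     /* all logic lives here, lock-free      */
                release_sock(sk);       /* lock-holding callers call __ip4_op() */
                return res;
        }
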
5070 diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
5071 index 09b62e17dd8c..0ca933db1b41 100644
5072 --- a/net/ipv4/fib_trie.c
5073 +++ b/net/ipv4/fib_trie.c
5074 @@ -1780,8 +1780,6 @@ void fib_table_flush_external(struct fib_table *tb)
5075 if (hlist_empty(&n->leaf)) {
5076 put_child_root(pn, n->key, NULL);
5077 node_free(n);
5078 - } else {
5079 - leaf_pull_suffix(pn, n);
5080 }
5081 }
5082 }
5083 @@ -1852,8 +1850,6 @@ int fib_table_flush(struct fib_table *tb)
5084 if (hlist_empty(&n->leaf)) {
5085 put_child_root(pn, n->key, NULL);
5086 node_free(n);
5087 - } else {
5088 - leaf_pull_suffix(pn, n);
5089 }
5090 }
5091
5092 @@ -2457,7 +2453,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
5093 key = l->key + 1;
5094 iter->pos++;
5095
5096 - if (pos-- <= 0)
5097 + if (--pos <= 0)
5098 break;
5099
5100 l = NULL;
5101 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
5102 index 8976ca423a07..b27fc401c6a9 100644
5103 --- a/net/ipv4/inet_connection_sock.c
5104 +++ b/net/ipv4/inet_connection_sock.c
5105 @@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
5106 }
5107
5108 spin_unlock(&queue->syn_wait_lock);
5109 - if (del_timer(&req->rsk_timer))
5110 + if (del_timer_sync(&req->rsk_timer))
5111 reqsk_put(req);
5112 return found;
5113 }
5114 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
5115 index cc1da6d9cb35..cae22a1a8777 100644
5116 --- a/net/ipv4/ip_fragment.c
5117 +++ b/net/ipv4/ip_fragment.c
5118 @@ -342,7 +342,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
5119 ihl = ip_hdrlen(skb);
5120
5121 /* Determine the position of this fragment. */
5122 - end = offset + skb->len - ihl;
5123 + end = offset + skb->len - skb_network_offset(skb) - ihl;
5124 err = -EINVAL;
5125
5126 /* Is this the final fragment? */
5127 @@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
5128 goto err;
5129
5130 err = -ENOMEM;
5131 - if (!pskb_pull(skb, ihl))
5132 + if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
5133 goto err;
5134
5135 err = pskb_trim_rcsum(skb, end - offset);
5136 @@ -613,6 +613,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5137 iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
5138 iph->tot_len = htons(len);
5139 iph->tos |= ecn;
5140 +
5141 + ip_send_check(iph);
5142 +
5143 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
5144 qp->q.fragments = NULL;
5145 qp->q.fragments_tail = NULL;
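
Reassembly rewrites tot_len, frag_off and tos in place, so the IPv4 header checksum must be refreshed before the packet is handed on; ip_send_check() recomputes it from scratch. The other two hunks account for skb_network_offset() so fragments whose data does not begin at the network header are measured and pulled correctly. The checksum refresh itself is simply:

        iph->tot_len = htons(len);      /* header fields were just rewritten        */
        iph->tos |= ecn;
        ip_send_check(iph);             /* recompute iph->check over the new header */
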
5146 diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
5147 index 4c2c3ba4ba65..626d9e56a6bd 100644
5148 --- a/net/ipv4/ip_tunnel.c
5149 +++ b/net/ipv4/ip_tunnel.c
5150 @@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
5151 EXPORT_SYMBOL(ip_tunnel_encap);
5152
5153 static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
5154 - struct rtable *rt, __be16 df)
5155 + struct rtable *rt, __be16 df,
5156 + const struct iphdr *inner_iph)
5157 {
5158 struct ip_tunnel *tunnel = netdev_priv(dev);
5159 int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
5160 @@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
5161
5162 if (skb->protocol == htons(ETH_P_IP)) {
5163 if (!skb_is_gso(skb) &&
5164 - (df & htons(IP_DF)) && mtu < pkt_size) {
5165 + (inner_iph->frag_off & htons(IP_DF)) &&
5166 + mtu < pkt_size) {
5167 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
5168 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
5169 return -E2BIG;
5170 @@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5171 goto tx_error;
5172 }
5173
5174 - if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
5175 + if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
5176 ip_rt_put(rt);
5177 goto tx_error;
5178 }
5179 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
5180 index fc1c658ec6c1..441ca6f38981 100644
5181 --- a/net/ipv4/tcp_ipv4.c
5182 +++ b/net/ipv4/tcp_ipv4.c
5183 @@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
5184 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
5185 if (req) {
5186 nsk = tcp_check_req(sk, skb, req, false);
5187 - if (!nsk)
5188 + if (!nsk || nsk == sk)
5189 reqsk_put(req);
5190 return nsk;
5191 }
5192 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5193 index 83aa604f9273..1b8c5ba7d5f7 100644
5194 --- a/net/ipv4/udp.c
5195 +++ b/net/ipv4/udp.c
5196 @@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
5197
5198 skb->sk = sk;
5199 skb->destructor = sock_efree;
5200 - dst = sk->sk_rx_dst;
5201 + dst = READ_ONCE(sk->sk_rx_dst);
5202
5203 if (dst)
5204 dst = dst_check(dst, 0);
5205 - if (dst)
5206 - skb_dst_set_noref(skb, dst);
5207 + if (dst) {
5208 + /* DST_NOCACHE cannot be used without taking a reference */
5209 + if (dst->flags & DST_NOCACHE) {
5210 + if (likely(atomic_inc_not_zero(&dst->__refcnt)))
5211 + skb_dst_set(skb, dst);
5212 + } else {
5213 + skb_dst_set_noref(skb, dst);
5214 + }
5215 + }
5216 }
5217
5218 int udp_rcv(struct sk_buff *skb)
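
A DST_NOCACHE entry can be freed as soon as its last reference drops, so attaching it with skb_dst_set_noref(), which takes no reference, could leave the skb pointing at freed memory. atomic_inc_not_zero() takes a reference only while the entry is still live; if the count already hit zero, the dst is on its way out and is skipped:

        if (dst->flags & DST_NOCACHE) {
                /* may be freed any time: only usable if we win a reference */
                if (likely(atomic_inc_not_zero(&dst->__refcnt)))
                        skb_dst_set(skb, dst);
        } else {
                /* cached dst: lifetime covers this packet, no ref needed */
                skb_dst_set_noref(skb, dst);
        }
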
5219 diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
5220 index 62d908e64eeb..b10a88986a98 100644
5221 --- a/net/ipv6/datagram.c
5222 +++ b/net/ipv6/datagram.c
5223 @@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
5224 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
5225 }
5226
5227 -int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5228 +static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5229 {
5230 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
5231 struct inet_sock *inet = inet_sk(sk);
5232 @@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5233 if (usin->sin6_family == AF_INET) {
5234 if (__ipv6_only_sock(sk))
5235 return -EAFNOSUPPORT;
5236 - err = ip4_datagram_connect(sk, uaddr, addr_len);
5237 + err = __ip4_datagram_connect(sk, uaddr, addr_len);
5238 goto ipv4_connected;
5239 }
5240
5241 @@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5242 sin.sin_addr.s_addr = daddr->s6_addr32[3];
5243 sin.sin_port = usin->sin6_port;
5244
5245 - err = ip4_datagram_connect(sk,
5246 - (struct sockaddr *) &sin,
5247 - sizeof(sin));
5248 + err = __ip4_datagram_connect(sk,
5249 + (struct sockaddr *) &sin,
5250 + sizeof(sin));
5251
5252 ipv4_connected:
5253 if (err)
5254 @@ -204,6 +204,16 @@ out:
5255 fl6_sock_release(flowlabel);
5256 return err;
5257 }
5258 +
5259 +int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
5260 +{
5261 + int res;
5262 +
5263 + lock_sock(sk);
5264 + res = __ip6_datagram_connect(sk, uaddr, addr_len);
5265 + release_sock(sk);
5266 + return res;
5267 +}
5268 EXPORT_SYMBOL_GPL(ip6_datagram_connect);
5269
5270 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
5271 diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
5272 index f2e464eba5ef..57990c929cd8 100644
5273 --- a/net/ipv6/ip6_input.c
5274 +++ b/net/ipv6/ip6_input.c
5275 @@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb)
5276 if (offset < 0)
5277 goto out;
5278
5279 - if (!ipv6_is_mld(skb, nexthdr, offset))
5280 - goto out;
5281 + if (ipv6_is_mld(skb, nexthdr, offset))
5282 + deliver = true;
5283
5284 - deliver = true;
5285 + goto out;
5286 }
5287 /* unknown RA - process it normally */
5288 }
5289 diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
5290 index e893cd18612f..08b62047c67f 100644
5291 --- a/net/ipv6/ip6_offload.c
5292 +++ b/net/ipv6/ip6_offload.c
5293 @@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
5294 static const struct net_offload sit_offload = {
5295 .callbacks = {
5296 .gso_segment = ipv6_gso_segment,
5297 - .gro_receive = ipv6_gro_receive,
5298 - .gro_complete = ipv6_gro_complete,
5299 },
5300 };
5301
5302 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
5303 index 3adffb300238..e541d68dba8b 100644
5304 --- a/net/ipv6/tcp_ipv6.c
5305 +++ b/net/ipv6/tcp_ipv6.c
5306 @@ -946,7 +946,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
5307 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
5308 if (req) {
5309 nsk = tcp_check_req(sk, skb, req, false);
5310 - if (!nsk)
5311 + if (!nsk || nsk == sk)
5312 reqsk_put(req);
5313 return nsk;
5314 }
5315 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5316 index 667111ee6a20..5787f15a3a12 100644
5317 --- a/net/mac80211/tx.c
5318 +++ b/net/mac80211/tx.c
5319 @@ -301,9 +301,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
5320 if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
5321 return TX_CONTINUE;
5322
5323 - if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
5324 - return TX_CONTINUE;
5325 -
5326 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
5327 return TX_CONTINUE;
5328
5329 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
5330 index bf6e76643f78..4856d975492d 100644
5331 --- a/net/netlink/af_netlink.c
5332 +++ b/net/netlink/af_netlink.c
5333 @@ -355,25 +355,52 @@ err1:
5334 return NULL;
5335 }
5336
5337 +
5338 +static void
5339 +__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
5340 + unsigned int order)
5341 +{
5342 + struct netlink_sock *nlk = nlk_sk(sk);
5343 + struct sk_buff_head *queue;
5344 + struct netlink_ring *ring;
5345 +
5346 + queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
5347 + ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
5348 +
5349 + spin_lock_bh(&queue->lock);
5350 +
5351 + ring->frame_max = req->nm_frame_nr - 1;
5352 + ring->head = 0;
5353 + ring->frame_size = req->nm_frame_size;
5354 + ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
5355 +
5356 + swap(ring->pg_vec_len, req->nm_block_nr);
5357 + swap(ring->pg_vec_order, order);
5358 + swap(ring->pg_vec, pg_vec);
5359 +
5360 + __skb_queue_purge(queue);
5361 + spin_unlock_bh(&queue->lock);
5362 +
5363 + WARN_ON(atomic_read(&nlk->mapped));
5364 +
5365 + if (pg_vec)
5366 + free_pg_vec(pg_vec, order, req->nm_block_nr);
5367 +}
5368 +
5369 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
5370 - bool closing, bool tx_ring)
5371 + bool tx_ring)
5372 {
5373 struct netlink_sock *nlk = nlk_sk(sk);
5374 struct netlink_ring *ring;
5375 - struct sk_buff_head *queue;
5376 void **pg_vec = NULL;
5377 unsigned int order = 0;
5378 - int err;
5379
5380 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
5381 - queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
5382
5383 - if (!closing) {
5384 - if (atomic_read(&nlk->mapped))
5385 - return -EBUSY;
5386 - if (atomic_read(&ring->pending))
5387 - return -EBUSY;
5388 - }
5389 + if (atomic_read(&nlk->mapped))
5390 + return -EBUSY;
5391 + if (atomic_read(&ring->pending))
5392 + return -EBUSY;
5393
5394 if (req->nm_block_nr) {
5395 if (ring->pg_vec != NULL)
5396 @@ -405,31 +432,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
5397 return -EINVAL;
5398 }
5399
5400 - err = -EBUSY;
5401 mutex_lock(&nlk->pg_vec_lock);
5402 - if (closing || atomic_read(&nlk->mapped) == 0) {
5403 - err = 0;
5404 - spin_lock_bh(&queue->lock);
5405 -
5406 - ring->frame_max = req->nm_frame_nr - 1;
5407 - ring->head = 0;
5408 - ring->frame_size = req->nm_frame_size;
5409 - ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
5410 -
5411 - swap(ring->pg_vec_len, req->nm_block_nr);
5412 - swap(ring->pg_vec_order, order);
5413 - swap(ring->pg_vec, pg_vec);
5414 -
5415 - __skb_queue_purge(queue);
5416 - spin_unlock_bh(&queue->lock);
5417 -
5418 - WARN_ON(atomic_read(&nlk->mapped));
5419 + if (atomic_read(&nlk->mapped) == 0) {
5420 + __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
5421 + mutex_unlock(&nlk->pg_vec_lock);
5422 + return 0;
5423 }
5424 +
5425 mutex_unlock(&nlk->pg_vec_lock);
5426
5427 if (pg_vec)
5428 free_pg_vec(pg_vec, order, req->nm_block_nr);
5429 - return err;
5430 +
5431 + return -EBUSY;
5432 }
5433
5434 static void netlink_mm_open(struct vm_area_struct *vma)
5435 @@ -898,10 +913,10 @@ static void netlink_sock_destruct(struct sock *sk)
5436
5437 memset(&req, 0, sizeof(req));
5438 if (nlk->rx_ring.pg_vec)
5439 - netlink_set_ring(sk, &req, true, false);
5440 + __netlink_set_ring(sk, &req, false, NULL, 0);
5441 memset(&req, 0, sizeof(req));
5442 if (nlk->tx_ring.pg_vec)
5443 - netlink_set_ring(sk, &req, true, true);
5444 + __netlink_set_ring(sk, &req, true, NULL, 0);
5445 }
5446 #endif /* CONFIG_NETLINK_MMAP */
5447
5448 @@ -1079,6 +1094,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
5449
5450 err = __netlink_insert(table, sk);
5451 if (err) {
5452 + /* If the hashtable backend returns -EBUSY
5453 + * here, it must not escape to the caller.
5454 + */
5455 + if (unlikely(err == -EBUSY))
5456 + err = -EOVERFLOW;
5457 if (err == -EEXIST)
5458 err = -EADDRINUSE;
5459 nlk_sk(sk)->portid = 0;
5460 @@ -2197,7 +2217,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
5461 return -EINVAL;
5462 if (copy_from_user(&req, optval, sizeof(req)))
5463 return -EFAULT;
5464 - err = netlink_set_ring(sk, &req, false,
5465 + err = netlink_set_ring(sk, &req,
5466 optname == NETLINK_TX_RING);
5467 break;
5468 }
5469 diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
5470 index ed54ec533836..b33fed6d1584 100644
5471 --- a/net/nfc/nci/hci.c
5472 +++ b/net/nfc/nci/hci.c
5473 @@ -233,7 +233,7 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
5474 r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
5475 msecs_to_jiffies(NCI_DATA_TIMEOUT));
5476
5477 - if (r == NCI_STATUS_OK)
5478 + if (r == NCI_STATUS_OK && skb)
5479 *skb = conn_info->rx_skb;
5480
5481 return r;
5482 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5483 index fe1610ddeacf..e1ea5d43b01e 100644
5484 --- a/net/packet/af_packet.c
5485 +++ b/net/packet/af_packet.c
5486 @@ -2307,7 +2307,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
5487 }
5488 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
5489 addr, hlen);
5490 - if (tp_len > dev->mtu + dev->hard_header_len) {
5491 + if (likely(tp_len >= 0) &&
5492 + tp_len > dev->mtu + dev->hard_header_len) {
5493 struct ethhdr *ehdr;
5494 /* Earlier code assumed this would be a VLAN pkt,
5495 * double-check this now that we have the actual
5496 @@ -2688,7 +2689,7 @@ static int packet_release(struct socket *sock)
5497 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
5498 {
5499 struct packet_sock *po = pkt_sk(sk);
5500 - const struct net_device *dev_curr;
5501 + struct net_device *dev_curr;
5502 __be16 proto_curr;
5503 bool need_rehook;
5504
5505 @@ -2712,15 +2713,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
5506
5507 po->num = proto;
5508 po->prot_hook.type = proto;
5509 -
5510 - if (po->prot_hook.dev)
5511 - dev_put(po->prot_hook.dev);
5512 -
5513 po->prot_hook.dev = dev;
5514
5515 po->ifindex = dev ? dev->ifindex : 0;
5516 packet_cached_dev_assign(po, dev);
5517 }
5518 + if (dev_curr)
5519 + dev_put(dev_curr);
5520
5521 if (proto == 0 || !need_rehook)
5522 goto out_unlock;
5523 diff --git a/net/rds/info.c b/net/rds/info.c
5524 index 9a6b4f66187c..140a44a5f7b7 100644
5525 --- a/net/rds/info.c
5526 +++ b/net/rds/info.c
5527 @@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
5528
5529 /* check for all kinds of wrapping and the like */
5530 start = (unsigned long)optval;
5531 - if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
5532 + if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
5533 ret = -EINVAL;
5534 goto out;
5535 }
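
The old guard computed len + PAGE_SIZE - 1 and looked for wraparound, but signed overflow is undefined in C and a compiler is entitled to delete such a test. The replacement compares against the limit without ever forming the overflowing sum:

        /* rejects negative lengths and any len for which
         * len + PAGE_SIZE - 1 would exceed INT_MAX */
        if (len < 0 || len > INT_MAX - PAGE_SIZE + 1)
                return -EINVAL;
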
5536 diff --git a/net/sched/act_api.c b/net/sched/act_api.c
5537 index 3d43e4979f27..f8d9c2a2c451 100644
5538 --- a/net/sched/act_api.c
5539 +++ b/net/sched/act_api.c
5540 @@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
5541 }
5542 EXPORT_SYMBOL(tcf_hash_destroy);
5543
5544 -int tcf_hash_release(struct tc_action *a, int bind)
5545 +int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
5546 {
5547 struct tcf_common *p = a->priv;
5548 int ret = 0;
5549 @@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
5550 if (p) {
5551 if (bind)
5552 p->tcfc_bindcnt--;
5553 - else if (p->tcfc_bindcnt > 0)
5554 + else if (strict && p->tcfc_bindcnt > 0)
5555 return -EPERM;
5556
5557 p->tcfc_refcnt--;
5558 @@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
5559 ret = 1;
5560 }
5561 }
5562 +
5563 return ret;
5564 }
5565 -EXPORT_SYMBOL(tcf_hash_release);
5566 +EXPORT_SYMBOL(__tcf_hash_release);
5567
5568 static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
5569 struct tc_action *a)
5570 @@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
5571 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
5572 hlist_for_each_entry_safe(p, n, head, tcfc_head) {
5573 a->priv = p;
5574 - ret = tcf_hash_release(a, 0);
5575 + ret = __tcf_hash_release(a, false, true);
5576 if (ret == ACT_P_DELETED) {
5577 module_put(a->ops->owner);
5578 n_i++;
5579 @@ -413,7 +414,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
5580 int ret = 0;
5581
5582 list_for_each_entry_safe(a, tmp, actions, list) {
5583 - ret = tcf_hash_release(a, bind);
5584 + ret = __tcf_hash_release(a, bind, true);
5585 if (ret == ACT_P_DELETED)
5586 module_put(a->ops->owner);
5587 else if (ret < 0)
5588 diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
5589 index dc6a2d324bd8..521ffca91228 100644
5590 --- a/net/sched/act_bpf.c
5591 +++ b/net/sched/act_bpf.c
5592 @@ -27,9 +27,10 @@
5593 struct tcf_bpf_cfg {
5594 struct bpf_prog *filter;
5595 struct sock_filter *bpf_ops;
5596 - char *bpf_name;
5597 + const char *bpf_name;
5598 u32 bpf_fd;
5599 u16 bpf_num_ops;
5600 + bool is_ebpf;
5601 };
5602
5603 static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
5604 @@ -200,6 +201,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
5605 cfg->bpf_ops = bpf_ops;
5606 cfg->bpf_num_ops = bpf_num_ops;
5607 cfg->filter = fp;
5608 + cfg->is_ebpf = false;
5609
5610 return 0;
5611 }
5612 @@ -234,18 +236,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
5613 cfg->bpf_fd = bpf_fd;
5614 cfg->bpf_name = name;
5615 cfg->filter = fp;
5616 + cfg->is_ebpf = true;
5617
5618 return 0;
5619 }
5620
5621 +static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
5622 +{
5623 + if (cfg->is_ebpf)
5624 + bpf_prog_put(cfg->filter);
5625 + else
5626 + bpf_prog_destroy(cfg->filter);
5627 +
5628 + kfree(cfg->bpf_ops);
5629 + kfree(cfg->bpf_name);
5630 +}
5631 +
5632 +static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
5633 + struct tcf_bpf_cfg *cfg)
5634 +{
5635 + cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
5636 + cfg->filter = prog->filter;
5637 +
5638 + cfg->bpf_ops = prog->bpf_ops;
5639 + cfg->bpf_name = prog->bpf_name;
5640 +}
5641 +
5642 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
5643 struct nlattr *est, struct tc_action *act,
5644 int replace, int bind)
5645 {
5646 struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
5647 + struct tcf_bpf_cfg cfg, old;
5648 struct tc_act_bpf *parm;
5649 struct tcf_bpf *prog;
5650 - struct tcf_bpf_cfg cfg;
5651 bool is_bpf, is_ebpf;
5652 int ret;
5653
5654 @@ -294,6 +318,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
5655 prog = to_bpf(act);
5656 spin_lock_bh(&prog->tcf_lock);
5657
5658 + if (ret != ACT_P_CREATED)
5659 + tcf_bpf_prog_fill_cfg(prog, &old);
5660 +
5661 prog->bpf_ops = cfg.bpf_ops;
5662 prog->bpf_name = cfg.bpf_name;
5663
5664 @@ -309,29 +336,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
5665
5666 if (ret == ACT_P_CREATED)
5667 tcf_hash_insert(act);
5668 + else
5669 + tcf_bpf_cfg_cleanup(&old);
5670
5671 return ret;
5672
5673 destroy_fp:
5674 - if (is_ebpf)
5675 - bpf_prog_put(cfg.filter);
5676 - else
5677 - bpf_prog_destroy(cfg.filter);
5678 -
5679 - kfree(cfg.bpf_ops);
5680 - kfree(cfg.bpf_name);
5681 -
5682 + tcf_bpf_cfg_cleanup(&cfg);
5683 return ret;
5684 }
5685
5686 static void tcf_bpf_cleanup(struct tc_action *act, int bind)
5687 {
5688 - const struct tcf_bpf *prog = act->priv;
5689 + struct tcf_bpf_cfg tmp;
5690
5691 - if (tcf_bpf_is_ebpf(prog))
5692 - bpf_prog_put(prog->filter);
5693 - else
5694 - bpf_prog_destroy(prog->filter);
5695 + tcf_bpf_prog_fill_cfg(act->priv, &tmp);
5696 + tcf_bpf_cfg_cleanup(&tmp);
5697 }
5698
5699 static struct tc_action_ops act_bpf_ops __read_mostly = {
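
The act_bpf rework gathers the filter/ops/name triple into tcf_bpf_cfg so that replacement becomes snapshot, swap, then free: the old configuration is captured before the new one is installed and torn down only once the update has committed, rather than being leaked or freed while still published. The replace path, in outline (details elided to a comment):

        struct tcf_bpf_cfg old;

        if (ret != ACT_P_CREATED)
                tcf_bpf_prog_fill_cfg(prog, &old);  /* snapshot what we replace */

        /* install the new filter under prog->tcf_lock here */

        if (ret == ACT_P_CREATED)
                tcf_hash_insert(act);
        else
                tcf_bpf_cfg_cleanup(&old);          /* free only after the swap */
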
5700 diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
5701 index 91bd9c19471d..c0b86f2bfe22 100644
5702 --- a/net/sched/cls_bpf.c
5703 +++ b/net/sched/cls_bpf.c
5704 @@ -364,7 +364,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
5705 goto errout;
5706
5707 if (oldprog) {
5708 - list_replace_rcu(&prog->link, &oldprog->link);
5709 + list_replace_rcu(&oldprog->link, &prog->link);
5710 tcf_unbind_filter(tp, &oldprog->res);
5711 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
5712 } else {
5713 diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
5714 index a620c4e288a5..75df923f5c03 100644
5715 --- a/net/sched/cls_flow.c
5716 +++ b/net/sched/cls_flow.c
5717 @@ -419,6 +419,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
5718 if (!fnew)
5719 goto err2;
5720
5721 + tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
5722 +
5723 fold = (struct flow_filter *)*arg;
5724 if (fold) {
5725 err = -EINVAL;
5726 @@ -480,7 +482,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
5727 fnew->mask = ~0U;
5728 fnew->tp = tp;
5729 get_random_bytes(&fnew->hashrnd, 4);
5730 - tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
5731 }
5732
5733 fnew->perturb_timer.function = flow_perturbation;
5734 @@ -520,7 +521,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
5735 if (*arg == 0)
5736 list_add_tail_rcu(&fnew->list, &head->filters);
5737 else
5738 - list_replace_rcu(&fnew->list, &fold->list);
5739 + list_replace_rcu(&fold->list, &fnew->list);
5740
5741 *arg = (unsigned long)fnew;
5742
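
Both classifier fixes above are the same one-liner: list_replace_rcu() takes (old, new) in that order, and the original calls had the arguments swapped, which splices the new element out of a list it was never on and corrupts the chain. Correct usage, as in the cls_flow hunk:

        /* list_replace_rcu(old, new): @old is unlinked, @new takes its place */
        list_replace_rcu(&fold->list, &fnew->list);
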
5743 diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
5744 index c244c45b78d7..9291598b5aad 100644
5745 --- a/net/sched/sch_fq_codel.c
5746 +++ b/net/sched/sch_fq_codel.c
5747 @@ -162,10 +162,10 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
5748 skb = dequeue_head(flow);
5749 len = qdisc_pkt_len(skb);
5750 q->backlogs[idx] -= len;
5751 - kfree_skb(skb);
5752 sch->q.qlen--;
5753 qdisc_qstats_drop(sch);
5754 qdisc_qstats_backlog_dec(sch, skb);
5755 + kfree_skb(skb);
5756 flow->dropped++;
5757 return idx;
5758 }
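
The fq_codel hunk is a use-after-free fix by reordering alone: qdisc_qstats_backlog_dec() reads the packet length out of the skb, so kfree_skb() has to move below it. As a rule of thumb, the free should be the last statement that names the object:

        len = qdisc_pkt_len(skb);
        q->backlogs[idx] -= len;
        sch->q.qlen--;
        qdisc_qstats_drop(sch);
        qdisc_qstats_backlog_dec(sch, skb); /* still dereferences skb           */
        kfree_skb(skb);                     /* last touch: free after all reads */
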
5759 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
5760 index 1d4fe24af06a..d109d308ec3a 100644
5761 --- a/net/sunrpc/xprt.c
5762 +++ b/net/sunrpc/xprt.c
5763 @@ -611,6 +611,7 @@ static void xprt_autoclose(struct work_struct *work)
5764 xprt->ops->close(xprt);
5765 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
5766 xprt_release_write(xprt, NULL);
5767 + wake_up_bit(&xprt->state, XPRT_LOCKED);
5768 }
5769
5770 /**
5771 @@ -720,6 +721,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
5772 xprt->ops->release_xprt(xprt, NULL);
5773 out:
5774 spin_unlock_bh(&xprt->transport_lock);
5775 + wake_up_bit(&xprt->state, XPRT_LOCKED);
5776 }
5777
5778 /**
5779 @@ -1389,6 +1391,10 @@ out:
5780 static void xprt_destroy(struct rpc_xprt *xprt)
5781 {
5782 dprintk("RPC: destroying transport %p\n", xprt);
5783 +
5784 + /* Exclude transport connect/disconnect handlers */
5785 + wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
5786 +
5787 del_timer_sync(&xprt->timer);
5788
5789 rpc_xprt_debugfs_unregister(xprt);
5790 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
5791 index 66891e32c5e3..5e3ad598d3f5 100644
5792 --- a/net/sunrpc/xprtsock.c
5793 +++ b/net/sunrpc/xprtsock.c
5794 @@ -834,6 +834,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
5795 sk->sk_user_data = NULL;
5796
5797 xs_restore_old_callbacks(transport, sk);
5798 + xprt_clear_connected(xprt);
5799 write_unlock_bh(&sk->sk_callback_lock);
5800 xs_sock_reset_connection_flags(xprt);
5801
5802 @@ -1433,6 +1434,7 @@ out:
5803 static void xs_tcp_state_change(struct sock *sk)
5804 {
5805 struct rpc_xprt *xprt;
5806 + struct sock_xprt *transport;
5807
5808 read_lock_bh(&sk->sk_callback_lock);
5809 if (!(xprt = xprt_from_sock(sk)))
5810 @@ -1444,13 +1446,12 @@ static void xs_tcp_state_change(struct sock *sk)
5811 sock_flag(sk, SOCK_ZAPPED),
5812 sk->sk_shutdown);
5813
5814 + transport = container_of(xprt, struct sock_xprt, xprt);
5815 trace_rpc_socket_state_change(xprt, sk->sk_socket);
5816 switch (sk->sk_state) {
5817 case TCP_ESTABLISHED:
5818 spin_lock(&xprt->transport_lock);
5819 if (!xprt_test_and_set_connected(xprt)) {
5820 - struct sock_xprt *transport = container_of(xprt,
5821 - struct sock_xprt, xprt);
5822
5823 /* Reset TCP record info */
5824 transport->tcp_offset = 0;
5825 @@ -1459,6 +1460,8 @@ static void xs_tcp_state_change(struct sock *sk)
5826 transport->tcp_flags =
5827 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
5828 xprt->connect_cookie++;
5829 + clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
5830 + xprt_clear_connecting(xprt);
5831
5832 xprt_wake_pending_tasks(xprt, -EAGAIN);
5833 }
5834 @@ -1494,6 +1497,9 @@ static void xs_tcp_state_change(struct sock *sk)
5835 smp_mb__after_atomic();
5836 break;
5837 case TCP_CLOSE:
5838 + if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
5839 + &transport->sock_state))
5840 + xprt_clear_connecting(xprt);
5841 xs_sock_mark_closed(xprt);
5842 }
5843 out:
5844 @@ -2110,6 +2116,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
5845 /* Tell the socket layer to start connecting... */
5846 xprt->stat.connect_count++;
5847 xprt->stat.connect_start = jiffies;
5848 + set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
5849 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
5850 switch (ret) {
5851 case 0:
5852 @@ -2174,7 +2181,6 @@ static void xs_tcp_setup_socket(struct work_struct *work)
5853 case -EINPROGRESS:
5854 case -EALREADY:
5855 xprt_unlock_connect(xprt, transport);
5856 - xprt_clear_connecting(xprt);
5857 return;
5858 case -EINVAL:
5859 /* Happens, for instance, if the user specified a link
5860 @@ -2216,13 +2222,14 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
5861
5862 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
5863
5864 - /* Start by resetting any existing state */
5865 - xs_reset_transport(transport);
5866 -
5867 - if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
5868 + if (transport->sock != NULL) {
5869 dprintk("RPC: xs_connect delayed xprt %p for %lu "
5870 "seconds\n",
5871 xprt, xprt->reestablish_timeout / HZ);
5872 +
5873 + /* Start by resetting any existing state */
5874 + xs_reset_transport(transport);
5875 +
5876 queue_delayed_work(rpciod_workqueue,
5877 &transport->connect_worker,
5878 xprt->reestablish_timeout);
5879 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
5880 index f485600c4507..20cc6df07157 100644
5881 --- a/net/tipc/socket.c
5882 +++ b/net/tipc/socket.c
5883 @@ -2009,6 +2009,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
5884 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
5885 if (res)
5886 goto exit;
5887 + security_sk_clone(sock->sk, new_sock->sk);
5888
5889 new_sk = new_sock->sk;
5890 new_tsock = tipc_sk(new_sk);
5891 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5892 index 91f6928560e1..6fe862594e9b 100644
5893 --- a/sound/pci/hda/patch_realtek.c
5894 +++ b/sound/pci/hda/patch_realtek.c
5895 @@ -1134,7 +1134,7 @@ static const struct hda_fixup alc880_fixups[] = {
5896 /* override all pins as BIOS on old Amilo is broken */
5897 .type = HDA_FIXUP_PINS,
5898 .v.pins = (const struct hda_pintbl[]) {
5899 - { 0x14, 0x0121411f }, /* HP */
5900 + { 0x14, 0x0121401f }, /* HP */
5901 { 0x15, 0x99030120 }, /* speaker */
5902 { 0x16, 0x99030130 }, /* bass speaker */
5903 { 0x17, 0x411111f0 }, /* N/A */
5904 @@ -1154,7 +1154,7 @@ static const struct hda_fixup alc880_fixups[] = {
5905 /* almost compatible with FUJITSU, but no bass and SPDIF */
5906 .type = HDA_FIXUP_PINS,
5907 .v.pins = (const struct hda_pintbl[]) {
5908 - { 0x14, 0x0121411f }, /* HP */
5909 + { 0x14, 0x0121401f }, /* HP */
5910 { 0x15, 0x99030120 }, /* speaker */
5911 { 0x16, 0x411111f0 }, /* N/A */
5912 { 0x17, 0x411111f0 }, /* N/A */
5913 @@ -1363,7 +1363,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
5914 SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
5915 SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
5916 SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE),
5917 - SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
5918 + SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU),
5919 SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
5920 SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
5921 SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU),
5922 @@ -5118,8 +5118,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5923 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5924 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5925 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5926 - SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5927 SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5928 + SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5929 + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5930 + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5931 + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5932 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5933 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5934 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
5935 @@ -6454,6 +6457,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
5936 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5937 SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13),
5938 SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_XPS13),
5939 + SND_PCI_QUIRK(0x1028, 0x060d, "Dell M3800", ALC668_FIXUP_DELL_XPS13),
5940 SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5941 SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5942 SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5943 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
5944 index 8b7e391dd0b8..cd8ed2e393a2 100644
5945 --- a/sound/usb/mixer.c
5946 +++ b/sound/usb/mixer.c
5947 @@ -2522,7 +2522,7 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
5948 for (c = 0; c < MAX_CHANNELS; c++) {
5949 if (!(cval->cmask & (1 << c)))
5950 continue;
5951 - if (cval->cached & (1 << c)) {
5952 + if (cval->cached & (1 << (c + 1))) {
5953 err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
5954 cval->cache_val[idx]);
5955 if (err < 0)