Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.11/0108-4.11.9-all-fixes.patch



Revision 2953
Thu Jul 6 13:57:45 2017 UTC by niro
File size: 124593 byte(s)
-linux-4.11.9
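
The file below is a git-format unified diff (a/ and b/ path prefixes) that lifts a vanilla 4.11.8 source tree to 4.11.9. As a minimal sketch of how such a patch is typically applied -- the patch filename comes from the header above, while the surrounding directory layout is an assumption -- one might run:

    # Assumed layout: a clean linux-4.11.8 tree next to a checkout of patches-4.11.
    # Dry-run first to confirm the tree applies cleanly, then apply for real.
    cd linux-4.11.8
    patch -p1 --dry-run < ../patches-4.11/0108-4.11.9-all-fixes.patch
    patch -p1 < ../patches-4.11/0108-4.11.9-all-fixes.patch

The -p1 option strips the leading a/ and b/ path components used by git-style diffs.
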
1 diff --git a/Makefile b/Makefile
2 index 8c5c94ca56d9..9db9095e0d7b 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 11
8 -SUBLEVEL = 8
9 +SUBLEVEL = 9
10 EXTRAVERSION =
11 NAME = Fearless Coyote
12
13 diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
14 index 0afd201ab980..28255af5f2d8 100644
15 --- a/arch/arm/mach-davinci/pm.c
16 +++ b/arch/arm/mach-davinci/pm.c
17 @@ -154,7 +154,8 @@ int __init davinci_pm_init(void)
18 davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
19 if (!davinci_sram_suspend) {
20 pr_err("PM: cannot allocate SRAM memory\n");
21 - return -ENOMEM;
22 + ret = -ENOMEM;
23 + goto no_sram_mem;
24 }
25
26 davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
27 @@ -162,6 +163,10 @@ int __init davinci_pm_init(void)
28
29 suspend_set_ops(&davinci_pm_ops);
30
31 + return 0;
32 +
33 +no_sram_mem:
34 + iounmap(pm_config.ddrpsc_reg_base);
35 no_ddrpsc_mem:
36 iounmap(pm_config.ddrpll_reg_base);
37 no_ddrpll_mem:
38 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
39 index 347cca965783..2a343349d4c4 100644
40 --- a/arch/arm/mm/mmu.c
41 +++ b/arch/arm/mm/mmu.c
42 @@ -1216,15 +1216,15 @@ void __init adjust_lowmem_bounds(void)
43
44 high_memory = __va(arm_lowmem_limit - 1) + 1;
45
46 + if (!memblock_limit)
47 + memblock_limit = arm_lowmem_limit;
48 +
49 /*
50 * Round the memblock limit down to a pmd size. This
51 * helps to ensure that we will allocate memory from the
52 * last full pmd, which should be mapped.
53 */
54 - if (memblock_limit)
55 - memblock_limit = round_down(memblock_limit, PMD_SIZE);
56 - if (!memblock_limit)
57 - memblock_limit = arm_lowmem_limit;
58 + memblock_limit = round_down(memblock_limit, PMD_SIZE);
59
60 if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
61 if (memblock_end_of_DRAM() > arm_lowmem_limit) {
62 diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
63 index c1976c0adca7..ae66f5ef11f1 100644
64 --- a/arch/arm64/include/asm/acpi.h
65 +++ b/arch/arm64/include/asm/acpi.h
66 @@ -23,9 +23,9 @@
67 #define ACPI_MADT_GICC_LENGTH \
68 (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
69
70 -#define BAD_MADT_GICC_ENTRY(entry, end) \
71 - (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
72 - (entry)->header.length != ACPI_MADT_GICC_LENGTH)
73 +#define BAD_MADT_GICC_ENTRY(entry, end) \
74 + (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
75 + (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
76
77 /* Basic configuration for ACPI */
78 #ifdef CONFIG_ACPI
79 diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
80 index 4f0e3ebfea4b..c7e3e6387a49 100644
81 --- a/arch/arm64/kernel/pci.c
82 +++ b/arch/arm64/kernel/pci.c
83 @@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
84 return NULL;
85
86 root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
87 - if (!root_ops)
88 + if (!root_ops) {
89 + kfree(ri);
90 return NULL;
91 + }
92
93 ri->cfg = pci_acpi_setup_ecam_mapping(root);
94 if (!ri->cfg) {
95 diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
96 index 8d83fc2a96b7..38a302919e6b 100644
97 --- a/arch/mips/kernel/entry.S
98 +++ b/arch/mips/kernel/entry.S
99 @@ -11,6 +11,7 @@
100 #include <asm/asm.h>
101 #include <asm/asmmacro.h>
102 #include <asm/compiler.h>
103 +#include <asm/irqflags.h>
104 #include <asm/regdef.h>
105 #include <asm/mipsregs.h>
106 #include <asm/stackframe.h>
107 @@ -119,6 +120,7 @@ work_pending:
108 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
109 beqz t0, work_notifysig
110 work_resched:
111 + TRACE_IRQS_OFF
112 jal schedule
113
114 local_irq_disable # make sure need_resched and
115 @@ -155,6 +157,7 @@ syscall_exit_work:
116 beqz t0, work_pending # trace bit set?
117 local_irq_enable # could let syscall_trace_leave()
118 # call schedule() instead
119 + TRACE_IRQS_ON
120 move a0, sp
121 jal syscall_trace_leave
122 b resume_userspace
123 diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
124 index cf052204eb0a..d1bb506adc10 100644
125 --- a/arch/mips/kernel/head.S
126 +++ b/arch/mips/kernel/head.S
127 @@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
128 beq t0, t1, dtb_found
129 #endif
130 li t1, -2
131 - beq a0, t1, dtb_found
132 move t2, a1
133 + beq a0, t1, dtb_found
134
135 li t2, 0
136 dtb_found:
137 diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
138 index 5f928c34c148..d99416094ba9 100644
139 --- a/arch/mips/kernel/pm-cps.c
140 +++ b/arch/mips/kernel/pm-cps.c
141 @@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
142 * state. Actually per-core rather than per-CPU.
143 */
144 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
145 -static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
146
147 /* Indicates online CPUs coupled with the current CPU */
148 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
149 @@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
150 {
151 enum cps_pm_state state;
152 unsigned core = cpu_data[cpu].core;
153 - unsigned dlinesz = cpu_data[cpu].dcache.linesz;
154 void *entry_fn, *core_rc;
155
156 for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
157 @@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
158 }
159
160 if (!per_cpu(ready_count, core)) {
161 - core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
162 + core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
163 if (!core_rc) {
164 pr_err("Failed allocate core %u ready_count\n", core);
165 return -ENOMEM;
166 }
167 - per_cpu(ready_count_alloc, core) = core_rc;
168 -
169 - /* Ensure ready_count is aligned to a cacheline boundary */
170 - core_rc += dlinesz - 1;
171 - core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
172 per_cpu(ready_count, core) = core_rc;
173 }
174
175 diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
176 index b49e7bf9f950..0b675023b9c6 100644
177 --- a/arch/mips/kernel/traps.c
178 +++ b/arch/mips/kernel/traps.c
179 @@ -201,6 +201,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
180 {
181 struct pt_regs regs;
182 mm_segment_t old_fs = get_fs();
183 +
184 + regs.cp0_status = KSU_KERNEL;
185 if (sp) {
186 regs.regs[29] = (unsigned long)sp;
187 regs.regs[31] = 0;
188 diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
189 index 8b7c9e75edcb..8fad8a64d670 100644
190 --- a/arch/x86/boot/compressed/kaslr.c
191 +++ b/arch/x86/boot/compressed/kaslr.c
192 @@ -564,9 +564,6 @@ void choose_random_location(unsigned long input,
193 {
194 unsigned long random_addr, min_addr;
195
196 - /* By default, keep output position unchanged. */
197 - *virt_addr = *output;
198 -
199 if (cmdline_find_option_bool("nokaslr")) {
200 warn("KASLR disabled: 'nokaslr' on cmdline.");
201 return;
202 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
203 index b3c5a5f030ce..c945acd8fa33 100644
204 --- a/arch/x86/boot/compressed/misc.c
205 +++ b/arch/x86/boot/compressed/misc.c
206 @@ -338,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
207 unsigned long output_len)
208 {
209 const unsigned long kernel_total_size = VO__end - VO__text;
210 - unsigned long virt_addr = (unsigned long)output;
211 + unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
212
213 /* Retain x86 boot parameters pointer passed from startup_32/64. */
214 boot_params = rmode;
215 @@ -397,7 +397,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
216 #ifndef CONFIG_RELOCATABLE
217 if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
218 error("Destination address does not match LOAD_PHYSICAL_ADDR");
219 - if ((unsigned long)output != virt_addr)
220 + if (virt_addr != LOAD_PHYSICAL_ADDR)
221 error("Destination virtual address changed when not relocatable");
222 #endif
223
224 diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
225 index 1c8355eadbd1..766a5211f827 100644
226 --- a/arch/x86/boot/compressed/misc.h
227 +++ b/arch/x86/boot/compressed/misc.h
228 @@ -81,8 +81,6 @@ static inline void choose_random_location(unsigned long input,
229 unsigned long output_size,
230 unsigned long *virt_addr)
231 {
232 - /* No change from existing output location. */
233 - *virt_addr = *output;
234 }
235 #endif
236
237 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
238 index 57f7ec35216e..701d29f8e4d3 100644
239 --- a/arch/x86/entry/entry_32.S
240 +++ b/arch/x86/entry/entry_32.S
241 @@ -255,6 +255,23 @@ ENTRY(__switch_to_asm)
242 END(__switch_to_asm)
243
244 /*
245 + * The unwinder expects the last frame on the stack to always be at the same
246 + * offset from the end of the page, which allows it to validate the stack.
247 + * Calling schedule_tail() directly would break that convention because its an
248 + * asmlinkage function so its argument has to be pushed on the stack. This
249 + * wrapper creates a proper "end of stack" frame header before the call.
250 + */
251 +ENTRY(schedule_tail_wrapper)
252 + FRAME_BEGIN
253 +
254 + pushl %eax
255 + call schedule_tail
256 + popl %eax
257 +
258 + FRAME_END
259 + ret
260 +ENDPROC(schedule_tail_wrapper)
261 +/*
262 * A newly forked process directly context switches into this address.
263 *
264 * eax: prev task we switched from
265 @@ -262,24 +279,15 @@ END(__switch_to_asm)
266 * edi: kernel thread arg
267 */
268 ENTRY(ret_from_fork)
269 - FRAME_BEGIN /* help unwinder find end of stack */
270 -
271 - /*
272 - * schedule_tail() is asmlinkage so we have to put its 'prev' argument
273 - * on the stack.
274 - */
275 - pushl %eax
276 - call schedule_tail
277 - popl %eax
278 + call schedule_tail_wrapper
279
280 testl %ebx, %ebx
281 jnz 1f /* kernel threads are uncommon */
282
283 2:
284 /* When we fork, we trace the syscall return in the child, too. */
285 - leal FRAME_OFFSET(%esp), %eax
286 + movl %esp, %eax
287 call syscall_return_slowpath
288 - FRAME_END
289 jmp restore_all
290
291 /* kernel thread */
292 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
293 index 044d18ebc43c..5b219707c2f2 100644
294 --- a/arch/x86/entry/entry_64.S
295 +++ b/arch/x86/entry/entry_64.S
296 @@ -36,7 +36,6 @@
297 #include <asm/smap.h>
298 #include <asm/pgtable_types.h>
299 #include <asm/export.h>
300 -#include <asm/frame.h>
301 #include <linux/err.h>
302
303 .code64
304 @@ -409,19 +408,17 @@ END(__switch_to_asm)
305 * r12: kernel thread arg
306 */
307 ENTRY(ret_from_fork)
308 - FRAME_BEGIN /* help unwinder find end of stack */
309 movq %rax, %rdi
310 - call schedule_tail /* rdi: 'prev' task parameter */
311 + call schedule_tail /* rdi: 'prev' task parameter */
312
313 - testq %rbx, %rbx /* from kernel_thread? */
314 - jnz 1f /* kernel threads are uncommon */
315 + testq %rbx, %rbx /* from kernel_thread? */
316 + jnz 1f /* kernel threads are uncommon */
317
318 2:
319 - leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
320 + movq %rsp, %rdi
321 call syscall_return_slowpath /* returns with IRQs disabled */
322 TRACE_IRQS_ON /* user mode is traced as IRQS on */
323 SWAPGS
324 - FRAME_END
325 jmp restore_regs_and_iret
326
327 1:
328 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
329 index 65c2ca578556..78da7b646cd0 100644
330 --- a/arch/x86/events/intel/core.c
331 +++ b/arch/x86/events/intel/core.c
332 @@ -2130,7 +2130,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
333 * counters from the GLOBAL_STATUS mask and we always process PEBS
334 * events via drain_pebs().
335 */
336 - status &= ~cpuc->pebs_enabled;
337 + status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
338
339 /*
340 * PEBS overflow sets bit 62 in the global status register
341 diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
342 index 9dfeeeca0ea8..c6d23ffe422d 100644
343 --- a/arch/x86/events/intel/ds.c
344 +++ b/arch/x86/events/intel/ds.c
345 @@ -1222,7 +1222,7 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
346
347 /* clear non-PEBS bit and re-check */
348 pebs_status = p->status & cpuc->pebs_enabled;
349 - pebs_status &= (1ULL << MAX_PEBS_EVENTS) - 1;
350 + pebs_status &= PEBS_COUNTER_MASK;
351 if (pebs_status == (1 << bit))
352 return at;
353 }
354 diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
355 index 758c1aa5009d..44ec523287f6 100644
356 --- a/arch/x86/events/intel/uncore.c
357 +++ b/arch/x86/events/intel/uncore.c
358 @@ -1170,7 +1170,7 @@ static int uncore_event_cpu_online(unsigned int cpu)
359 pmu = type->pmus;
360 for (i = 0; i < type->num_boxes; i++, pmu++) {
361 box = pmu->boxes[pkg];
362 - if (!box && atomic_inc_return(&box->refcnt) == 1)
363 + if (box && atomic_inc_return(&box->refcnt) == 1)
364 uncore_box_init(box);
365 }
366 }
367 diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
368 index bcbb1d2ae10b..be3d36254040 100644
369 --- a/arch/x86/events/perf_event.h
370 +++ b/arch/x86/events/perf_event.h
371 @@ -79,6 +79,7 @@ struct amd_nb {
372
373 /* The maximal number of PEBS events: */
374 #define MAX_PEBS_EVENTS 8
375 +#define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1)
376
377 /*
378 * Flags PEBS can handle without an PMI.
379 diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
380 index d5f83cda1dea..722d0e568863 100644
381 --- a/arch/x86/include/asm/kvm_emulate.h
382 +++ b/arch/x86/include/asm/kvm_emulate.h
383 @@ -221,6 +221,9 @@ struct x86_emulate_ops {
384 void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
385 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
386 void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
387 +
388 + unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
389 + void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
390 };
391
392 typedef u32 __attribute__((vector_size(16))) sse128_t;
393 @@ -290,7 +293,6 @@ struct x86_emulate_ctxt {
394
395 /* interruptibility state, as a result of execution of STI or MOV SS */
396 int interruptibility;
397 - int emul_flags;
398
399 bool perm_ok; /* do not check permissions if true */
400 bool ud; /* inject an #UD if host doesn't support insn */
401 diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
402 index 7c9c895432a9..6a348b6f73a2 100644
403 --- a/arch/x86/include/asm/mshyperv.h
404 +++ b/arch/x86/include/asm/mshyperv.h
405 @@ -2,8 +2,7 @@
406 #define _ASM_X86_MSHYPER_H
407
408 #include <linux/types.h>
409 -#include <linux/interrupt.h>
410 -#include <linux/clocksource.h>
411 +#include <linux/atomic.h>
412 #include <asm/hyperv.h>
413
414 /*
415 diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
416 index 9ac2a5cdd9c2..e79fb6b3dffe 100644
417 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
418 +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
419 @@ -767,11 +767,13 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
420 dentry = kernfs_mount(fs_type, flags, rdt_root,
421 RDTGROUP_SUPER_MAGIC, NULL);
422 if (IS_ERR(dentry))
423 - goto out_cdp;
424 + goto out_destroy;
425
426 static_branch_enable(&rdt_enable_key);
427 goto out;
428
429 +out_destroy:
430 + kernfs_remove(kn_info);
431 out_cdp:
432 cdp_disable();
433 out:
434 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
435 index ce7f80baa664..0fca363787ff 100644
436 --- a/arch/x86/kvm/emulate.c
437 +++ b/arch/x86/kvm/emulate.c
438 @@ -2547,7 +2547,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
439 u64 smbase;
440 int ret;
441
442 - if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
443 + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
444 return emulate_ud(ctxt);
445
446 /*
447 @@ -2596,11 +2596,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
448 return X86EMUL_UNHANDLEABLE;
449 }
450
451 - if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
452 + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
453 ctxt->ops->set_nmi_mask(ctxt, false);
454
455 - ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
456 - ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
457 + ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
458 + ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
459 return X86EMUL_CONTINUE;
460 }
461
462 @@ -5317,6 +5317,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
463 const struct x86_emulate_ops *ops = ctxt->ops;
464 int rc = X86EMUL_CONTINUE;
465 int saved_dst_type = ctxt->dst.type;
466 + unsigned emul_flags;
467
468 ctxt->mem_read.pos = 0;
469
470 @@ -5331,6 +5332,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
471 goto done;
472 }
473
474 + emul_flags = ctxt->ops->get_hflags(ctxt);
475 if (unlikely(ctxt->d &
476 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
477 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
478 @@ -5364,7 +5366,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
479 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
480 }
481
482 - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
483 + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
484 rc = emulator_check_intercept(ctxt, ctxt->intercept,
485 X86_ICPT_PRE_EXCEPT);
486 if (rc != X86EMUL_CONTINUE)
487 @@ -5393,7 +5395,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
488 goto done;
489 }
490
491 - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
492 + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
493 rc = emulator_check_intercept(ctxt, ctxt->intercept,
494 X86_ICPT_POST_EXCEPT);
495 if (rc != X86EMUL_CONTINUE)
496 @@ -5447,7 +5449,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
497
498 special_insn:
499
500 - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
501 + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
502 rc = emulator_check_intercept(ctxt, ctxt->intercept,
503 X86_ICPT_POST_MEMACCESS);
504 if (rc != X86EMUL_CONTINUE)
505 diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
506 index 9d4a8504a95a..5ab4a364348e 100644
507 --- a/arch/x86/kvm/pmu_intel.c
508 +++ b/arch/x86/kvm/pmu_intel.c
509 @@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
510 ((u64)1 << edx.split.bit_width_fixed) - 1;
511 }
512
513 - pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
514 + pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
515 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
516 pmu->global_ctrl_mask = ~pmu->global_ctrl;
517
518 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
519 index 259e9b28ccf8..e06ec5333da1 100644
520 --- a/arch/x86/kvm/vmx.c
521 +++ b/arch/x86/kvm/vmx.c
522 @@ -2436,7 +2436,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
523 if (!(vmcs12->exception_bitmap & (1u << nr)))
524 return 0;
525
526 - nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
527 + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
528 vmcs_read32(VM_EXIT_INTR_INFO),
529 vmcs_readl(EXIT_QUALIFICATION));
530 return 1;
531 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
532 index dd1fe338c5f5..4e957185d5b3 100644
533 --- a/arch/x86/kvm/x86.c
534 +++ b/arch/x86/kvm/x86.c
535 @@ -5098,6 +5098,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
536
537 if (var.unusable) {
538 memset(desc, 0, sizeof(*desc));
539 + if (base3)
540 + *base3 = 0;
541 return false;
542 }
543
544 @@ -5248,6 +5250,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
545 kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
546 }
547
548 +static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
549 +{
550 + return emul_to_vcpu(ctxt)->arch.hflags;
551 +}
552 +
553 +static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
554 +{
555 + kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
556 +}
557 +
558 static const struct x86_emulate_ops emulate_ops = {
559 .read_gpr = emulator_read_gpr,
560 .write_gpr = emulator_write_gpr,
561 @@ -5287,6 +5299,8 @@ static const struct x86_emulate_ops emulate_ops = {
562 .intercept = emulator_intercept,
563 .get_cpuid = emulator_get_cpuid,
564 .set_nmi_mask = emulator_set_nmi_mask,
565 + .get_hflags = emulator_get_hflags,
566 + .set_hflags = emulator_set_hflags,
567 };
568
569 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
570 @@ -5341,7 +5355,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
571 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
572 BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
573 BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
574 - ctxt->emul_flags = vcpu->arch.hflags;
575
576 init_decode_cache(ctxt);
577 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
578 @@ -5744,8 +5757,6 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
579 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
580 toggle_interruptibility(vcpu, ctxt->interruptibility);
581 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
582 - if (vcpu->arch.hflags != ctxt->emul_flags)
583 - kvm_set_hflags(vcpu, ctxt->emul_flags);
584 kvm_rip_write(vcpu, ctxt->eip);
585 if (r == EMULATE_DONE &&
586 (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
587 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
588 index 15173d37f399..e1e3f7b4bdb0 100644
589 --- a/arch/x86/mm/init_64.c
590 +++ b/arch/x86/mm/init_64.c
591 @@ -94,10 +94,10 @@ __setup("noexec32=", nonx32_setup);
592 */
593 void sync_global_pgds(unsigned long start, unsigned long end)
594 {
595 - unsigned long address;
596 + unsigned long addr;
597
598 - for (address = start; address <= end; address += PGDIR_SIZE) {
599 - const pgd_t *pgd_ref = pgd_offset_k(address);
600 + for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
601 + const pgd_t *pgd_ref = pgd_offset_k(addr);
602 struct page *page;
603
604 if (pgd_none(*pgd_ref))
605 @@ -108,7 +108,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
606 pgd_t *pgd;
607 spinlock_t *pgt_lock;
608
609 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
610 + pgd = (pgd_t *)page_address(page) + pgd_index(addr);
611 /* the pgt_lock only for Xen */
612 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
613 spin_lock(pgt_lock);
614 diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
615 index cd44ae727df7..1c34b767c84c 100644
616 --- a/arch/x86/mm/mpx.c
617 +++ b/arch/x86/mm/mpx.c
618 @@ -526,15 +526,7 @@ int mpx_handle_bd_fault(void)
619 if (!kernel_managing_mpx_tables(current->mm))
620 return -EINVAL;
621
622 - if (do_mpx_bt_fault()) {
623 - force_sig(SIGSEGV, current);
624 - /*
625 - * The force_sig() is essentially "handling" this
626 - * exception, so we do not pass up the error
627 - * from do_mpx_bt_fault().
628 - */
629 - }
630 - return 0;
631 + return do_mpx_bt_fault();
632 }
633
634 /*
635 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
636 index a7655f6caf7d..75fb01109f94 100644
637 --- a/arch/x86/mm/tlb.c
638 +++ b/arch/x86/mm/tlb.c
639 @@ -263,8 +263,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
640 {
641 struct flush_tlb_info info;
642
643 - if (end == 0)
644 - end = start + PAGE_SIZE;
645 info.flush_mm = mm;
646 info.flush_start = start;
647 info.flush_end = end;
648 @@ -393,7 +391,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
649 }
650
651 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
652 - flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
653 + flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
654
655 preempt_enable();
656 }
657 diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
658 index fbe0dfdffc0d..0e824091a12f 100644
659 --- a/drivers/block/xen-blkback/blkback.c
660 +++ b/drivers/block/xen-blkback/blkback.c
661 @@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
662 unsigned long timeout;
663 int ret;
664
665 - xen_blkif_get(blkif);
666 -
667 set_freezable();
668 while (!kthread_should_stop()) {
669 if (try_to_freeze())
670 @@ -665,7 +663,6 @@ int xen_blkif_schedule(void *arg)
671 print_stats(ring);
672
673 ring->xenblkd = NULL;
674 - xen_blkif_put(blkif);
675
676 return 0;
677 }
678 diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
679 index dcabf52425ff..1ccb5a7bbdfe 100644
680 --- a/drivers/block/xen-blkback/xenbus.c
681 +++ b/drivers/block/xen-blkback/xenbus.c
682 @@ -255,7 +255,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
683 if (ring->xenblkd) {
684 kthread_stop(ring->xenblkd);
685 wake_up(&ring->shutdown_wq);
686 - ring->xenblkd = NULL;
687 }
688
689 /* The above kthread_stop() guarantees that at this point we
690 @@ -316,8 +315,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
691 static void xen_blkif_free(struct xen_blkif *blkif)
692 {
693
694 - xen_blkif_disconnect(blkif);
695 + WARN_ON(xen_blkif_disconnect(blkif));
696 xen_vbd_free(&blkif->vbd);
697 + kfree(blkif->be->mode);
698 + kfree(blkif->be);
699
700 /* Make sure everything is drained before shutting down */
701 kmem_cache_free(xen_blkif_cachep, blkif);
702 @@ -512,8 +513,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
703
704 /* Put the reference we set in xen_blkif_alloc(). */
705 xen_blkif_put(be->blkif);
706 - kfree(be->mode);
707 - kfree(be);
708 return 0;
709 }
710
711 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
712 index 8b4d721d6d63..b5a43d66878a 100644
713 --- a/drivers/gpio/gpiolib.c
714 +++ b/drivers/gpio/gpiolib.c
715 @@ -708,7 +708,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
716
717 ge.timestamp = ktime_get_real_ns();
718
719 - if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) {
720 + if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
721 + && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
722 int level = gpiod_get_value_cansleep(le->desc);
723
724 if (level)
725 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
726 index 13db8a2851ed..1f013d45c9e9 100644
727 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
728 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
729 @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
730 list_for_each_entry_safe(entry, next, &man->list, head)
731 vmw_cmdbuf_res_free(man, entry);
732
733 + drm_ht_remove(&man->resources);
734 kfree(man);
735 }
736
737 diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
738 index 7ef819680acd..8251fa2904b6 100644
739 --- a/drivers/hsi/clients/ssi_protocol.c
740 +++ b/drivers/hsi/clients/ssi_protocol.c
741 @@ -1065,7 +1065,7 @@ static void ssip_pn_setup(struct net_device *dev)
742 dev->addr_len = 1;
743 dev->tx_queue_len = SSIP_TXQUEUE_LEN;
744
745 - dev->destructor = free_netdev;
746 + dev->needs_free_netdev = true;
747 dev->header_ops = &phonet_header_ops;
748 }
749
750 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
751 index b8111b0c8877..d4b3ca3dd0e4 100644
752 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
753 +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
754 @@ -1851,6 +1851,7 @@ void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
755 u32 doorbell[2];
756
757 doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
758 + doorbell[1] = 0;
759 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
760 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
761 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
762 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
763 index b17536d6e69b..3f25d881b343 100644
764 --- a/drivers/iommu/amd_iommu.c
765 +++ b/drivers/iommu/amd_iommu.c
766 @@ -3879,11 +3879,9 @@ static void irte_ga_prepare(void *entry,
767 u8 vector, u32 dest_apicid, int devid)
768 {
769 struct irte_ga *irte = (struct irte_ga *) entry;
770 - struct iommu_dev_data *dev_data = search_dev_data(devid);
771
772 irte->lo.val = 0;
773 irte->hi.val = 0;
774 - irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
775 irte->lo.fields_remap.int_type = delivery_mode;
776 irte->lo.fields_remap.dm = dest_mode;
777 irte->hi.fields.vector = vector;
778 @@ -3939,10 +3937,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
779 struct irte_ga *irte = (struct irte_ga *) entry;
780 struct iommu_dev_data *dev_data = search_dev_data(devid);
781
782 - if (!dev_data || !dev_data->use_vapic) {
783 + if (!dev_data || !dev_data->use_vapic ||
784 + !irte->lo.fields_remap.guest_mode) {
785 irte->hi.fields.vector = vector;
786 irte->lo.fields_remap.destination = dest_apicid;
787 - irte->lo.fields_remap.guest_mode = 0;
788 modify_irte_ga(devid, index, irte, NULL);
789 }
790 }
791 diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
792 index 063343909b0d..6629c472eafd 100644
793 --- a/drivers/iommu/amd_iommu_v2.c
794 +++ b/drivers/iommu/amd_iommu_v2.c
795 @@ -696,9 +696,9 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
796
797 out_unregister:
798 mmu_notifier_unregister(&pasid_state->mn, mm);
799 + mmput(mm);
800
801 out_free:
802 - mmput(mm);
803 free_pasid_state(pasid_state);
804
805 out:
806 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
807 index 48d36ce59efb..1e0983488a8d 100644
808 --- a/drivers/iommu/dma-iommu.c
809 +++ b/drivers/iommu/dma-iommu.c
810 @@ -175,8 +175,7 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
811 unsigned long lo, hi;
812
813 resource_list_for_each_entry(window, &bridge->windows) {
814 - if (resource_type(window->res) != IORESOURCE_MEM &&
815 - resource_type(window->res) != IORESOURCE_IO)
816 + if (resource_type(window->res) != IORESOURCE_MEM)
817 continue;
818
819 lo = iova_pfn(iovad, window->res->start - window->offset);
820 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
821 index 5742e5eb0704..f68e8c5ca386 100644
822 --- a/drivers/md/dm-thin.c
823 +++ b/drivers/md/dm-thin.c
824 @@ -1094,6 +1094,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
825 return;
826 }
827
828 + /*
829 + * Increment the unmapped blocks. This prevents a race between the
830 + * passdown io and reallocation of freed blocks.
831 + */
832 + r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
833 + if (r) {
834 + metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
835 + bio_io_error(m->bio);
836 + cell_defer_no_holder(tc, m->cell);
837 + mempool_free(m, pool->mapping_pool);
838 + return;
839 + }
840 +
841 discard_parent = bio_alloc(GFP_NOIO, 1);
842 if (!discard_parent) {
843 DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
844 @@ -1114,19 +1127,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
845 end_discard(&op, r);
846 }
847 }
848 -
849 - /*
850 - * Increment the unmapped blocks. This prevents a race between the
851 - * passdown io and reallocation of freed blocks.
852 - */
853 - r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
854 - if (r) {
855 - metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
856 - bio_io_error(m->bio);
857 - cell_defer_no_holder(tc, m->cell);
858 - mempool_free(m, pool->mapping_pool);
859 - return;
860 - }
861 }
862
863 static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
864 diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
865 index 42ebd73f821d..7419c5ce63f8 100644
866 --- a/drivers/mtd/nand/brcmnand/brcmnand.c
867 +++ b/drivers/mtd/nand/brcmnand/brcmnand.c
868 @@ -101,6 +101,9 @@ struct brcm_nand_dma_desc {
869 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
870 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
871
872 +#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
873 +#define NAND_POLL_STATUS_TIMEOUT_MS 100
874 +
875 /* Controller feature flags */
876 enum {
877 BRCMNAND_HAS_1K_SECTORS = BIT(0),
878 @@ -765,6 +768,31 @@ enum {
879 CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
880 };
881
882 +static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
883 + u32 mask, u32 expected_val,
884 + unsigned long timeout_ms)
885 +{
886 + unsigned long limit;
887 + u32 val;
888 +
889 + if (!timeout_ms)
890 + timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
891 +
892 + limit = jiffies + msecs_to_jiffies(timeout_ms);
893 + do {
894 + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
895 + if ((val & mask) == expected_val)
896 + return 0;
897 +
898 + cpu_relax();
899 + } while (time_after(limit, jiffies));
900 +
901 + dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
902 + expected_val, val & mask);
903 +
904 + return -ETIMEDOUT;
905 +}
906 +
907 static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
908 {
909 u32 val = en ? CS_SELECT_NAND_WP : 0;
910 @@ -1024,12 +1052,39 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
911
912 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
913 static int old_wp = -1;
914 + int ret;
915
916 if (old_wp != wp) {
917 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
918 old_wp = wp;
919 }
920 +
921 + /*
922 + * make sure ctrl/flash ready before and after
923 + * changing state of #WP pin
924 + */
925 + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
926 + NAND_STATUS_READY,
927 + NAND_CTRL_RDY |
928 + NAND_STATUS_READY, 0);
929 + if (ret)
930 + return;
931 +
932 brcmnand_set_wp(ctrl, wp);
933 + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
934 + /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
935 + ret = bcmnand_ctrl_poll_status(ctrl,
936 + NAND_CTRL_RDY |
937 + NAND_STATUS_READY |
938 + NAND_STATUS_WP,
939 + NAND_CTRL_RDY |
940 + NAND_STATUS_READY |
941 + (wp ? 0 : NAND_STATUS_WP), 0);
942 +
943 + if (ret)
944 + dev_err_ratelimited(&host->pdev->dev,
945 + "nand #WP expected %s\n",
946 + wp ? "on" : "off");
947 }
948 }
949
950 @@ -1157,15 +1212,15 @@ static irqreturn_t brcmnand_dma_irq(int irq, void *data)
951 static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
952 {
953 struct brcmnand_controller *ctrl = host->ctrl;
954 - u32 intfc;
955 + int ret;
956
957 dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
958 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
959 BUG_ON(ctrl->cmd_pending != 0);
960 ctrl->cmd_pending = cmd;
961
962 - intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
963 - WARN_ON(!(intfc & INTFC_CTLR_READY));
964 + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
965 + WARN_ON(ret);
966
967 mb(); /* flush previous writes */
968 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
969 diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
970 index bda1e4667138..66aece9cc2cc 100644
971 --- a/drivers/mtd/nand/fsmc_nand.c
972 +++ b/drivers/mtd/nand/fsmc_nand.c
973 @@ -150,7 +150,6 @@ struct fsmc_nand_platform_data {
974 struct mtd_partition *partitions;
975 unsigned int nr_partitions;
976 unsigned int options;
977 - unsigned int width;
978 unsigned int bank;
979
980 enum access_mode mode;
981 @@ -844,18 +843,19 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
982 u32 val;
983 int ret;
984
985 - /* Set default NAND width to 8 bits */
986 - pdata->width = 8;
987 + pdata->options = 0;
988 +
989 if (!of_property_read_u32(np, "bank-width", &val)) {
990 if (val == 2) {
991 - pdata->width = 16;
992 + pdata->options |= NAND_BUSWIDTH_16;
993 } else if (val != 1) {
994 dev_err(&pdev->dev, "invalid bank-width %u\n", val);
995 return -EINVAL;
996 }
997 }
998 +
999 if (of_get_property(np, "nand-skip-bbtscan", NULL))
1000 - pdata->options = NAND_SKIP_BBTSCAN;
1001 + pdata->options |= NAND_SKIP_BBTSCAN;
1002
1003 pdata->nand_timings = devm_kzalloc(&pdev->dev,
1004 sizeof(*pdata->nand_timings), GFP_KERNEL);
1005 @@ -992,9 +992,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
1006 nand->badblockbits = 7;
1007 nand_set_flash_node(nand, np);
1008
1009 - if (pdata->width == FSMC_NAND_BW16)
1010 - nand->options |= NAND_BUSWIDTH_16;
1011 -
1012 switch (host->mode) {
1013 case USE_DMA_ACCESS:
1014 dma_cap_zero(mask);
1015 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1016 index 34481c9be1d1..6d80067e087f 100644
1017 --- a/drivers/net/bonding/bond_main.c
1018 +++ b/drivers/net/bonding/bond_main.c
1019 @@ -4163,7 +4163,6 @@ static void bond_destructor(struct net_device *bond_dev)
1020 struct bonding *bond = netdev_priv(bond_dev);
1021 if (bond->wq)
1022 destroy_workqueue(bond->wq);
1023 - free_netdev(bond_dev);
1024 }
1025
1026 void bond_setup(struct net_device *bond_dev)
1027 @@ -4183,7 +4182,8 @@ void bond_setup(struct net_device *bond_dev)
1028 bond_dev->netdev_ops = &bond_netdev_ops;
1029 bond_dev->ethtool_ops = &bond_ethtool_ops;
1030
1031 - bond_dev->destructor = bond_destructor;
1032 + bond_dev->needs_free_netdev = true;
1033 + bond_dev->priv_destructor = bond_destructor;
1034
1035 SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
1036
1037 @@ -4689,7 +4689,7 @@ int bond_create(struct net *net, const char *name)
1038
1039 rtnl_unlock();
1040 if (res < 0)
1041 - bond_destructor(bond_dev);
1042 + free_netdev(bond_dev);
1043 return res;
1044 }
1045
1046 diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
1047 index ddabce759456..71a7c3b44fdd 100644
1048 --- a/drivers/net/caif/caif_hsi.c
1049 +++ b/drivers/net/caif/caif_hsi.c
1050 @@ -1121,7 +1121,7 @@ static void cfhsi_setup(struct net_device *dev)
1051 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1052 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1053 dev->priv_flags |= IFF_NO_QUEUE;
1054 - dev->destructor = free_netdev;
1055 + dev->needs_free_netdev = true;
1056 dev->netdev_ops = &cfhsi_netdevops;
1057 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1058 skb_queue_head_init(&cfhsi->qhead[i]);
1059 diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
1060 index c2dea4916e5d..76e1d3545105 100644
1061 --- a/drivers/net/caif/caif_serial.c
1062 +++ b/drivers/net/caif/caif_serial.c
1063 @@ -428,7 +428,7 @@ static void caifdev_setup(struct net_device *dev)
1064 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1065 dev->mtu = CAIF_MAX_MTU;
1066 dev->priv_flags |= IFF_NO_QUEUE;
1067 - dev->destructor = free_netdev;
1068 + dev->needs_free_netdev = true;
1069 skb_queue_head_init(&serdev->head);
1070 serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
1071 serdev->common.use_frag = true;
1072 diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
1073 index 3a529fbe539f..fc21afe852b9 100644
1074 --- a/drivers/net/caif/caif_spi.c
1075 +++ b/drivers/net/caif/caif_spi.c
1076 @@ -712,7 +712,7 @@ static void cfspi_setup(struct net_device *dev)
1077 dev->flags = IFF_NOARP | IFF_POINTOPOINT;
1078 dev->priv_flags |= IFF_NO_QUEUE;
1079 dev->mtu = SPI_MAX_PAYLOAD_SIZE;
1080 - dev->destructor = free_netdev;
1081 + dev->needs_free_netdev = true;
1082 skb_queue_head_init(&cfspi->qhead);
1083 skb_queue_head_init(&cfspi->chead);
1084 cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
1085 diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
1086 index bc0eb47eccee..8bffd251165c 100644
1087 --- a/drivers/net/caif/caif_virtio.c
1088 +++ b/drivers/net/caif/caif_virtio.c
1089 @@ -617,7 +617,7 @@ static void cfv_netdev_setup(struct net_device *netdev)
1090 netdev->tx_queue_len = 100;
1091 netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
1092 netdev->mtu = CFV_DEF_MTU_SIZE;
1093 - netdev->destructor = free_netdev;
1094 + netdev->needs_free_netdev = true;
1095 }
1096
1097 /* Create debugfs counters for the device */
1098 diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
1099 index eb7173713bbc..6a6e896e52fa 100644
1100 --- a/drivers/net/can/slcan.c
1101 +++ b/drivers/net/can/slcan.c
1102 @@ -417,7 +417,7 @@ static int slc_open(struct net_device *dev)
1103 static void slc_free_netdev(struct net_device *dev)
1104 {
1105 int i = dev->base_addr;
1106 - free_netdev(dev);
1107 +
1108 slcan_devs[i] = NULL;
1109 }
1110
1111 @@ -436,7 +436,8 @@ static const struct net_device_ops slc_netdev_ops = {
1112 static void slc_setup(struct net_device *dev)
1113 {
1114 dev->netdev_ops = &slc_netdev_ops;
1115 - dev->destructor = slc_free_netdev;
1116 + dev->needs_free_netdev = true;
1117 + dev->priv_destructor = slc_free_netdev;
1118
1119 dev->hard_header_len = 0;
1120 dev->addr_len = 0;
1121 @@ -761,8 +762,6 @@ static void __exit slcan_exit(void)
1122 if (sl->tty) {
1123 printk(KERN_ERR "%s: tty discipline still running\n",
1124 dev->name);
1125 - /* Intentionally leak the control block. */
1126 - dev->destructor = NULL;
1127 }
1128
1129 unregister_netdev(dev);
1130 diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
1131 index 674f367087c5..25d12099593e 100644
1132 --- a/drivers/net/can/vcan.c
1133 +++ b/drivers/net/can/vcan.c
1134 @@ -160,7 +160,7 @@ static void vcan_setup(struct net_device *dev)
1135 dev->flags |= IFF_ECHO;
1136
1137 dev->netdev_ops = &vcan_netdev_ops;
1138 - dev->destructor = free_netdev;
1139 + dev->needs_free_netdev = true;
1140 }
1141
1142 static struct rtnl_link_ops vcan_link_ops __read_mostly = {
1143 diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
1144 index 2c80611b94ae..b66fbe51ceb1 100644
1145 --- a/drivers/net/dummy.c
1146 +++ b/drivers/net/dummy.c
1147 @@ -313,7 +313,6 @@ static void dummy_free_netdev(struct net_device *dev)
1148 struct dummy_priv *priv = netdev_priv(dev);
1149
1150 kfree(priv->vfinfo);
1151 - free_netdev(dev);
1152 }
1153
1154 static void dummy_setup(struct net_device *dev)
1155 @@ -323,7 +322,8 @@ static void dummy_setup(struct net_device *dev)
1156 /* Initialize the device structure. */
1157 dev->netdev_ops = &dummy_netdev_ops;
1158 dev->ethtool_ops = &dummy_ethtool_ops;
1159 - dev->destructor = dummy_free_netdev;
1160 + dev->needs_free_netdev = true;
1161 + dev->priv_destructor = dummy_free_netdev;
1162
1163 /* Fill in device structure with ethernet-generic values. */
1164 dev->flags |= IFF_NOARP;
1165 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1166 index 1238c4ec5215..fb0951929be9 100644
1167 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1168 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1169 @@ -4530,7 +4530,7 @@ static void dummy_setup(struct net_device *dev)
1170 /* Initialize the device structure. */
1171 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
1172 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
1173 - dev->destructor = free_netdev;
1174 + dev->needs_free_netdev = true;
1175 }
1176
1177 static int config_mgmt_dev(struct pci_dev *pdev)
1178 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1179 index 3d9490cd2db1..8f7108c94802 100644
1180 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
1181 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1182 @@ -313,13 +313,15 @@ struct mlx5e_dma_info {
1183
1184 struct mlx5e_rx_am_stats {
1185 int ppms; /* packets per msec */
1186 + int bpms; /* bytes per msec */
1187 int epms; /* events per msec */
1188 };
1189
1190 struct mlx5e_rx_am_sample {
1191 - ktime_t time;
1192 - unsigned int pkt_ctr;
1193 - u16 event_ctr;
1194 + ktime_t time;
1195 + u32 pkt_ctr;
1196 + u32 byte_ctr;
1197 + u16 event_ctr;
1198 };
1199
1200 struct mlx5e_rx_am { /* Adaptive Moderation */
1201 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1202 index 949fbadd7817..aae8b6c0edbd 100644
1203 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1204 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1205 @@ -1205,11 +1205,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
1206 SOF_TIMESTAMPING_RX_HARDWARE |
1207 SOF_TIMESTAMPING_RAW_HARDWARE;
1208
1209 - info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
1210 - (BIT(1) << HWTSTAMP_TX_ON);
1211 + info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1212 + BIT(HWTSTAMP_TX_ON);
1213
1214 - info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
1215 - (BIT(1) << HWTSTAMP_FILTER_ALL);
1216 + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1217 + BIT(HWTSTAMP_FILTER_ALL);
1218
1219 return 0;
1220 }
1221 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1222 index 15cc7b469d2e..f778436a2d28 100644
1223 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1224 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1225 @@ -3885,7 +3885,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
1226 return netdev;
1227
1228 err_cleanup_nic:
1229 - profile->cleanup(priv);
1230 + if (profile->cleanup)
1231 + profile->cleanup(priv);
1232 free_netdev(netdev);
1233
1234 return NULL;
1235 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1236 index f621373bd7a5..4be6b346f14a 100644
1237 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1238 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1239 @@ -424,6 +424,8 @@ static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
1240 priv->params.lro_wqe_sz =
1241 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
1242
1243 + mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
1244 +
1245 priv->mdev = mdev;
1246 priv->netdev = netdev;
1247 priv->params.num_channels = profile->max_nch(mdev);
1248 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
1249 index cbfac06b7ffd..23ccec4cb7f5 100644
1250 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
1251 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
1252 @@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
1253 mlx5e_am_step(am);
1254 }
1255
1256 +#define IS_SIGNIFICANT_DIFF(val, ref) \
1257 + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
1258 +
1259 static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
1260 struct mlx5e_rx_am_stats *prev)
1261 {
1262 - int diff;
1263 -
1264 - if (!prev->ppms)
1265 - return curr->ppms ? MLX5E_AM_STATS_BETTER :
1266 + if (!prev->bpms)
1267 + return curr->bpms ? MLX5E_AM_STATS_BETTER :
1268 MLX5E_AM_STATS_SAME;
1269
1270 - diff = curr->ppms - prev->ppms;
1271 - if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
1272 - return (diff > 0) ? MLX5E_AM_STATS_BETTER :
1273 - MLX5E_AM_STATS_WORSE;
1274 + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
1275 + return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
1276 + MLX5E_AM_STATS_WORSE;
1277
1278 - if (!prev->epms)
1279 - return curr->epms ? MLX5E_AM_STATS_WORSE :
1280 - MLX5E_AM_STATS_SAME;
1281 + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
1282 + return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
1283 + MLX5E_AM_STATS_WORSE;
1284
1285 - diff = curr->epms - prev->epms;
1286 - if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
1287 - return (diff < 0) ? MLX5E_AM_STATS_BETTER :
1288 - MLX5E_AM_STATS_WORSE;
1289 + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
1290 + return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
1291 + MLX5E_AM_STATS_WORSE;
1292
1293 return MLX5E_AM_STATS_SAME;
1294 }
1295 @@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq,
1296 {
1297 s->time = ktime_get();
1298 s->pkt_ctr = rq->stats.packets;
1299 + s->byte_ctr = rq->stats.bytes;
1300 s->event_ctr = rq->cq.event_ctr;
1301 }
1302
1303 #define MLX5E_AM_NEVENTS 64
1304 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
1305 +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
1306
1307 static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
1308 struct mlx5e_rx_am_sample *end,
1309 @@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
1310 {
1311 /* u32 holds up to 71 minutes, should be enough */
1312 u32 delta_us = ktime_us_delta(end->time, start->time);
1313 - unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
1314 + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
1315 + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
1316 + start->byte_ctr);
1317
1318 if (!delta_us)
1319 return;
1320
1321 - curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
1322 - curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
1323 + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
1324 + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
1325 + curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
1326 + delta_us);
1327 }
1328
1329 void mlx5e_rx_am_work(struct work_struct *work)
1330 @@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq)
1331
1332 switch (am->state) {
1333 case MLX5E_AM_MEASURE_IN_PROGRESS:
1334 - nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
1335 + nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
1336 + am->start_sample.event_ctr);
1337 if (nevents < MLX5E_AM_NEVENTS)
1338 break;
1339 mlx5e_am_sample(rq, &end_sample);
1340 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
1341 index 53e4992d6511..f81c3aa60b46 100644
1342 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
1343 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
1344 @@ -417,20 +417,13 @@ struct mlx5e_stats {
1345 };
1346
1347 static const struct counter_desc mlx5e_pme_status_desc[] = {
1348 - { "module_plug", 0 },
1349 { "module_unplug", 8 },
1350 };
1351
1352 static const struct counter_desc mlx5e_pme_error_desc[] = {
1353 - { "module_pwr_budget_exd", 0 }, /* power budget exceed */
1354 - { "module_long_range", 8 }, /* long range for non MLNX cable */
1355 - { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
1356 - { "module_no_eeprom", 24 }, /* no eeprom/retry time out */
1357 - { "module_enforce_part", 32 }, /* enforce part number list */
1358 - { "module_unknown_id", 40 }, /* unknown identifier */
1359 - { "module_high_temp", 48 }, /* high temperature */
1360 + { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
1361 + { "module_high_temp", 48 }, /* high temperature */
1362 { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
1363 - { "module_unknown_status", 64 },
1364 };
1365
1366 #endif /* __MLX5_EN_STATS_H__ */
1367 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1368 index 0ad66324247f..9862a741b32a 100644
1369 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
1370 +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1371 @@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = {
1372 },
1373 };
1374
1375 -#define FW_INIT_TIMEOUT_MILI 2000
1376 -#define FW_INIT_WAIT_MS 2
1377 +#define FW_INIT_TIMEOUT_MILI 2000
1378 +#define FW_INIT_WAIT_MS 2
1379 +#define FW_PRE_INIT_TIMEOUT_MILI 10000
1380
1381 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
1382 {
1383 @@ -537,8 +538,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
1384 /* disable cmdif checksum */
1385 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
1386
1387 - /* If the HCA supports 4K UARs use it */
1388 - if (MLX5_CAP_GEN_MAX(dev, uar_4k))
1389 + /* Enable 4K UAR only when HCA supports it and page size is bigger
1390 + * than 4K.
1391 + */
1392 + if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
1393 MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
1394
1395 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
1396 @@ -1019,6 +1022,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1397 */
1398 dev->state = MLX5_DEVICE_STATE_UP;
1399
1400 + /* wait for firmware to accept initialization segments configurations
1401 + */
1402 + err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
1403 + if (err) {
1404 + dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
1405 + FW_PRE_INIT_TIMEOUT_MILI);
1406 + goto out;
1407 + }
1408 +
1409 err = mlx5_cmd_init(dev);
1410 if (err) {
1411 dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
1412 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
1413 index 6ebb0f559a42..199459bd6961 100644
1414 --- a/drivers/net/geneve.c
1415 +++ b/drivers/net/geneve.c
1416 @@ -1007,7 +1007,7 @@ static void geneve_setup(struct net_device *dev)
1417
1418 dev->netdev_ops = &geneve_netdev_ops;
1419 dev->ethtool_ops = &geneve_ethtool_ops;
1420 - dev->destructor = free_netdev;
1421 + dev->needs_free_netdev = true;
1422
1423 SET_NETDEV_DEVTYPE(dev, &geneve_type);
1424
1425 diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
1426 index 89698741682f..429387083dce 100644
1427 --- a/drivers/net/gtp.c
1428 +++ b/drivers/net/gtp.c
1429 @@ -618,7 +618,7 @@ static const struct net_device_ops gtp_netdev_ops = {
1430 static void gtp_link_setup(struct net_device *dev)
1431 {
1432 dev->netdev_ops = &gtp_netdev_ops;
1433 - dev->destructor = free_netdev;
1434 + dev->needs_free_netdev = true;
1435
1436 dev->hard_header_len = 0;
1437 dev->addr_len = 0;
1438 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
1439 index 922bf440e9f1..021a8ec411ab 100644
1440 --- a/drivers/net/hamradio/6pack.c
1441 +++ b/drivers/net/hamradio/6pack.c
1442 @@ -311,7 +311,7 @@ static void sp_setup(struct net_device *dev)
1443 {
1444 /* Finish setting up the DEVICE info. */
1445 dev->netdev_ops = &sp_netdev_ops;
1446 - dev->destructor = free_netdev;
1447 + dev->needs_free_netdev = true;
1448 dev->mtu = SIXP_MTU;
1449 dev->hard_header_len = AX25_MAX_HEADER_LEN;
1450 dev->header_ops = &ax25_header_ops;
1451 diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
1452 index f62e7f325cf9..78a6414c5fd9 100644
1453 --- a/drivers/net/hamradio/bpqether.c
1454 +++ b/drivers/net/hamradio/bpqether.c
1455 @@ -476,7 +476,7 @@ static const struct net_device_ops bpq_netdev_ops = {
1456 static void bpq_setup(struct net_device *dev)
1457 {
1458 dev->netdev_ops = &bpq_netdev_ops;
1459 - dev->destructor = free_netdev;
1460 + dev->needs_free_netdev = true;
1461
1462 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
1463 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
1464 diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
1465 index 312fce7302d3..144ea5ae8ab4 100644
1466 --- a/drivers/net/ifb.c
1467 +++ b/drivers/net/ifb.c
1468 @@ -207,7 +207,6 @@ static void ifb_dev_free(struct net_device *dev)
1469 __skb_queue_purge(&txp->tq);
1470 }
1471 kfree(dp->tx_private);
1472 - free_netdev(dev);
1473 }
1474
1475 static void ifb_setup(struct net_device *dev)
1476 @@ -230,7 +229,8 @@ static void ifb_setup(struct net_device *dev)
1477 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1478 netif_keep_dst(dev);
1479 eth_hw_addr_random(dev);
1480 - dev->destructor = ifb_dev_free;
1481 + dev->needs_free_netdev = true;
1482 + dev->priv_destructor = ifb_dev_free;
1483 }
1484
1485 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
1486 diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
1487 index aa8575ccbce3..48e6c4e4914e 100644
1488 --- a/drivers/net/ipvlan/ipvlan_main.c
1489 +++ b/drivers/net/ipvlan/ipvlan_main.c
1490 @@ -621,7 +621,7 @@ void ipvlan_link_setup(struct net_device *dev)
1491 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
1492 dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
1493 dev->netdev_ops = &ipvlan_netdev_ops;
1494 - dev->destructor = free_netdev;
1495 + dev->needs_free_netdev = true;
1496 dev->header_ops = &ipvlan_header_ops;
1497 dev->ethtool_ops = &ipvlan_ethtool_ops;
1498 }
1499 diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
1500 index b23b71981fd5..80e3ace493cd 100644
1501 --- a/drivers/net/loopback.c
1502 +++ b/drivers/net/loopback.c
1503 @@ -145,7 +145,6 @@ static void loopback_dev_free(struct net_device *dev)
1504 {
1505 dev_net(dev)->loopback_dev = NULL;
1506 free_percpu(dev->lstats);
1507 - free_netdev(dev);
1508 }
1509
1510 static const struct net_device_ops loopback_ops = {
1511 @@ -183,7 +182,8 @@ static void loopback_setup(struct net_device *dev)
1512 dev->ethtool_ops = &loopback_ethtool_ops;
1513 dev->header_ops = &eth_header_ops;
1514 dev->netdev_ops = &loopback_ops;
1515 - dev->destructor = loopback_dev_free;
1516 + dev->needs_free_netdev = true;
1517 + dev->priv_destructor = loopback_dev_free;
1518 }
1519
1520 /* Setup and register the loopback device. */
1521 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
1522 index 49ce4e9f4a0f..43f3e1b2d3ca 100644
1523 --- a/drivers/net/macsec.c
1524 +++ b/drivers/net/macsec.c
1525 @@ -2994,7 +2994,6 @@ static void macsec_free_netdev(struct net_device *dev)
1526 free_percpu(macsec->secy.tx_sc.stats);
1527
1528 dev_put(real_dev);
1529 - free_netdev(dev);
1530 }
1531
1532 static void macsec_setup(struct net_device *dev)
1533 @@ -3004,7 +3003,8 @@ static void macsec_setup(struct net_device *dev)
1534 dev->max_mtu = ETH_MAX_MTU;
1535 dev->priv_flags |= IFF_NO_QUEUE;
1536 dev->netdev_ops = &macsec_netdev_ops;
1537 - dev->destructor = macsec_free_netdev;
1538 + dev->needs_free_netdev = true;
1539 + dev->priv_destructor = macsec_free_netdev;
1540 SET_NETDEV_DEVTYPE(dev, &macsec_type);
1541
1542 eth_zero_addr(dev->broadcast);
1543 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
1544 index b34eaaae03fd..b8cec52547d7 100644
1545 --- a/drivers/net/macvlan.c
1546 +++ b/drivers/net/macvlan.c
1547 @@ -1089,7 +1089,7 @@ void macvlan_common_setup(struct net_device *dev)
1548 netif_keep_dst(dev);
1549 dev->priv_flags |= IFF_UNICAST_FLT;
1550 dev->netdev_ops = &macvlan_netdev_ops;
1551 - dev->destructor = free_netdev;
1552 + dev->needs_free_netdev = true;
1553 dev->header_ops = &macvlan_hard_header_ops;
1554 dev->ethtool_ops = &macvlan_ethtool_ops;
1555 }
1556 diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
1557 index b91603835d26..c4b3362da4a2 100644
1558 --- a/drivers/net/nlmon.c
1559 +++ b/drivers/net/nlmon.c
1560 @@ -113,7 +113,7 @@ static void nlmon_setup(struct net_device *dev)
1561
1562 dev->netdev_ops = &nlmon_ops;
1563 dev->ethtool_ops = &nlmon_ethtool_ops;
1564 - dev->destructor = free_netdev;
1565 + dev->needs_free_netdev = true;
1566
1567 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
1568 NETIF_F_HIGHDMA | NETIF_F_LLTX;
1569 diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
1570 index 1da31dc47f86..74b907206aa7 100644
1571 --- a/drivers/net/slip/slip.c
1572 +++ b/drivers/net/slip/slip.c
1573 @@ -629,7 +629,7 @@ static void sl_uninit(struct net_device *dev)
1574 static void sl_free_netdev(struct net_device *dev)
1575 {
1576 int i = dev->base_addr;
1577 - free_netdev(dev);
1578 +
1579 slip_devs[i] = NULL;
1580 }
1581
1582 @@ -651,7 +651,8 @@ static const struct net_device_ops sl_netdev_ops = {
1583 static void sl_setup(struct net_device *dev)
1584 {
1585 dev->netdev_ops = &sl_netdev_ops;
1586 - dev->destructor = sl_free_netdev;
1587 + dev->needs_free_netdev = true;
1588 + dev->priv_destructor = sl_free_netdev;
1589
1590 dev->hard_header_len = 0;
1591 dev->addr_len = 0;
1592 @@ -1369,8 +1370,6 @@ static void __exit slip_exit(void)
1593 if (sl->tty) {
1594 printk(KERN_ERR "%s: tty discipline still running\n",
1595 dev->name);
1596 - /* Intentionally leak the control block. */
1597 - dev->destructor = NULL;
1598 }
1599
1600 unregister_netdev(dev);
1601 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1602 index 85c01247f2e3..eaa6ee762230 100644
1603 --- a/drivers/net/team/team.c
1604 +++ b/drivers/net/team/team.c
1605 @@ -1643,7 +1643,6 @@ static void team_destructor(struct net_device *dev)
1606 struct team *team = netdev_priv(dev);
1607
1608 free_percpu(team->pcpu_stats);
1609 - free_netdev(dev);
1610 }
1611
1612 static int team_open(struct net_device *dev)
1613 @@ -2079,7 +2078,8 @@ static void team_setup(struct net_device *dev)
1614
1615 dev->netdev_ops = &team_netdev_ops;
1616 dev->ethtool_ops = &team_ethtool_ops;
1617 - dev->destructor = team_destructor;
1618 + dev->needs_free_netdev = true;
1619 + dev->priv_destructor = team_destructor;
1620 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
1621 dev->priv_flags |= IFF_NO_QUEUE;
1622 dev->priv_flags |= IFF_TEAM;
1623 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1624 index cc88cd7856f5..7f3db4963fdd 100644
1625 --- a/drivers/net/tun.c
1626 +++ b/drivers/net/tun.c
1627 @@ -1560,7 +1560,6 @@ static void tun_free_netdev(struct net_device *dev)
1628 free_percpu(tun->pcpu_stats);
1629 tun_flow_uninit(tun);
1630 security_tun_dev_free_security(tun->security);
1631 - free_netdev(dev);
1632 }
1633
1634 static void tun_setup(struct net_device *dev)
1635 @@ -1571,7 +1570,8 @@ static void tun_setup(struct net_device *dev)
1636 tun->group = INVALID_GID;
1637
1638 dev->ethtool_ops = &tun_ethtool_ops;
1639 - dev->destructor = tun_free_netdev;
1640 + dev->needs_free_netdev = true;
1641 + dev->priv_destructor = tun_free_netdev;
1642 /* We prefer our own queue length */
1643 dev->tx_queue_len = TUN_READQ_SIZE;
1644 }
1645 diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
1646 index eb52de8205f0..c7a350bbaaa7 100644
1647 --- a/drivers/net/usb/cdc-phonet.c
1648 +++ b/drivers/net/usb/cdc-phonet.c
1649 @@ -298,7 +298,7 @@ static void usbpn_setup(struct net_device *dev)
1650 dev->addr_len = 1;
1651 dev->tx_queue_len = 3;
1652
1653 - dev->destructor = free_netdev;
1654 + dev->needs_free_netdev = true;
1655 }
1656
1657 /*
1658 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1659 index 87746c2bc3d3..d4c8c96bc770 100644
1660 --- a/drivers/net/usb/qmi_wwan.c
1661 +++ b/drivers/net/usb/qmi_wwan.c
1662 @@ -898,6 +898,8 @@ static const struct usb_device_id products[] = {
1663 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
1664 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
1665 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
1666 + {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
1667 + {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
1668 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
1669 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
1670 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
1671 @@ -912,6 +914,8 @@ static const struct usb_device_id products[] = {
1672 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
1673 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1674 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
1675 + {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
1676 + {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
1677 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
1678 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
1679 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
1680 diff --git a/drivers/net/veth.c b/drivers/net/veth.c
1681 index 8c39d6d690e5..2efbe98d0c67 100644
1682 --- a/drivers/net/veth.c
1683 +++ b/drivers/net/veth.c
1684 @@ -227,7 +227,6 @@ static int veth_dev_init(struct net_device *dev)
1685 static void veth_dev_free(struct net_device *dev)
1686 {
1687 free_percpu(dev->vstats);
1688 - free_netdev(dev);
1689 }
1690
1691 #ifdef CONFIG_NET_POLL_CONTROLLER
1692 @@ -322,7 +321,8 @@ static void veth_setup(struct net_device *dev)
1693 NETIF_F_HW_VLAN_STAG_TX |
1694 NETIF_F_HW_VLAN_CTAG_RX |
1695 NETIF_F_HW_VLAN_STAG_RX);
1696 - dev->destructor = veth_dev_free;
1697 + dev->needs_free_netdev = true;
1698 + dev->priv_destructor = veth_dev_free;
1699 dev->max_mtu = ETH_MAX_MTU;
1700
1701 dev->hw_features = VETH_FEATURES;
1702 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
1703 index df74efcf237b..2e62c4d1b220 100644
1704 --- a/drivers/net/vrf.c
1705 +++ b/drivers/net/vrf.c
1706 @@ -36,12 +36,14 @@
1707 #include <net/addrconf.h>
1708 #include <net/l3mdev.h>
1709 #include <net/fib_rules.h>
1710 +#include <net/netns/generic.h>
1711
1712 #define DRV_NAME "vrf"
1713 #define DRV_VERSION "1.0"
1714
1715 #define FIB_RULE_PREF 1000 /* default preference for FIB rules */
1716 -static bool add_fib_rules = true;
1717 +
1718 +static unsigned int vrf_net_id;
1719
1720 struct net_vrf {
1721 struct rtable __rcu *rth;
1722 @@ -1206,7 +1208,7 @@ static void vrf_setup(struct net_device *dev)
1723 dev->netdev_ops = &vrf_netdev_ops;
1724 dev->l3mdev_ops = &vrf_l3mdev_ops;
1725 dev->ethtool_ops = &vrf_ethtool_ops;
1726 - dev->destructor = free_netdev;
1727 + dev->needs_free_netdev = true;
1728
1729 /* Fill in device structure with ethernet-generic values. */
1730 eth_hw_addr_random(dev);
1731 @@ -1252,6 +1254,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
1732 struct nlattr *tb[], struct nlattr *data[])
1733 {
1734 struct net_vrf *vrf = netdev_priv(dev);
1735 + bool *add_fib_rules;
1736 + struct net *net;
1737 int err;
1738
1739 if (!data || !data[IFLA_VRF_TABLE])
1740 @@ -1267,13 +1271,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
1741 if (err)
1742 goto out;
1743
1744 - if (add_fib_rules) {
1745 + net = dev_net(dev);
1746 + add_fib_rules = net_generic(net, vrf_net_id);
1747 + if (*add_fib_rules) {
1748 err = vrf_add_fib_rules(dev);
1749 if (err) {
1750 unregister_netdevice(dev);
1751 goto out;
1752 }
1753 - add_fib_rules = false;
1754 + *add_fib_rules = false;
1755 }
1756
1757 out:
1758 @@ -1356,16 +1362,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = {
1759 .notifier_call = vrf_device_event,
1760 };
1761
1762 +/* Initialize per network namespace state */
1763 +static int __net_init vrf_netns_init(struct net *net)
1764 +{
1765 + bool *add_fib_rules = net_generic(net, vrf_net_id);
1766 +
1767 + *add_fib_rules = true;
1768 +
1769 + return 0;
1770 +}
1771 +
1772 +static struct pernet_operations vrf_net_ops __net_initdata = {
1773 + .init = vrf_netns_init,
1774 + .id = &vrf_net_id,
1775 + .size = sizeof(bool),
1776 +};
1777 +
1778 static int __init vrf_init_module(void)
1779 {
1780 int rc;
1781
1782 register_netdevice_notifier(&vrf_notifier_block);
1783
1784 - rc = rtnl_link_register(&vrf_link_ops);
1785 + rc = register_pernet_subsys(&vrf_net_ops);
1786 if (rc < 0)
1787 goto error;
1788
1789 + rc = rtnl_link_register(&vrf_link_ops);
1790 + if (rc < 0) {
1791 + unregister_pernet_subsys(&vrf_net_ops);
1792 + goto error;
1793 + }
1794 +
1795 return 0;
1796
1797 error:
1798 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1799 index 70dbd5a48b6b..4574b95c7938 100644
1800 --- a/drivers/net/vxlan.c
1801 +++ b/drivers/net/vxlan.c
1802 @@ -2607,7 +2607,7 @@ static void vxlan_setup(struct net_device *dev)
1803 eth_hw_addr_random(dev);
1804 ether_setup(dev);
1805
1806 - dev->destructor = free_netdev;
1807 + dev->needs_free_netdev = true;
1808 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
1809
1810 dev->features |= NETIF_F_LLTX;
1811 diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
1812 index 65ee2a6f248c..a0d76f70c428 100644
1813 --- a/drivers/net/wan/dlci.c
1814 +++ b/drivers/net/wan/dlci.c
1815 @@ -475,7 +475,7 @@ static void dlci_setup(struct net_device *dev)
1816 dev->flags = 0;
1817 dev->header_ops = &dlci_header_ops;
1818 dev->netdev_ops = &dlci_netdev_ops;
1819 - dev->destructor = free_netdev;
1820 + dev->needs_free_netdev = true;
1821
1822 dlp->receive = dlci_receive;
1823
1824 diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
1825 index eb915281197e..78596e42a3f3 100644
1826 --- a/drivers/net/wan/hdlc_fr.c
1827 +++ b/drivers/net/wan/hdlc_fr.c
1828 @@ -1106,7 +1106,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1829 return -EIO;
1830 }
1831
1832 - dev->destructor = free_netdev;
1833 + dev->needs_free_netdev = true;
1834 *get_dev_p(pvc, type) = dev;
1835 if (!used) {
1836 state(hdlc)->dce_changed = 1;
1837 diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
1838 index 9df9ed62beff..63f749078a1f 100644
1839 --- a/drivers/net/wan/lapbether.c
1840 +++ b/drivers/net/wan/lapbether.c
1841 @@ -306,7 +306,7 @@ static const struct net_device_ops lapbeth_netdev_ops = {
1842 static void lapbeth_setup(struct net_device *dev)
1843 {
1844 dev->netdev_ops = &lapbeth_netdev_ops;
1845 - dev->destructor = free_netdev;
1846 + dev->needs_free_netdev = true;
1847 dev->type = ARPHRD_X25;
1848 dev->hard_header_len = 3;
1849 dev->mtu = 1000;
1850 diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
1851 index 91ee542de3d7..b90c77ef792e 100644
1852 --- a/drivers/net/wireless/ath/ath6kl/main.c
1853 +++ b/drivers/net/wireless/ath/ath6kl/main.c
1854 @@ -1287,7 +1287,7 @@ void init_netdev(struct net_device *dev)
1855 struct ath6kl *ar = ath6kl_priv(dev);
1856
1857 dev->netdev_ops = &ath6kl_netdev_ops;
1858 - dev->destructor = free_netdev;
1859 + dev->needs_free_netdev = true;
1860 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
1861
1862 dev->needed_headroom = ETH_HLEN;
1863 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
1864 index 944b83cfc519..017e20b34304 100644
1865 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
1866 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
1867 @@ -5209,7 +5209,6 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev)
1868
1869 if (vif)
1870 brcmf_free_vif(vif);
1871 - free_netdev(ndev);
1872 }
1873
1874 static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
1875 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1876 index f6b17fb58877..d02e7f6ec4e0 100644
1877 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1878 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
1879 @@ -652,7 +652,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
1880 if (!ndev)
1881 return ERR_PTR(-ENOMEM);
1882
1883 - ndev->destructor = brcmf_cfg80211_free_netdev;
1884 + ndev->needs_free_netdev = true;
1885 + ndev->priv_destructor = brcmf_cfg80211_free_netdev;
1886 ifp = netdev_priv(ndev);
1887 ifp->ndev = ndev;
1888 /* store mapping ifidx to bsscfgidx */
1889 diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
1890 index 544fc09dcb62..1372b20f931e 100644
1891 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c
1892 +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
1893 @@ -73,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
1894 dev->mem_end = mdev->mem_end;
1895
1896 hostap_setup_dev(dev, local, type);
1897 - dev->destructor = free_netdev;
1898 + dev->needs_free_netdev = true;
1899
1900 sprintf(dev->name, "%s%s", prefix, name);
1901 if (!rtnl_locked)
1902 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1903 index 50c219fb1a52..2aa5cc6e34ef 100644
1904 --- a/drivers/net/wireless/mac80211_hwsim.c
1905 +++ b/drivers/net/wireless/mac80211_hwsim.c
1906 @@ -2807,7 +2807,7 @@ static const struct net_device_ops hwsim_netdev_ops = {
1907 static void hwsim_mon_setup(struct net_device *dev)
1908 {
1909 dev->netdev_ops = &hwsim_netdev_ops;
1910 - dev->destructor = free_netdev;
1911 + dev->needs_free_netdev = true;
1912 ether_setup(dev);
1913 dev->priv_flags |= IFF_NO_QUEUE;
1914 dev->type = ARPHRD_IEEE80211_RADIOTAP;
1915 diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
1916 index b62e03d11c2e..46073bb554dd 100644
1917 --- a/drivers/net/wireless/marvell/mwifiex/main.c
1918 +++ b/drivers/net/wireless/marvell/mwifiex/main.c
1919 @@ -1277,7 +1277,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
1920 struct net_device *dev)
1921 {
1922 dev->netdev_ops = &mwifiex_netdev_ops;
1923 - dev->destructor = free_netdev;
1924 + dev->needs_free_netdev = true;
1925 /* Initialize private structure */
1926 priv->current_key_index = 0;
1927 priv->media_connected = false;
1928 diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
1929 index d69e357a7a98..d8e81045cffe 100644
1930 --- a/drivers/pinctrl/pinctrl-amd.c
1931 +++ b/drivers/pinctrl/pinctrl-amd.c
1932 @@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = {
1933 .flags = IRQCHIP_SKIP_SET_WAKE,
1934 };
1935
1936 -static void amd_gpio_irq_handler(struct irq_desc *desc)
1937 +#define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
1938 +
1939 +static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
1940 {
1941 - u32 i;
1942 - u32 off;
1943 - u32 reg;
1944 - u32 pin_reg;
1945 - u64 reg64;
1946 - int handled = 0;
1947 - unsigned int irq;
1948 + struct amd_gpio *gpio_dev = dev_id;
1949 + struct gpio_chip *gc = &gpio_dev->gc;
1950 + irqreturn_t ret = IRQ_NONE;
1951 + unsigned int i, irqnr;
1952 unsigned long flags;
1953 - struct irq_chip *chip = irq_desc_get_chip(desc);
1954 - struct gpio_chip *gc = irq_desc_get_handler_data(desc);
1955 - struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
1956 + u32 *regs, regval;
1957 + u64 status, mask;
1958
1959 - chained_irq_enter(chip, desc);
1960 - /*enable GPIO interrupt again*/
1961 + /* Read the wake status */
1962 spin_lock_irqsave(&gpio_dev->lock, flags);
1963 - reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
1964 - reg64 = reg;
1965 - reg64 = reg64 << 32;
1966 -
1967 - reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
1968 - reg64 |= reg;
1969 + status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
1970 + status <<= 32;
1971 + status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
1972 spin_unlock_irqrestore(&gpio_dev->lock, flags);
1973
1974 - /*
1975 - * first 46 bits indicates interrupt status.
1976 - * one bit represents four interrupt sources.
1977 - */
1978 - for (off = 0; off < 46 ; off++) {
1979 - if (reg64 & BIT(off)) {
1980 - for (i = 0; i < 4; i++) {
1981 - pin_reg = readl(gpio_dev->base +
1982 - (off * 4 + i) * 4);
1983 - if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
1984 - (pin_reg & BIT(WAKE_STS_OFF))) {
1985 - irq = irq_find_mapping(gc->irqdomain,
1986 - off * 4 + i);
1987 - generic_handle_irq(irq);
1988 - writel(pin_reg,
1989 - gpio_dev->base
1990 - + (off * 4 + i) * 4);
1991 - handled++;
1992 - }
1993 - }
1994 + /* Bit 0-45 contain the relevant status bits */
1995 + status &= (1ULL << 46) - 1;
1996 + regs = gpio_dev->base;
1997 + for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
1998 + if (!(status & mask))
1999 + continue;
2000 + status &= ~mask;
2001 +
2002 + /* Each status bit covers four pins */
2003 + for (i = 0; i < 4; i++) {
2004 + regval = readl(regs + i);
2005 + if (!(regval & PIN_IRQ_PENDING))
2006 + continue;
2007 + irq = irq_find_mapping(gc->irqdomain, irqnr + i);
2008 + generic_handle_irq(irq);
2009 + /* Clear interrupt */
2010 + writel(regval, regs + i);
2011 + ret = IRQ_HANDLED;
2012 }
2013 }
2014
2015 - if (handled == 0)
2016 - handle_bad_irq(desc);
2017 -
2018 + /* Signal EOI to the GPIO unit */
2019 spin_lock_irqsave(&gpio_dev->lock, flags);
2020 - reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
2021 - reg |= EOI_MASK;
2022 - writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
2023 + regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
2024 + regval |= EOI_MASK;
2025 + writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
2026 spin_unlock_irqrestore(&gpio_dev->lock, flags);
2027
2028 - chained_irq_exit(chip, desc);
2029 + return ret;
2030 }
2031
2032 static int amd_get_groups_count(struct pinctrl_dev *pctldev)
2033 @@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev)
2034 goto out2;
2035 }
2036
2037 - gpiochip_set_chained_irqchip(&gpio_dev->gc,
2038 - &amd_gpio_irqchip,
2039 - irq_base,
2040 - amd_gpio_irq_handler);
2041 + ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
2042 + KBUILD_MODNAME, gpio_dev);
2043 + if (ret)
2044 + goto out2;
2045 +
2046 platform_set_drvdata(pdev, gpio_dev);
2047
2048 dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
2049 diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
2050 index 3f85b97ab8d2..6d3494ba84cf 100644
2051 --- a/drivers/s390/net/netiucv.c
2052 +++ b/drivers/s390/net/netiucv.c
2053 @@ -1954,7 +1954,6 @@ static void netiucv_free_netdevice(struct net_device *dev)
2054 privptr->conn = NULL; privptr->fsm = NULL;
2055 /* privptr gets freed by free_netdev() */
2056 }
2057 - free_netdev(dev);
2058 }
2059
2060 /**
2061 @@ -1972,7 +1971,8 @@ static void netiucv_setup_netdevice(struct net_device *dev)
2062 dev->mtu = NETIUCV_MTU_DEFAULT;
2063 dev->min_mtu = 576;
2064 dev->max_mtu = NETIUCV_MTU_MAX;
2065 - dev->destructor = netiucv_free_netdevice;
2066 + dev->needs_free_netdev = true;
2067 + dev->priv_destructor = netiucv_free_netdevice;
2068 dev->hard_header_len = NETIUCV_HDRLEN;
2069 dev->addr_len = 0;
2070 dev->type = ARPHRD_SLIP;
2071 diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
2072 index cfe37eb026d6..859d0d6051cd 100644
2073 --- a/drivers/staging/rtl8188eu/os_dep/mon.c
2074 +++ b/drivers/staging/rtl8188eu/os_dep/mon.c
2075 @@ -152,7 +152,7 @@ static const struct net_device_ops mon_netdev_ops = {
2076 static void mon_setup(struct net_device *dev)
2077 {
2078 dev->netdev_ops = &mon_netdev_ops;
2079 - dev->destructor = free_netdev;
2080 + dev->needs_free_netdev = true;
2081 ether_setup(dev);
2082 dev->priv_flags |= IFF_NO_QUEUE;
2083 dev->type = ARPHRD_IEEE80211;
2084 diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
2085 index b4058f0000e4..6a1ce6a55158 100644
2086 --- a/drivers/usb/gadget/function/f_phonet.c
2087 +++ b/drivers/usb/gadget/function/f_phonet.c
2088 @@ -281,7 +281,7 @@ static void pn_net_setup(struct net_device *dev)
2089 dev->tx_queue_len = 1;
2090
2091 dev->netdev_ops = &pn_netdev_ops;
2092 - dev->destructor = free_netdev;
2093 + dev->needs_free_netdev = true;
2094 dev->header_ops = &phonet_header_ops;
2095 }
2096
2097 diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
2098 index 6fce17d5b9f1..a5775dfd8d5f 100644
2099 --- a/drivers/watchdog/bcm_kona_wdt.c
2100 +++ b/drivers/watchdog/bcm_kona_wdt.c
2101 @@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
2102 if (!wdt)
2103 return -ENOMEM;
2104
2105 + spin_lock_init(&wdt->lock);
2106 +
2107 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2108 wdt->base = devm_ioremap_resource(dev, res);
2109 if (IS_ERR(wdt->base))
2110 @@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
2111 return ret;
2112 }
2113
2114 - spin_lock_init(&wdt->lock);
2115 platform_set_drvdata(pdev, wdt);
2116 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
2117 bcm_kona_wdt_wdd.parent = &pdev->dev;
2118 diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
2119 index 773774531aff..c5e27ebd8da8 100644
2120 --- a/fs/nfs/callback.c
2121 +++ b/fs/nfs/callback.c
2122 @@ -280,7 +280,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
2123 printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n",
2124 cb_info->users);
2125
2126 - serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
2127 + serv = svc_create_pooled(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
2128 if (!serv) {
2129 printk(KERN_ERR "nfs_callback_create_svc: create service failed\n");
2130 return ERR_PTR(-ENOMEM);
2131 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
2132 index f92ba8d6c556..66c743eddac9 100644
2133 --- a/fs/nfs/dir.c
2134 +++ b/fs/nfs/dir.c
2135 @@ -2002,29 +2002,6 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2136 }
2137 EXPORT_SYMBOL_GPL(nfs_link);
2138
2139 -static void
2140 -nfs_complete_rename(struct rpc_task *task, struct nfs_renamedata *data)
2141 -{
2142 - struct dentry *old_dentry = data->old_dentry;
2143 - struct dentry *new_dentry = data->new_dentry;
2144 - struct inode *old_inode = d_inode(old_dentry);
2145 - struct inode *new_inode = d_inode(new_dentry);
2146 -
2147 - nfs_mark_for_revalidate(old_inode);
2148 -
2149 - switch (task->tk_status) {
2150 - case 0:
2151 - if (new_inode != NULL)
2152 - nfs_drop_nlink(new_inode);
2153 - d_move(old_dentry, new_dentry);
2154 - nfs_set_verifier(new_dentry,
2155 - nfs_save_change_attribute(data->new_dir));
2156 - break;
2157 - case -ENOENT:
2158 - nfs_dentry_handle_enoent(old_dentry);
2159 - }
2160 -}
2161 -
2162 /*
2163 * RENAME
2164 * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
2165 @@ -2055,7 +2032,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2166 {
2167 struct inode *old_inode = d_inode(old_dentry);
2168 struct inode *new_inode = d_inode(new_dentry);
2169 - struct dentry *dentry = NULL;
2170 + struct dentry *dentry = NULL, *rehash = NULL;
2171 struct rpc_task *task;
2172 int error = -EBUSY;
2173
2174 @@ -2078,8 +2055,10 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2175 * To prevent any new references to the target during the
2176 * rename, we unhash the dentry in advance.
2177 */
2178 - if (!d_unhashed(new_dentry))
2179 + if (!d_unhashed(new_dentry)) {
2180 d_drop(new_dentry);
2181 + rehash = new_dentry;
2182 + }
2183
2184 if (d_count(new_dentry) > 2) {
2185 int err;
2186 @@ -2096,6 +2075,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2187 goto out;
2188
2189 new_dentry = dentry;
2190 + rehash = NULL;
2191 new_inode = NULL;
2192 }
2193 }
2194 @@ -2104,8 +2084,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2195 if (new_inode != NULL)
2196 NFS_PROTO(new_inode)->return_delegation(new_inode);
2197
2198 - task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
2199 - nfs_complete_rename);
2200 + task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
2201 if (IS_ERR(task)) {
2202 error = PTR_ERR(task);
2203 goto out;
2204 @@ -2115,9 +2094,27 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2205 if (error == 0)
2206 error = task->tk_status;
2207 rpc_put_task(task);
2208 + nfs_mark_for_revalidate(old_inode);
2209 out:
2210 + if (rehash)
2211 + d_rehash(rehash);
2212 trace_nfs_rename_exit(old_dir, old_dentry,
2213 new_dir, new_dentry, error);
2214 + if (!error) {
2215 + if (new_inode != NULL)
2216 + nfs_drop_nlink(new_inode);
2217 + /*
2218 + * The d_move() should be here instead of in an async RPC completion
2219 + * handler because we need the proper locks to move the dentry. If
2220 + * we're interrupted by a signal, the async RPC completion handler
2221 + * should mark the directories for revalidation.
2222 + */
2223 + d_move(old_dentry, new_dentry);
2224 + nfs_set_verifier(new_dentry,
2225 + nfs_save_change_attribute(new_dir));
2226 + } else if (error == -ENOENT)
2227 + nfs_dentry_handle_enoent(old_dentry);
2228 +
2229 /* new dentry created? */
2230 if (dentry)
2231 dput(dentry);
2232 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2233 index 8ba6c0d4d499..932a1461758d 100644
2234 --- a/fs/nfs/nfs4proc.c
2235 +++ b/fs/nfs/nfs4proc.c
2236 @@ -2588,7 +2588,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2237
2238 /* Except MODE, it seems harmless of setting twice. */
2239 if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
2240 - attrset[1] & FATTR4_WORD1_MODE)
2241 + (attrset[1] & FATTR4_WORD1_MODE ||
2242 + attrset[2] & FATTR4_WORD2_MODE_UMASK))
2243 sattr->ia_valid &= ~ATTR_MODE;
2244
2245 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
2246 @@ -8429,6 +8430,7 @@ static void nfs4_layoutget_release(void *calldata)
2247 size_t max_pages = max_response_pages(server);
2248
2249 dprintk("--> %s\n", __func__);
2250 + nfs4_sequence_free_slot(&lgp->res.seq_res);
2251 nfs4_free_pages(lgp->args.layout.pages, max_pages);
2252 pnfs_put_layout_hdr(NFS_I(inode)->layout);
2253 put_nfs_open_context(lgp->args.ctx);
2254 @@ -8503,7 +8505,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
2255 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
2256 if (status == 0 && lgp->res.layoutp->len)
2257 lseg = pnfs_layout_process(lgp);
2258 - nfs4_sequence_free_slot(&lgp->res.seq_res);
2259 rpc_put_task(task);
2260 dprintk("<-- %s status=%d\n", __func__, status);
2261 if (status)
2262 diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
2263 index f6e871760f8d..0da0332725aa 100644
2264 --- a/fs/ocfs2/cluster/heartbeat.c
2265 +++ b/fs/ocfs2/cluster/heartbeat.c
2266 @@ -2242,13 +2242,13 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
2267 spin_unlock(&o2hb_live_lock);
2268 }
2269
2270 -static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item,
2271 +static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
2272 char *page)
2273 {
2274 return sprintf(page, "%u\n", o2hb_dead_threshold);
2275 }
2276
2277 -static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item,
2278 +static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
2279 const char *page, size_t count)
2280 {
2281 unsigned long tmp;
2282 @@ -2297,11 +2297,11 @@ static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item,
2283
2284 }
2285
2286 -CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold);
2287 +CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
2288 CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
2289
2290 static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
2291 - &o2hb_heartbeat_group_attr_threshold,
2292 + &o2hb_heartbeat_group_attr_dead_threshold,
2293 &o2hb_heartbeat_group_attr_mode,
2294 NULL,
2295 };
2296 diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
2297 index 3b7c937a36b5..4689940a953c 100644
2298 --- a/fs/ocfs2/dlmglue.c
2299 +++ b/fs/ocfs2/dlmglue.c
2300 @@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
2301 struct ocfs2_lock_res *lockres;
2302
2303 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2304 + /* had_lock means that the currect process already takes the cluster
2305 + * lock previously. If had_lock is 1, we have nothing to do here, and
2306 + * it will get unlocked where we got the lock.
2307 + */
2308 if (!had_lock) {
2309 ocfs2_remove_holder(lockres, oh);
2310 ocfs2_inode_unlock(inode, ex);
2311 diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
2312 index 3c5384d9b3a5..f70c3778d600 100644
2313 --- a/fs/ocfs2/xattr.c
2314 +++ b/fs/ocfs2/xattr.c
2315 @@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode,
2316 void *buffer,
2317 size_t buffer_size)
2318 {
2319 - int ret;
2320 + int ret, had_lock;
2321 struct buffer_head *di_bh = NULL;
2322 + struct ocfs2_lock_holder oh;
2323
2324 - ret = ocfs2_inode_lock(inode, &di_bh, 0);
2325 - if (ret < 0) {
2326 - mlog_errno(ret);
2327 - return ret;
2328 + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
2329 + if (had_lock < 0) {
2330 + mlog_errno(had_lock);
2331 + return had_lock;
2332 }
2333 down_read(&OCFS2_I(inode)->ip_xattr_sem);
2334 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
2335 name, buffer, buffer_size);
2336 up_read(&OCFS2_I(inode)->ip_xattr_sem);
2337
2338 - ocfs2_inode_unlock(inode, 0);
2339 + ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
2340
2341 brelse(di_bh);
2342
2343 @@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
2344 {
2345 struct buffer_head *di_bh = NULL;
2346 struct ocfs2_dinode *di;
2347 - int ret, credits, ref_meta = 0, ref_credits = 0;
2348 + int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
2349 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2350 struct inode *tl_inode = osb->osb_tl_inode;
2351 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
2352 struct ocfs2_refcount_tree *ref_tree = NULL;
2353 + struct ocfs2_lock_holder oh;
2354
2355 struct ocfs2_xattr_info xi = {
2356 .xi_name_index = name_index,
2357 @@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
2358 return -ENOMEM;
2359 }
2360
2361 - ret = ocfs2_inode_lock(inode, &di_bh, 1);
2362 - if (ret < 0) {
2363 + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
2364 + if (had_lock < 0) {
2365 + ret = had_lock;
2366 mlog_errno(ret);
2367 goto cleanup_nolock;
2368 }
2369 @@ -3670,7 +3673,7 @@ int ocfs2_xattr_set(struct inode *inode,
2370 if (ret)
2371 mlog_errno(ret);
2372 }
2373 - ocfs2_inode_unlock(inode, 1);
2374 + ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
2375 cleanup_nolock:
2376 brelse(di_bh);
2377 brelse(xbs.xattr_bh);
2378 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
2379 index 5b14c16d1b77..e83e2dc7ae0b 100644
2380 --- a/fs/overlayfs/copy_up.c
2381 +++ b/fs/overlayfs/copy_up.c
2382 @@ -252,15 +252,9 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
2383 .link = link
2384 };
2385
2386 - upper = lookup_one_len(dentry->d_name.name, upperdir,
2387 - dentry->d_name.len);
2388 - err = PTR_ERR(upper);
2389 - if (IS_ERR(upper))
2390 - goto out;
2391 -
2392 err = security_inode_copy_up(dentry, &new_creds);
2393 if (err < 0)
2394 - goto out1;
2395 + goto out;
2396
2397 if (new_creds)
2398 old_creds = override_creds(new_creds);
2399 @@ -284,7 +278,7 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
2400 }
2401
2402 if (err)
2403 - goto out2;
2404 + goto out;
2405
2406 if (S_ISREG(stat->mode)) {
2407 struct path upperpath;
2408 @@ -317,6 +311,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
2409 if (err)
2410 goto out_cleanup;
2411
2412 + upper = lookup_one_len(dentry->d_name.name, upperdir,
2413 + dentry->d_name.len);
2414 + if (IS_ERR(upper)) {
2415 + err = PTR_ERR(upper);
2416 + upper = NULL;
2417 + goto out_cleanup;
2418 + }
2419 +
2420 if (tmpfile)
2421 err = ovl_do_link(temp, udir, upper, true);
2422 else
2423 @@ -330,17 +332,15 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
2424
2425 /* Restore timestamps on parent (best effort) */
2426 ovl_set_timestamps(upperdir, pstat);
2427 -out2:
2428 +out:
2429 dput(temp);
2430 -out1:
2431 dput(upper);
2432 -out:
2433 return err;
2434
2435 out_cleanup:
2436 if (!tmpfile)
2437 ovl_cleanup(wdir, temp);
2438 - goto out2;
2439 + goto out;
2440 }
2441
2442 /*
2443 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2444 index 97456b2539e4..aa38bb18c81c 100644
2445 --- a/include/linux/netdevice.h
2446 +++ b/include/linux/netdevice.h
2447 @@ -1581,8 +1581,8 @@ enum netdev_priv_flags {
2448 * @rtnl_link_state: This enum represents the phases of creating
2449 * a new link
2450 *
2451 - * @destructor: Called from unregister,
2452 - * can be used to call free_netdev
2453 + * @needs_free_netdev: Should unregister perform free_netdev?
2454 + * @priv_destructor: Called from unregister
2455 * @npinfo: XXX: need comments on this one
2456 * @nd_net: Network namespace this network device is inside
2457 *
2458 @@ -1838,7 +1838,8 @@ struct net_device {
2459 RTNL_LINK_INITIALIZING,
2460 } rtnl_link_state:16;
2461
2462 - void (*destructor)(struct net_device *dev);
2463 + bool needs_free_netdev;
2464 + void (*priv_destructor)(struct net_device *dev);
2465
2466 #ifdef CONFIG_NETPOLL
2467 struct netpoll_info __rcu *npinfo;
2468 @@ -4231,6 +4232,11 @@ static inline const char *netdev_name(const struct net_device *dev)
2469 return dev->name;
2470 }
2471
2472 +static inline bool netdev_unregistering(const struct net_device *dev)
2473 +{
2474 + return dev->reg_state == NETREG_UNREGISTERING;
2475 +}
2476 +
2477 static inline const char *netdev_reg_state(const struct net_device *dev)
2478 {
2479 switch (dev->reg_state) {
2480 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
2481 index 14d82bf16692..85bc8e7ade2e 100644
2482 --- a/include/net/xfrm.h
2483 +++ b/include/net/xfrm.h
2484 @@ -945,10 +945,6 @@ struct xfrm_dst {
2485 struct flow_cache_object flo;
2486 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2487 int num_pols, num_xfrms;
2488 -#ifdef CONFIG_XFRM_SUB_POLICY
2489 - struct flowi *origin;
2490 - struct xfrm_selector *partner;
2491 -#endif
2492 u32 xfrm_genid;
2493 u32 policy_genid;
2494 u32 route_mtu_cached;
2495 @@ -964,12 +960,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
2496 dst_release(xdst->route);
2497 if (likely(xdst->u.dst.xfrm))
2498 xfrm_state_put(xdst->u.dst.xfrm);
2499 -#ifdef CONFIG_XFRM_SUB_POLICY
2500 - kfree(xdst->origin);
2501 - xdst->origin = NULL;
2502 - kfree(xdst->partner);
2503 - xdst->partner = NULL;
2504 -#endif
2505 }
2506 #endif
2507
2508 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
2509 index 0b057628a7ba..926ef9e8c71b 100644
2510 --- a/mm/vmalloc.c
2511 +++ b/mm/vmalloc.c
2512 @@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
2513 if (p4d_none(*p4d))
2514 return NULL;
2515 pud = pud_offset(p4d, addr);
2516 - if (pud_none(*pud))
2517 +
2518 + /*
2519 + * Don't dereference bad PUD or PMD (below) entries. This will also
2520 + * identify huge mappings, which we may encounter on architectures
2521 + * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
2522 + * identified as vmalloc addresses by is_vmalloc_addr(), but are
2523 + * not [unambiguously] associated with a struct page, so there is
2524 + * no correct value to return for them.
2525 + */
2526 + WARN_ON_ONCE(pud_bad(*pud));
2527 + if (pud_none(*pud) || pud_bad(*pud))
2528 return NULL;
2529 pmd = pmd_offset(pud, addr);
2530 - if (pmd_none(*pmd))
2531 + WARN_ON_ONCE(pmd_bad(*pmd));
2532 + if (pmd_none(*pmd) || pmd_bad(*pmd))
2533 return NULL;
2534
2535 ptep = pte_offset_map(pmd, addr);
2536 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
2537 index 467069b73ce1..9649579b5b9f 100644
2538 --- a/net/8021q/vlan.c
2539 +++ b/net/8021q/vlan.c
2540 @@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
2541 return 0;
2542
2543 out_free_newdev:
2544 - free_netdev(new_dev);
2545 + if (new_dev->reg_state == NETREG_UNINITIALIZED)
2546 + free_netdev(new_dev);
2547 return err;
2548 }
2549
2550 diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2551 index e97ab824e368..0064dfd328d4 100644
2552 --- a/net/8021q/vlan_dev.c
2553 +++ b/net/8021q/vlan_dev.c
2554 @@ -807,7 +807,6 @@ static void vlan_dev_free(struct net_device *dev)
2555
2556 free_percpu(vlan->vlan_pcpu_stats);
2557 vlan->vlan_pcpu_stats = NULL;
2558 - free_netdev(dev);
2559 }
2560
2561 void vlan_setup(struct net_device *dev)
2562 @@ -820,7 +819,8 @@ void vlan_setup(struct net_device *dev)
2563 netif_keep_dst(dev);
2564
2565 dev->netdev_ops = &vlan_netdev_ops;
2566 - dev->destructor = vlan_dev_free;
2567 + dev->needs_free_netdev = true;
2568 + dev->priv_destructor = vlan_dev_free;
2569 dev->ethtool_ops = &vlan_ethtool_ops;
2570
2571 dev->min_mtu = 0;
2572 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
2573 index d042c99af028..07ca0e8f30b2 100644
2574 --- a/net/batman-adv/soft-interface.c
2575 +++ b/net/batman-adv/soft-interface.c
2576 @@ -961,8 +961,6 @@ static void batadv_softif_free(struct net_device *dev)
2577 * netdev and its private data (bat_priv)
2578 */
2579 rcu_barrier();
2580 -
2581 - free_netdev(dev);
2582 }
2583
2584 /**
2585 @@ -976,7 +974,8 @@ static void batadv_softif_init_early(struct net_device *dev)
2586 ether_setup(dev);
2587
2588 dev->netdev_ops = &batadv_netdev_ops;
2589 - dev->destructor = batadv_softif_free;
2590 + dev->needs_free_netdev = true;
2591 + dev->priv_destructor = batadv_softif_free;
2592 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
2593 dev->priv_flags |= IFF_NO_QUEUE;
2594
2595 diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
2596 index d491529332f4..7ffeca72e928 100644
2597 --- a/net/bluetooth/6lowpan.c
2598 +++ b/net/bluetooth/6lowpan.c
2599 @@ -653,7 +653,7 @@ static void netdev_setup(struct net_device *dev)
2600
2601 dev->netdev_ops = &netdev_ops;
2602 dev->header_ops = &header_ops;
2603 - dev->destructor = free_netdev;
2604 + dev->needs_free_netdev = true;
2605 }
2606
2607 static struct device_type bt_type = {
2608 diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
2609 index 430b53e7d941..f0f3447e8aa4 100644
2610 --- a/net/bridge/br_device.c
2611 +++ b/net/bridge/br_device.c
2612 @@ -379,7 +379,7 @@ void br_dev_setup(struct net_device *dev)
2613 ether_setup(dev);
2614
2615 dev->netdev_ops = &br_netdev_ops;
2616 - dev->destructor = free_netdev;
2617 + dev->needs_free_netdev = true;
2618 dev->ethtool_ops = &br_ethtool_ops;
2619 SET_NETDEV_DEVTYPE(dev, &br_type);
2620 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
2621 diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
2622 index 59ce1fcc220c..71b6ab240dea 100644
2623 --- a/net/caif/cfpkt_skbuff.c
2624 +++ b/net/caif/cfpkt_skbuff.c
2625 @@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
2626 {
2627 struct sk_buff *skb;
2628
2629 - if (likely(in_interrupt()))
2630 - skb = alloc_skb(len + pfx, GFP_ATOMIC);
2631 - else
2632 - skb = alloc_skb(len + pfx, GFP_KERNEL);
2633 -
2634 + skb = alloc_skb(len + pfx, GFP_ATOMIC);
2635 if (unlikely(skb == NULL))
2636 return NULL;
2637
2638 diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
2639 index 1816fc9f1ee7..fe3c53efb949 100644
2640 --- a/net/caif/chnl_net.c
2641 +++ b/net/caif/chnl_net.c
2642 @@ -392,14 +392,14 @@ static void chnl_net_destructor(struct net_device *dev)
2643 {
2644 struct chnl_net *priv = netdev_priv(dev);
2645 caif_free_client(&priv->chnl);
2646 - free_netdev(dev);
2647 }
2648
2649 static void ipcaif_net_setup(struct net_device *dev)
2650 {
2651 struct chnl_net *priv;
2652 dev->netdev_ops = &netdev_ops;
2653 - dev->destructor = chnl_net_destructor;
2654 + dev->needs_free_netdev = true;
2655 + dev->priv_destructor = chnl_net_destructor;
2656 dev->flags |= IFF_NOARP;
2657 dev->flags |= IFF_POINTOPOINT;
2658 dev->mtu = GPRS_PDP_MTU;
2659 diff --git a/net/core/dev.c b/net/core/dev.c
2660 index 9b5875388c23..9debc1b26ce9 100644
2661 --- a/net/core/dev.c
2662 +++ b/net/core/dev.c
2663 @@ -1251,8 +1251,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
2664 if (!new_ifalias)
2665 return -ENOMEM;
2666 dev->ifalias = new_ifalias;
2667 + memcpy(dev->ifalias, alias, len);
2668 + dev->ifalias[len] = 0;
2669
2670 - strlcpy(dev->ifalias, alias, len+1);
2671 return len;
2672 }
2673
2674 @@ -7345,6 +7346,8 @@ int register_netdevice(struct net_device *dev)
2675 err_uninit:
2676 if (dev->netdev_ops->ndo_uninit)
2677 dev->netdev_ops->ndo_uninit(dev);
2678 + if (dev->priv_destructor)
2679 + dev->priv_destructor(dev);
2680 goto out;
2681 }
2682 EXPORT_SYMBOL(register_netdevice);
2683 @@ -7552,8 +7555,10 @@ void netdev_run_todo(void)
2684 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
2685 WARN_ON(dev->dn_ptr);
2686
2687 - if (dev->destructor)
2688 - dev->destructor(dev);
2689 + if (dev->priv_destructor)
2690 + dev->priv_destructor(dev);
2691 + if (dev->needs_free_netdev)
2692 + free_netdev(dev);
2693
2694 /* Report a network device has been unregistered */
2695 rtnl_lock();
2696 diff --git a/net/core/dst.c b/net/core/dst.c
2697 index 6192f11beec9..13ba4a090c41 100644
2698 --- a/net/core/dst.c
2699 +++ b/net/core/dst.c
2700 @@ -469,6 +469,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
2701 spin_lock_bh(&dst_garbage.lock);
2702 dst = dst_garbage.list;
2703 dst_garbage.list = NULL;
2704 + /* The code in dst_ifdown places a hold on the loopback device.
2705 + * If the gc entry processing is set to expire after a lengthy
2706 + * interval, this hold can cause netdev_wait_allrefs() to hang
2707 + * out and wait for a long time -- until the the loopback
2708 + * interface is released. If we're really unlucky, it'll emit
2709 + * pr_emerg messages to console too. Reset the interval here,
2710 + * so dst cleanups occur in a more timely fashion.
2711 + */
2712 + if (dst_garbage.timer_inc > DST_GC_INC) {
2713 + dst_garbage.timer_inc = DST_GC_INC;
2714 + dst_garbage.timer_expires = DST_GC_MIN;
2715 + mod_delayed_work(system_wq, &dst_gc_work,
2716 + dst_garbage.timer_expires);
2717 + }
2718 spin_unlock_bh(&dst_garbage.lock);
2719
2720 if (last)
2721 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2722 index 8d4a185a8143..05436c02fb7b 100644
2723 --- a/net/core/rtnetlink.c
2724 +++ b/net/core/rtnetlink.c
2725 @@ -934,6 +934,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
2726 + nla_total_size(1) /* IFLA_LINKMODE */
2727 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
2728 + nla_total_size(4) /* IFLA_LINK_NETNSID */
2729 + + nla_total_size(4) /* IFLA_GROUP */
2730 + nla_total_size(ext_filter_mask
2731 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
2732 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
2733 @@ -1127,6 +1128,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
2734 struct ifla_vf_mac vf_mac;
2735 struct ifla_vf_info ivi;
2736
2737 + memset(&ivi, 0, sizeof(ivi));
2738 +
2739 /* Not all SR-IOV capable drivers support the
2740 * spoofcheck and "RSS query enable" query. Preset to
2741 * -1 so the user space tool can detect that the driver
2742 @@ -1135,7 +1138,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
2743 ivi.spoofchk = -1;
2744 ivi.rss_query_en = -1;
2745 ivi.trusted = -1;
2746 - memset(ivi.mac, 0, sizeof(ivi.mac));
2747 /* The default value for VF link state is "auto"
2748 * IFLA_VF_LINK_STATE_AUTO which equals zero
2749 */
2750 @@ -1461,6 +1463,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
2751 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
2752 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
2753 [IFLA_XDP] = { .type = NLA_NESTED },
2754 + [IFLA_GROUP] = { .type = NLA_U32 },
2755 };
2756
2757 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
2758 diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
2759 index b1dc096d22f8..403593bd2b83 100644
2760 --- a/net/decnet/dn_route.c
2761 +++ b/net/decnet/dn_route.c
2762 @@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
2763 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
2764 }
2765
2766 -static inline void dnrt_drop(struct dn_route *rt)
2767 -{
2768 - dst_release(&rt->dst);
2769 - call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
2770 -}
2771 -
2772 static void dn_dst_check_expire(unsigned long dummy)
2773 {
2774 int i;
2775 @@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
2776 }
2777 *rtp = rt->dst.dn_next;
2778 rt->dst.dn_next = NULL;
2779 - dnrt_drop(rt);
2780 + dnrt_free(rt);
2781 break;
2782 }
2783 spin_unlock_bh(&dn_rt_hash_table[i].lock);
2784 @@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
2785 dst_use(&rth->dst, now);
2786 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
2787
2788 - dnrt_drop(rt);
2789 + dst_free(&rt->dst);
2790 *rp = rth;
2791 return 0;
2792 }
2793 @@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
2794 for(; rt; rt = next) {
2795 next = rcu_dereference_raw(rt->dst.dn_next);
2796 RCU_INIT_POINTER(rt->dst.dn_next, NULL);
2797 - dst_free((struct dst_entry *)rt);
2798 + dnrt_free(rt);
2799 }
2800
2801 nothing_to_declare:
2802 @@ -1187,7 +1181,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
2803 if (dev_out->flags & IFF_LOOPBACK)
2804 flags |= RTCF_LOCAL;
2805
2806 - rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
2807 + rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
2808 if (rt == NULL)
2809 goto e_nobufs;
2810
2811 diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
2812 index 85f2fdc360c2..29246bc9a7b4 100644
2813 --- a/net/decnet/netfilter/dn_rtmsg.c
2814 +++ b/net/decnet/netfilter/dn_rtmsg.c
2815 @@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
2816 {
2817 struct nlmsghdr *nlh = nlmsg_hdr(skb);
2818
2819 - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
2820 + if (skb->len < sizeof(*nlh) ||
2821 + nlh->nlmsg_len < sizeof(*nlh) ||
2822 + skb->len < nlh->nlmsg_len)
2823 return;
2824
2825 if (!netlink_capable(skb, CAP_NET_ADMIN))
2826 diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
2827 index c73160fb11e7..0a0a392dc2bd 100644
2828 --- a/net/hsr/hsr_device.c
2829 +++ b/net/hsr/hsr_device.c
2830 @@ -378,7 +378,6 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
2831 del_timer_sync(&hsr->announce_timer);
2832
2833 synchronize_rcu();
2834 - free_netdev(hsr_dev);
2835 }
2836
2837 static const struct net_device_ops hsr_device_ops = {
2838 @@ -404,7 +403,8 @@ void hsr_dev_setup(struct net_device *dev)
2839 SET_NETDEV_DEVTYPE(dev, &hsr_type);
2840 dev->priv_flags |= IFF_NO_QUEUE;
2841
2842 - dev->destructor = hsr_dev_destroy;
2843 + dev->needs_free_netdev = true;
2844 + dev->priv_destructor = hsr_dev_destroy;
2845
2846 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
2847 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
2848 diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
2849 index d7efbf0dad20..0a866f332290 100644
2850 --- a/net/ieee802154/6lowpan/core.c
2851 +++ b/net/ieee802154/6lowpan/core.c
2852 @@ -107,7 +107,7 @@ static void lowpan_setup(struct net_device *ldev)
2853
2854 ldev->netdev_ops = &lowpan_netdev_ops;
2855 ldev->header_ops = &lowpan_header_ops;
2856 - ldev->destructor = free_netdev;
2857 + ldev->needs_free_netdev = true;
2858 ldev->features |= NETIF_F_NETNS_LOCAL;
2859 }
2860
2861 diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
2862 index b1e24446e297..9708a32b1f26 100644
2863 --- a/net/ipv4/esp4.c
2864 +++ b/net/ipv4/esp4.c
2865 @@ -212,6 +212,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
2866 u8 *iv;
2867 u8 *tail;
2868 u8 *vaddr;
2869 + int esph_offset;
2870 int blksize;
2871 int clen;
2872 int alen;
2873 @@ -392,12 +393,14 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
2874 }
2875
2876 cow:
2877 + esph_offset = (unsigned char *)esph - skb_transport_header(skb);
2878 +
2879 err = skb_cow_data(skb, tailen, &trailer);
2880 if (err < 0)
2881 goto error;
2882 nfrags = err;
2883 tail = skb_tail_pointer(trailer);
2884 - esph = ip_esp_hdr(skb);
2885 + esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
2886
2887 skip_cow:
2888 esp_output_fill_trailer(tail, tfclen, plen, proto);
2889 diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
2890 index fc310db2708b..760311fd7c5b 100644
2891 --- a/net/ipv4/icmp.c
2892 +++ b/net/ipv4/icmp.c
2893 @@ -674,8 +674,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
2894 /* Needed by both icmp_global_allow and icmp_xmit_lock */
2895 local_bh_disable();
2896
2897 - /* Check global sysctl_icmp_msgs_per_sec ratelimit */
2898 - if (!icmpv4_global_allow(net, type, code))
2899 + /* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
2900 + * incoming dev is loopback. If outgoing dev change to not be
2901 + * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow)
2902 + */
2903 + if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
2904 + !icmpv4_global_allow(net, type, code))
2905 goto out_bh_enable;
2906
2907 sk = icmp_xmit_lock(net);
2908 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
2909 index 44fd86de2823..ec9a396fa466 100644
2910 --- a/net/ipv4/igmp.c
2911 +++ b/net/ipv4/igmp.c
2912 @@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
2913 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
2914 if (!pmc)
2915 return;
2916 + spin_lock_init(&pmc->lock);
2917 spin_lock_bh(&im->lock);
2918 pmc->interface = im->interface;
2919 in_dev_hold(in_dev);
2920 @@ -2071,21 +2072,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
2921
2922 static void ip_mc_clear_src(struct ip_mc_list *pmc)
2923 {
2924 - struct ip_sf_list *psf, *nextpsf;
2925 + struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
2926
2927 - for (psf = pmc->tomb; psf; psf = nextpsf) {
2928 + spin_lock_bh(&pmc->lock);
2929 + tomb = pmc->tomb;
2930 + pmc->tomb = NULL;
2931 + sources = pmc->sources;
2932 + pmc->sources = NULL;
2933 + pmc->sfmode = MCAST_EXCLUDE;
2934 + pmc->sfcount[MCAST_INCLUDE] = 0;
2935 + pmc->sfcount[MCAST_EXCLUDE] = 1;
2936 + spin_unlock_bh(&pmc->lock);
2937 +
2938 + for (psf = tomb; psf; psf = nextpsf) {
2939 nextpsf = psf->sf_next;
2940 kfree(psf);
2941 }
2942 - pmc->tomb = NULL;
2943 - for (psf = pmc->sources; psf; psf = nextpsf) {
2944 + for (psf = sources; psf; psf = nextpsf) {
2945 nextpsf = psf->sf_next;
2946 kfree(psf);
2947 }
2948 - pmc->sources = NULL;
2949 - pmc->sfmode = MCAST_EXCLUDE;
2950 - pmc->sfcount[MCAST_INCLUDE] = 0;
2951 - pmc->sfcount[MCAST_EXCLUDE] = 1;
2952 }
2953
2954 /* Join a multicast group
2955 diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
2956 index 823abaef006b..b2d1be55ca94 100644
2957 --- a/net/ipv4/ip_tunnel.c
2958 +++ b/net/ipv4/ip_tunnel.c
2959 @@ -961,7 +961,6 @@ static void ip_tunnel_dev_free(struct net_device *dev)
2960 gro_cells_destroy(&tunnel->gro_cells);
2961 dst_cache_destroy(&tunnel->dst_cache);
2962 free_percpu(dev->tstats);
2963 - free_netdev(dev);
2964 }
2965
2966 void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
2967 @@ -1148,7 +1147,8 @@ int ip_tunnel_init(struct net_device *dev)
2968 struct iphdr *iph = &tunnel->parms.iph;
2969 int err;
2970
2971 - dev->destructor = ip_tunnel_dev_free;
2972 + dev->needs_free_netdev = true;
2973 + dev->priv_destructor = ip_tunnel_dev_free;
2974 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2975 if (!dev->tstats)
2976 return -ENOMEM;
2977 diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
2978 index b036e85e093b..96c5681abb05 100644
2979 --- a/net/ipv4/ipmr.c
2980 +++ b/net/ipv4/ipmr.c
2981 @@ -501,7 +501,7 @@ static void reg_vif_setup(struct net_device *dev)
2982 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
2983 dev->flags = IFF_NOARP;
2984 dev->netdev_ops = &reg_vif_netdev_ops;
2985 - dev->destructor = free_netdev;
2986 + dev->needs_free_netdev = true;
2987 dev->features |= NETIF_F_NETNS_LOCAL;
2988 }
2989
2990 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2991 index 311f45641673..9725e8faf56d 100644
2992 --- a/net/ipv6/addrconf.c
2993 +++ b/net/ipv6/addrconf.c
2994 @@ -328,9 +328,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
2995 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
2996 unsigned long delay)
2997 {
2998 - if (!delayed_work_pending(&ifp->dad_work))
2999 - in6_ifa_hold(ifp);
3000 - mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
3001 + in6_ifa_hold(ifp);
3002 + if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
3003 + in6_ifa_put(ifp);
3004 }
3005
3006 static int snmp6_alloc_dev(struct inet6_dev *idev)
3007 diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
3008 index eea23b57c6a5..ec849d88a662 100644
3009 --- a/net/ipv6/fib6_rules.c
3010 +++ b/net/ipv6/fib6_rules.c
3011 @@ -32,7 +32,6 @@ struct fib6_rule {
3012 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
3013 int flags, pol_lookup_t lookup)
3014 {
3015 - struct rt6_info *rt;
3016 struct fib_lookup_arg arg = {
3017 .lookup_ptr = lookup,
3018 .flags = FIB_LOOKUP_NOREF,
3019 @@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
3020 fib_rules_lookup(net->ipv6.fib6_rules_ops,
3021 flowi6_to_flowi(fl6), flags, &arg);
3022
3023 - rt = arg.result;
3024 + if (arg.result)
3025 + return arg.result;
3026
3027 - if (!rt) {
3028 - dst_hold(&net->ipv6.ip6_null_entry->dst);
3029 - return &net->ipv6.ip6_null_entry->dst;
3030 - }
3031 -
3032 - if (rt->rt6i_flags & RTF_REJECT &&
3033 - rt->dst.error == -EAGAIN) {
3034 - ip6_rt_put(rt);
3035 - rt = net->ipv6.ip6_null_entry;
3036 - dst_hold(&rt->dst);
3037 - }
3038 -
3039 - return &rt->dst;
3040 + dst_hold(&net->ipv6.ip6_null_entry->dst);
3041 + return &net->ipv6.ip6_null_entry->dst;
3042 }
3043
3044 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
3045 @@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
3046 flp6->saddr = saddr;
3047 }
3048 err = rt->dst.error;
3049 - goto out;
3050 + if (err != -EAGAIN)
3051 + goto out;
3052 }
3053 again:
3054 ip6_rt_put(rt);
3055 diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
3056 index 230b5aac9f03..8d7b113958b1 100644
3057 --- a/net/ipv6/icmp.c
3058 +++ b/net/ipv6/icmp.c
3059 @@ -491,7 +491,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
3060 local_bh_disable();
3061
3062 /* Check global sysctl_icmp_msgs_per_sec ratelimit */
3063 - if (!icmpv6_global_allow(type))
3064 + if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type))
3065 goto out_bh_enable;
3066
3067 mip6_addr_swap(skb);
3068 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
3069 index d4bf2c68a545..e6b78ba0e636 100644
3070 --- a/net/ipv6/ip6_fib.c
3071 +++ b/net/ipv6/ip6_fib.c
3072 @@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
3073 struct rt6_info *rt;
3074
3075 rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
3076 - if (rt->rt6i_flags & RTF_REJECT &&
3077 - rt->dst.error == -EAGAIN) {
3078 + if (rt->dst.error == -EAGAIN) {
3079 ip6_rt_put(rt);
3080 rt = net->ipv6.ip6_null_entry;
3081 dst_hold(&rt->dst);
3082 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
3083 index 4d60164c17e2..0f1c24935b7a 100644
3084 --- a/net/ipv6/ip6_gre.c
3085 +++ b/net/ipv6/ip6_gre.c
3086 @@ -986,13 +986,13 @@ static void ip6gre_dev_free(struct net_device *dev)
3087
3088 dst_cache_destroy(&t->dst_cache);
3089 free_percpu(dev->tstats);
3090 - free_netdev(dev);
3091 }
3092
3093 static void ip6gre_tunnel_setup(struct net_device *dev)
3094 {
3095 dev->netdev_ops = &ip6gre_netdev_ops;
3096 - dev->destructor = ip6gre_dev_free;
3097 + dev->needs_free_netdev = true;
3098 + dev->priv_destructor = ip6gre_dev_free;
3099
3100 dev->type = ARPHRD_IP6GRE;
3101
3102 @@ -1143,7 +1143,7 @@ static int __net_init ip6gre_init_net(struct net *net)
3103 return 0;
3104
3105 err_reg_dev:
3106 - ip6gre_dev_free(ign->fb_tunnel_dev);
3107 + free_netdev(ign->fb_tunnel_dev);
3108 err_alloc_dev:
3109 return err;
3110 }
3111 @@ -1292,7 +1292,8 @@ static void ip6gre_tap_setup(struct net_device *dev)
3112 ether_setup(dev);
3113
3114 dev->netdev_ops = &ip6gre_tap_netdev_ops;
3115 - dev->destructor = ip6gre_dev_free;
3116 + dev->needs_free_netdev = true;
3117 + dev->priv_destructor = ip6gre_dev_free;
3118
3119 dev->features |= NETIF_F_NETNS_LOCAL;
3120 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
3121 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
3122 index e2d7867f3112..292f24c48c94 100644
3123 --- a/net/ipv6/ip6_tunnel.c
3124 +++ b/net/ipv6/ip6_tunnel.c
3125 @@ -254,7 +254,6 @@ static void ip6_dev_free(struct net_device *dev)
3126 gro_cells_destroy(&t->gro_cells);
3127 dst_cache_destroy(&t->dst_cache);
3128 free_percpu(dev->tstats);
3129 - free_netdev(dev);
3130 }
3131
3132 static int ip6_tnl_create2(struct net_device *dev)
3133 @@ -322,7 +321,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
3134 return t;
3135
3136 failed_free:
3137 - ip6_dev_free(dev);
3138 + free_netdev(dev);
3139 failed:
3140 return ERR_PTR(err);
3141 }
3142 @@ -1772,7 +1771,8 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
3143 static void ip6_tnl_dev_setup(struct net_device *dev)
3144 {
3145 dev->netdev_ops = &ip6_tnl_netdev_ops;
3146 - dev->destructor = ip6_dev_free;
3147 + dev->needs_free_netdev = true;
3148 + dev->priv_destructor = ip6_dev_free;
3149
3150 dev->type = ARPHRD_TUNNEL6;
3151 dev->flags |= IFF_NOARP;
3152 @@ -2211,7 +2211,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
3153 return 0;
3154
3155 err_register:
3156 - ip6_dev_free(ip6n->fb_tnl_dev);
3157 + free_netdev(ip6n->fb_tnl_dev);
3158 err_alloc_dev:
3159 return err;
3160 }
3161 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
3162 index 3d8a3b63b4fd..5659f4a40f38 100644
3163 --- a/net/ipv6/ip6_vti.c
3164 +++ b/net/ipv6/ip6_vti.c
3165 @@ -180,7 +180,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
3166 static void vti6_dev_free(struct net_device *dev)
3167 {
3168 free_percpu(dev->tstats);
3169 - free_netdev(dev);
3170 }
3171
3172 static int vti6_tnl_create2(struct net_device *dev)
3173 @@ -235,7 +234,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
3174 return t;
3175
3176 failed_free:
3177 - vti6_dev_free(dev);
3178 + free_netdev(dev);
3179 failed:
3180 return NULL;
3181 }
3182 @@ -841,7 +840,8 @@ static const struct net_device_ops vti6_netdev_ops = {
3183 static void vti6_dev_setup(struct net_device *dev)
3184 {
3185 dev->netdev_ops = &vti6_netdev_ops;
3186 - dev->destructor = vti6_dev_free;
3187 + dev->needs_free_netdev = true;
3188 + dev->priv_destructor = vti6_dev_free;
3189
3190 dev->type = ARPHRD_TUNNEL6;
3191 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
3192 @@ -1092,7 +1092,7 @@ static int __net_init vti6_init_net(struct net *net)
3193 return 0;
3194
3195 err_register:
3196 - vti6_dev_free(ip6n->fb_tnl_dev);
3197 + free_netdev(ip6n->fb_tnl_dev);
3198 err_alloc_dev:
3199 return err;
3200 }
3201 diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
3202 index bf34d0950752..576fd9332235 100644
3203 --- a/net/ipv6/ip6mr.c
3204 +++ b/net/ipv6/ip6mr.c
3205 @@ -733,7 +733,7 @@ static void reg_vif_setup(struct net_device *dev)
3206 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
3207 dev->flags = IFF_NOARP;
3208 dev->netdev_ops = &reg_vif_netdev_ops;
3209 - dev->destructor = free_netdev;
3210 + dev->needs_free_netdev = true;
3211 dev->features |= NETIF_F_NETNS_LOCAL;
3212 }
3213
3214 diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
3215 index cc8e3ae9ca73..e88bcb8ff0fd 100644
3216 --- a/net/ipv6/proc.c
3217 +++ b/net/ipv6/proc.c
3218 @@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
3219 u64 buff64[SNMP_MIB_MAX];
3220 int i;
3221
3222 - memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
3223 + memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
3224
3225 snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
3226 for (i = 0; itemlist[i].name; i++)
3227 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3228 index d316d00e11ab..1072fc189708 100644
3229 --- a/net/ipv6/route.c
3230 +++ b/net/ipv6/route.c
3231 @@ -2804,6 +2804,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg)
3232 if ((rt->dst.dev == dev || !dev) &&
3233 rt != adn->net->ipv6.ip6_null_entry &&
3234 (rt->rt6i_nsiblings == 0 ||
3235 + (dev && netdev_unregistering(dev)) ||
3236 !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
3237 return -1;
3238
3239 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3240 index 99853c6e33a8..4f07a211ac2a 100644
3241 --- a/net/ipv6/sit.c
3242 +++ b/net/ipv6/sit.c
3243 @@ -265,7 +265,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
3244 return nt;
3245
3246 failed_free:
3247 - ipip6_dev_free(dev);
3248 + free_netdev(dev);
3249 failed:
3250 return NULL;
3251 }
3252 @@ -1333,7 +1333,6 @@ static void ipip6_dev_free(struct net_device *dev)
3253
3254 dst_cache_destroy(&tunnel->dst_cache);
3255 free_percpu(dev->tstats);
3256 - free_netdev(dev);
3257 }
3258
3259 #define SIT_FEATURES (NETIF_F_SG | \
3260 @@ -1348,7 +1347,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
3261 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
3262
3263 dev->netdev_ops = &ipip6_netdev_ops;
3264 - dev->destructor = ipip6_dev_free;
3265 + dev->needs_free_netdev = true;
3266 + dev->priv_destructor = ipip6_dev_free;
3267
3268 dev->type = ARPHRD_SIT;
3269 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
3270 diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
3271 index 08a807b29298..3ef5d913e7a3 100644
3272 --- a/net/ipv6/xfrm6_input.c
3273 +++ b/net/ipv6/xfrm6_input.c
3274 @@ -43,8 +43,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
3275 return 1;
3276 #endif
3277
3278 - ipv6_hdr(skb)->payload_len = htons(skb->len);
3279 __skb_push(skb, skb->data - skb_network_header(skb));
3280 + ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
3281
3282 if (xo && (xo->flags & XFRM_GRO)) {
3283 skb_mac_header_rebuild(skb);
3284 diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
3285 index 74d09f91709e..3be852808a9d 100644
3286 --- a/net/irda/irlan/irlan_eth.c
3287 +++ b/net/irda/irlan/irlan_eth.c
3288 @@ -65,7 +65,7 @@ static void irlan_eth_setup(struct net_device *dev)
3289 ether_setup(dev);
3290
3291 dev->netdev_ops = &irlan_eth_netdev_ops;
3292 - dev->destructor = free_netdev;
3293 + dev->needs_free_netdev = true;
3294 dev->min_mtu = 0;
3295 dev->max_mtu = ETH_MAX_MTU;
3296
3297 diff --git a/net/key/af_key.c b/net/key/af_key.c
3298 index be8cecc65002..358175c83180 100644
3299 --- a/net/key/af_key.c
3300 +++ b/net/key/af_key.c
3301 @@ -1157,6 +1157,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
3302 goto out;
3303 }
3304
3305 + err = -ENOBUFS;
3306 key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
3307 if (sa->sadb_sa_auth) {
3308 int keysize = 0;
3309 @@ -1168,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
3310 if (key)
3311 keysize = (key->sadb_key_bits + 7) / 8;
3312 x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
3313 - if (!x->aalg)
3314 + if (!x->aalg) {
3315 + err = -ENOMEM;
3316 goto out;
3317 + }
3318 strcpy(x->aalg->alg_name, a->name);
3319 x->aalg->alg_key_len = 0;
3320 if (key) {
3321 @@ -1188,8 +1191,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
3322 goto out;
3323 }
3324 x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
3325 - if (!x->calg)
3326 + if (!x->calg) {
3327 + err = -ENOMEM;
3328 goto out;
3329 + }
3330 strcpy(x->calg->alg_name, a->name);
3331 x->props.calgo = sa->sadb_sa_encrypt;
3332 } else {
3333 @@ -1203,8 +1208,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
3334 if (key)
3335 keysize = (key->sadb_key_bits + 7) / 8;
3336 x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
3337 - if (!x->ealg)
3338 + if (!x->ealg) {
3339 + err = -ENOMEM;
3340 goto out;
3341 + }
3342 strcpy(x->ealg->alg_name, a->name);
3343 x->ealg->alg_key_len = 0;
3344 if (key) {
3345 @@ -1249,8 +1256,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
3346 struct xfrm_encap_tmpl *natt;
3347
3348 x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
3349 - if (!x->encap)
3350 + if (!x->encap) {
3351 + err = -ENOMEM;
3352 goto out;
3353 + }
3354
3355 natt = x->encap;
3356 n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
3357 @@ -2755,6 +2764,8 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
3358 int err, err2;
3359
3360 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
3361 + if (!err)
3362 + xfrm_garbage_collect(net);
3363 err2 = unicast_flush_resp(sk, hdr);
3364 if (err || err2) {
3365 if (err == -ESRCH) /* empty table - old silent behavior */
3366 diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
3367 index 6fd41d7afe1e..cafb667b3c85 100644
3368 --- a/net/l2tp/l2tp_eth.c
3369 +++ b/net/l2tp/l2tp_eth.c
3370 @@ -133,7 +133,7 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
3371 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
3372 dev->features |= NETIF_F_LLTX;
3373 dev->netdev_ops = &l2tp_eth_netdev_ops;
3374 - dev->destructor = free_netdev;
3375 + dev->needs_free_netdev = true;
3376 }
3377
3378 static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
3379 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
3380 index 5bb0c5012819..8f783d527f29 100644
3381 --- a/net/mac80211/iface.c
3382 +++ b/net/mac80211/iface.c
3383 @@ -1198,7 +1198,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
3384 static void ieee80211_if_free(struct net_device *dev)
3385 {
3386 free_percpu(dev->tstats);
3387 - free_netdev(dev);
3388 }
3389
3390 static void ieee80211_if_setup(struct net_device *dev)
3391 @@ -1206,7 +1205,8 @@ static void ieee80211_if_setup(struct net_device *dev)
3392 ether_setup(dev);
3393 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
3394 dev->netdev_ops = &ieee80211_dataif_ops;
3395 - dev->destructor = ieee80211_if_free;
3396 + dev->needs_free_netdev = true;
3397 + dev->priv_destructor = ieee80211_if_free;
3398 }
3399
3400 static void ieee80211_if_setup_no_queue(struct net_device *dev)
3401 @@ -1810,6 +1810,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
3402 ret = dev_alloc_name(ndev, ndev->name);
3403 if (ret < 0) {
3404 ieee80211_if_free(ndev);
3405 + free_netdev(ndev);
3406 return ret;
3407 }
3408
3409 @@ -1899,7 +1900,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
3410
3411 ret = register_netdevice(ndev);
3412 if (ret) {
3413 - ieee80211_if_free(ndev);
3414 + free_netdev(ndev);
3415 return ret;
3416 }
3417 }
3418 diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
3419 index 06019dba4b10..bd88a9b80773 100644
3420 --- a/net/mac802154/iface.c
3421 +++ b/net/mac802154/iface.c
3422 @@ -526,8 +526,6 @@ static void mac802154_wpan_free(struct net_device *dev)
3423 struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
3424
3425 mac802154_llsec_destroy(&sdata->sec);
3426 -
3427 - free_netdev(dev);
3428 }
3429
3430 static void ieee802154_if_setup(struct net_device *dev)
3431 @@ -593,7 +591,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
3432 sdata->dev->dev_addr);
3433
3434 sdata->dev->header_ops = &mac802154_header_ops;
3435 - sdata->dev->destructor = mac802154_wpan_free;
3436 + sdata->dev->needs_free_netdev = true;
3437 + sdata->dev->priv_destructor = mac802154_wpan_free;
3438 sdata->dev->netdev_ops = &mac802154_wpan_ops;
3439 sdata->dev->ml_priv = &mac802154_mlme_wpan;
3440 wpan_dev->promiscuous_mode = false;
3441 @@ -608,7 +607,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
3442
3443 break;
3444 case NL802154_IFTYPE_MONITOR:
3445 - sdata->dev->destructor = free_netdev;
3446 + sdata->dev->needs_free_netdev = true;
3447 sdata->dev->netdev_ops = &mac802154_monitor_ops;
3448 wpan_dev->promiscuous_mode = true;
3449 break;
3450 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
3451 index dc7dfd68fafe..8555659d2e07 100644
3452 --- a/net/netfilter/nf_conntrack_netlink.c
3453 +++ b/net/netfilter/nf_conntrack_netlink.c
3454 @@ -45,6 +45,8 @@
3455 #include <net/netfilter/nf_conntrack_zones.h>
3456 #include <net/netfilter/nf_conntrack_timestamp.h>
3457 #include <net/netfilter/nf_conntrack_labels.h>
3458 +#include <net/netfilter/nf_conntrack_seqadj.h>
3459 +#include <net/netfilter/nf_conntrack_synproxy.h>
3460 #ifdef CONFIG_NF_NAT_NEEDED
3461 #include <net/netfilter/nf_nat_core.h>
3462 #include <net/netfilter/nf_nat_l4proto.h>
3463 @@ -1814,6 +1816,8 @@ ctnetlink_create_conntrack(struct net *net,
3464 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
3465 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
3466 nf_ct_labels_ext_add(ct);
3467 + nfct_seqadj_ext_add(ct);
3468 + nfct_synproxy_ext_add(ct);
3469
3470 /* we must add conntrack extensions before confirmation. */
3471 ct->status |= IPS_CONFIRMED;
3472 diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
3473 index 89193a634da4..04a3128adcf0 100644
3474 --- a/net/openvswitch/vport-internal_dev.c
3475 +++ b/net/openvswitch/vport-internal_dev.c
3476 @@ -94,7 +94,6 @@ static void internal_dev_destructor(struct net_device *dev)
3477 struct vport *vport = ovs_internal_dev_get_vport(dev);
3478
3479 ovs_vport_free(vport);
3480 - free_netdev(dev);
3481 }
3482
3483 static void
3484 @@ -156,7 +155,8 @@ static void do_setup(struct net_device *netdev)
3485 netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
3486 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
3487 IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
3488 - netdev->destructor = internal_dev_destructor;
3489 + netdev->needs_free_netdev = true;
3490 + netdev->priv_destructor = internal_dev_destructor;
3491 netdev->ethtool_ops = &internal_dev_ethtool_ops;
3492 netdev->rtnl_link_ops = &internal_dev_link_ops;
3493
3494 diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
3495 index 21c28b51be94..2c9337946e30 100644
3496 --- a/net/phonet/pep-gprs.c
3497 +++ b/net/phonet/pep-gprs.c
3498 @@ -236,7 +236,7 @@ static void gprs_setup(struct net_device *dev)
3499 dev->tx_queue_len = 10;
3500
3501 dev->netdev_ops = &gprs_netdev_ops;
3502 - dev->destructor = free_netdev;
3503 + dev->needs_free_netdev = true;
3504 }
3505
3506 /*
3507 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3508 index d9d4c92e06b3..74f761022450 100644
3509 --- a/net/sctp/socket.c
3510 +++ b/net/sctp/socket.c
3511 @@ -4586,13 +4586,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
3512
3513 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
3514 hash++, head++) {
3515 - read_lock(&head->lock);
3516 + read_lock_bh(&head->lock);
3517 sctp_for_each_hentry(epb, &head->chain) {
3518 err = cb(sctp_ep(epb), p);
3519 if (err)
3520 break;
3521 }
3522 - read_unlock(&head->lock);
3523 + read_unlock_bh(&head->lock);
3524 }
3525
3526 return err;
3527 @@ -4630,9 +4630,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
3528 if (err)
3529 return err;
3530
3531 - sctp_transport_get_idx(net, &hti, pos);
3532 - obj = sctp_transport_get_next(net, &hti);
3533 - for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
3534 + obj = sctp_transport_get_idx(net, &hti, pos + 1);
3535 + for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
3536 struct sctp_transport *transport = obj;
3537
3538 if (!sctp_transport_hold(transport))
3539 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
3540 index 312ef7de57d7..ab3087687a32 100644
3541 --- a/net/tipc/msg.c
3542 +++ b/net/tipc/msg.c
3543 @@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
3544 }
3545
3546 if (skb_cloned(_skb) &&
3547 - pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
3548 + pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
3549 goto exit;
3550
3551 /* Now reverse the concerned fields */
3552 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
3553 index 928691c43408..475e1170597c 100644
3554 --- a/net/unix/af_unix.c
3555 +++ b/net/unix/af_unix.c
3556 @@ -999,7 +999,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3557 struct path path = { NULL, NULL };
3558
3559 err = -EINVAL;
3560 - if (sunaddr->sun_family != AF_UNIX)
3561 + if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
3562 + sunaddr->sun_family != AF_UNIX)
3563 goto out;
3564
3565 if (addr_len == sizeof(short)) {
3566 @@ -1110,6 +1111,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
3567 unsigned int hash;
3568 int err;
3569
3570 + err = -EINVAL;
3571 + if (alen < offsetofend(struct sockaddr, sa_family))
3572 + goto out;
3573 +
3574 if (addr->sa_family != AF_UNSPEC) {
3575 err = unix_mkname(sunaddr, alen, &hash);
3576 if (err < 0)
3577 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3578 index dfc77b9c5e5a..56fba8f073f5 100644
3579 --- a/net/xfrm/xfrm_policy.c
3580 +++ b/net/xfrm/xfrm_policy.c
3581 @@ -1006,10 +1006,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
3582 err = -ESRCH;
3583 out:
3584 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
3585 -
3586 - if (cnt)
3587 - xfrm_garbage_collect(net);
3588 -
3589 return err;
3590 }
3591 EXPORT_SYMBOL(xfrm_policy_flush);
3592 @@ -1797,43 +1793,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
3593 goto out;
3594 }
3595
3596 -#ifdef CONFIG_XFRM_SUB_POLICY
3597 -static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
3598 -{
3599 - if (!*target) {
3600 - *target = kmalloc(size, GFP_ATOMIC);
3601 - if (!*target)
3602 - return -ENOMEM;
3603 - }
3604 -
3605 - memcpy(*target, src, size);
3606 - return 0;
3607 -}
3608 -#endif
3609 -
3610 -static int xfrm_dst_update_parent(struct dst_entry *dst,
3611 - const struct xfrm_selector *sel)
3612 -{
3613 -#ifdef CONFIG_XFRM_SUB_POLICY
3614 - struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3615 - return xfrm_dst_alloc_copy((void **)&(xdst->partner),
3616 - sel, sizeof(*sel));
3617 -#else
3618 - return 0;
3619 -#endif
3620 -}
3621 -
3622 -static int xfrm_dst_update_origin(struct dst_entry *dst,
3623 - const struct flowi *fl)
3624 -{
3625 -#ifdef CONFIG_XFRM_SUB_POLICY
3626 - struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3627 - return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
3628 -#else
3629 - return 0;
3630 -#endif
3631 -}
3632 -
3633 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
3634 struct xfrm_policy **pols,
3635 int *num_pols, int *num_xfrms)
3636 @@ -1905,16 +1864,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
3637
3638 xdst = (struct xfrm_dst *)dst;
3639 xdst->num_xfrms = err;
3640 - if (num_pols > 1)
3641 - err = xfrm_dst_update_parent(dst, &pols[1]->selector);
3642 - else
3643 - err = xfrm_dst_update_origin(dst, fl);
3644 - if (unlikely(err)) {
3645 - dst_free(dst);
3646 - XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
3647 - return ERR_PTR(err);
3648 - }
3649 -
3650 xdst->num_pols = num_pols;
3651 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3652 xdst->policy_genid = atomic_read(&pols[0]->genid);
3653 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
3654 index 40a8aa39220d..66698552fbd6 100644
3655 --- a/net/xfrm/xfrm_user.c
3656 +++ b/net/xfrm/xfrm_user.c
3657 @@ -1999,6 +1999,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
3658 return 0;
3659 return err;
3660 }
3661 + xfrm_garbage_collect(net);
3662
3663 c.data.type = type;
3664 c.event = nlh->nlmsg_type;
3665 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
3666 index f17f25245e52..a206320b8eea 100644
3667 --- a/sound/pci/hda/hda_codec.h
3668 +++ b/sound/pci/hda/hda_codec.h
3669 @@ -294,6 +294,8 @@ struct hda_codec {
3670
3671 #define list_for_each_codec(c, bus) \
3672 list_for_each_entry(c, &(bus)->core.codec_list, core.list)
3673 +#define list_for_each_codec_safe(c, n, bus) \
3674 + list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
3675
3676 /* snd_hda_codec_read/write optional flags */
3677 #define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0)
3678 diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
3679 index 3715a5725613..1c60beb5b70a 100644
3680 --- a/sound/pci/hda/hda_controller.c
3681 +++ b/sound/pci/hda/hda_controller.c
3682 @@ -1337,8 +1337,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
3683 /* configure each codec instance */
3684 int azx_codec_configure(struct azx *chip)
3685 {
3686 - struct hda_codec *codec;
3687 - list_for_each_codec(codec, &chip->bus) {
3688 + struct hda_codec *codec, *next;
3689 +
3690 + /* use _safe version here since snd_hda_codec_configure() deregisters
3691 + * the device upon error and deletes itself from the bus list.
3692 + */
3693 + list_for_each_codec_safe(codec, next, &chip->bus) {
3694 snd_hda_codec_configure(codec);
3695 }
3696 return 0;
3697 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
3698 index e7c8f4f076d5..b0bd29003b5d 100644
3699 --- a/sound/pci/hda/hda_generic.c
3700 +++ b/sound/pci/hda/hda_generic.c
3701 @@ -3169,6 +3169,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec)
3702 spec->input_paths[i][nums]);
3703 spec->input_paths[i][nums] =
3704 spec->input_paths[i][n];
3705 + spec->input_paths[i][n] = 0;
3706 }
3707 }
3708 nums++;
3709 diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
3710 index 49e6ebac7e73..98dcc112b363 100644
3711 --- a/tools/arch/x86/lib/memcpy_64.S
3712 +++ b/tools/arch/x86/lib/memcpy_64.S
3713 @@ -286,7 +286,7 @@ ENDPROC(memcpy_mcsafe_unrolled)
3714 _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
3715 _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
3716 _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
3717 - _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
3718 + _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
3719 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
3720 _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
3721 _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)