Magellan Linux

Contents of /trunk/kernel-alx/patches-3.10/0151-3.10.52-all-fixes.patch



Revision 2639
Tue Jul 21 16:20:19 2015 UTC by niro
File size: 50714 bytes
-linux-3.10.52
1 diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
2 index 881582f75c9c..bd4370487b07 100644
3 --- a/Documentation/x86/x86_64/mm.txt
4 +++ b/Documentation/x86/x86_64/mm.txt
5 @@ -12,6 +12,8 @@ ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
6 ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
7 ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
8 ... unused hole ...
9 +ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
10 +... unused hole ...
11 ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
12 ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
13 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
14 diff --git a/Makefile b/Makefile
15 index f9f6ee59c61a..b94f00938acc 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,6 +1,6 @@
19 VERSION = 3
20 PATCHLEVEL = 10
21 -SUBLEVEL = 51
22 +SUBLEVEL = 52
23 EXTRAVERSION =
24 NAME = TOSSUG Baby Fish
25
26 diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
27 index 83cb3ac27095..c61d2373408c 100644
28 --- a/arch/arm/mm/idmap.c
29 +++ b/arch/arm/mm/idmap.c
30 @@ -24,6 +24,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
31 pr_warning("Failed to allocate identity pmd.\n");
32 return;
33 }
34 + /*
35 + * Copy the original PMD to ensure that the PMD entries for
36 + * the kernel image are preserved.
37 + */
38 + if (!pud_none(*pud))
39 + memcpy(pmd, pmd_offset(pud, 0),
40 + PTRS_PER_PMD * sizeof(pmd_t));
41 pud_populate(&init_mm, pud, pmd);
42 pmd += pmd_index(addr);
43 } else
44 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
45 index af88b27ce313..a649cb686692 100644
46 --- a/arch/x86/Kconfig
47 +++ b/arch/x86/Kconfig
48 @@ -952,10 +952,27 @@ config VM86
49 default y
50 depends on X86_32
51 ---help---
52 - This option is required by programs like DOSEMU to run 16-bit legacy
53 - code on X86 processors. It also may be needed by software like
54 - XFree86 to initialize some video cards via BIOS. Disabling this
55 - option saves about 6k.
56 + This option is required by programs like DOSEMU to run
57 + 16-bit real mode legacy code on x86 processors. It also may
58 + be needed by software like XFree86 to initialize some video
59 + cards via BIOS. Disabling this option saves about 6K.
60 +
61 +config X86_16BIT
62 + bool "Enable support for 16-bit segments" if EXPERT
63 + default y
64 + ---help---
65 + This option is required by programs like Wine to run 16-bit
66 + protected mode legacy code on x86 processors. Disabling
67 + this option saves about 300 bytes on i386, or around 6K text
68 + plus 16K runtime memory on x86-64,
69 +
70 +config X86_ESPFIX32
71 + def_bool y
72 + depends on X86_16BIT && X86_32
73 +
74 +config X86_ESPFIX64
75 + def_bool y
76 + depends on X86_16BIT && X86_64
77
78 config TOSHIBA
79 tristate "Toshiba Laptop support"
80 diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
81 new file mode 100644
82 index 000000000000..99efebb2f69d
83 --- /dev/null
84 +++ b/arch/x86/include/asm/espfix.h
85 @@ -0,0 +1,16 @@
86 +#ifndef _ASM_X86_ESPFIX_H
87 +#define _ASM_X86_ESPFIX_H
88 +
89 +#ifdef CONFIG_X86_64
90 +
91 +#include <asm/percpu.h>
92 +
93 +DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
94 +DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
95 +
96 +extern void init_espfix_bsp(void);
97 +extern void init_espfix_ap(void);
98 +
99 +#endif /* CONFIG_X86_64 */
100 +
101 +#endif /* _ASM_X86_ESPFIX_H */
102 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
103 index bba3cf88e624..0a8b519226b8 100644
104 --- a/arch/x86/include/asm/irqflags.h
105 +++ b/arch/x86/include/asm/irqflags.h
106 @@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
107
108 #define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
109
110 -#define INTERRUPT_RETURN iretq
111 +#define INTERRUPT_RETURN jmp native_iret
112 #define USERGS_SYSRET64 \
113 swapgs; \
114 sysretq;
115 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
116 index 2d883440cb9a..b1609f2c524c 100644
117 --- a/arch/x86/include/asm/pgtable_64_types.h
118 +++ b/arch/x86/include/asm/pgtable_64_types.h
119 @@ -61,6 +61,8 @@ typedef struct { pteval_t pte; } pte_t;
120 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
121 #define MODULES_END _AC(0xffffffffff000000, UL)
122 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
123 +#define ESPFIX_PGD_ENTRY _AC(-2, UL)
124 +#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
125
126 #define EARLY_DYNAMIC_PAGE_TABLES 64
127
128 diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
129 index b7bf3505e1ec..2e327f114a1b 100644
130 --- a/arch/x86/include/asm/setup.h
131 +++ b/arch/x86/include/asm/setup.h
132 @@ -62,6 +62,8 @@ static inline void x86_ce4100_early_setup(void) { }
133
134 #ifndef _SETUP
135
136 +#include <asm/espfix.h>
137 +
138 /*
139 * This is set up by the setup-routine at boot-time
140 */
141 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
142 index 7bd3bd310106..111eb356dbea 100644
143 --- a/arch/x86/kernel/Makefile
144 +++ b/arch/x86/kernel/Makefile
145 @@ -27,6 +27,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
146 obj-y += syscall_$(BITS).o
147 obj-$(CONFIG_X86_64) += vsyscall_64.o
148 obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
149 +obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
150 obj-y += bootflag.o e820.o
151 obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
152 obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
153 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
154 index 08fa44443a01..5c38e2b298cd 100644
155 --- a/arch/x86/kernel/entry_32.S
156 +++ b/arch/x86/kernel/entry_32.S
157 @@ -532,6 +532,7 @@ syscall_exit:
158 restore_all:
159 TRACE_IRQS_IRET
160 restore_all_notrace:
161 +#ifdef CONFIG_X86_ESPFIX32
162 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
163 # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
164 # are returning to the kernel.
165 @@ -542,6 +543,7 @@ restore_all_notrace:
166 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
167 CFI_REMEMBER_STATE
168 je ldt_ss # returning to user-space with LDT SS
169 +#endif
170 restore_nocheck:
171 RESTORE_REGS 4 # skip orig_eax/error_code
172 irq_return:
173 @@ -554,6 +556,7 @@ ENTRY(iret_exc)
174 .previous
175 _ASM_EXTABLE(irq_return,iret_exc)
176
177 +#ifdef CONFIG_X86_ESPFIX32
178 CFI_RESTORE_STATE
179 ldt_ss:
180 #ifdef CONFIG_PARAVIRT
181 @@ -597,6 +600,7 @@ ldt_ss:
182 lss (%esp), %esp /* switch to espfix segment */
183 CFI_ADJUST_CFA_OFFSET -8
184 jmp restore_nocheck
185 +#endif
186 CFI_ENDPROC
187 ENDPROC(system_call)
188
189 @@ -709,6 +713,7 @@ END(syscall_badsys)
190 * the high word of the segment base from the GDT and swiches to the
191 * normal stack and adjusts ESP with the matching offset.
192 */
193 +#ifdef CONFIG_X86_ESPFIX32
194 /* fixup the stack */
195 mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
196 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
197 @@ -718,8 +723,10 @@ END(syscall_badsys)
198 pushl_cfi %eax
199 lss (%esp), %esp /* switch to the normal stack segment */
200 CFI_ADJUST_CFA_OFFSET -8
201 +#endif
202 .endm
203 .macro UNWIND_ESPFIX_STACK
204 +#ifdef CONFIG_X86_ESPFIX32
205 movl %ss, %eax
206 /* see if on espfix stack */
207 cmpw $__ESPFIX_SS, %ax
208 @@ -730,6 +737,7 @@ END(syscall_badsys)
209 /* switch to normal stack */
210 FIXUP_ESPFIX_STACK
211 27:
212 +#endif
213 .endm
214
215 /*
216 @@ -1337,11 +1345,13 @@ END(debug)
217 ENTRY(nmi)
218 RING0_INT_FRAME
219 ASM_CLAC
220 +#ifdef CONFIG_X86_ESPFIX32
221 pushl_cfi %eax
222 movl %ss, %eax
223 cmpw $__ESPFIX_SS, %ax
224 popl_cfi %eax
225 je nmi_espfix_stack
226 +#endif
227 cmpl $ia32_sysenter_target,(%esp)
228 je nmi_stack_fixup
229 pushl_cfi %eax
230 @@ -1381,6 +1391,7 @@ nmi_debug_stack_check:
231 FIX_STACK 24, nmi_stack_correct, 1
232 jmp nmi_stack_correct
233
234 +#ifdef CONFIG_X86_ESPFIX32
235 nmi_espfix_stack:
236 /* We have a RING0_INT_FRAME here.
237 *
238 @@ -1402,6 +1413,7 @@ nmi_espfix_stack:
239 lss 12+4(%esp), %esp # back to espfix stack
240 CFI_ADJUST_CFA_OFFSET -24
241 jmp irq_return
242 +#endif
243 CFI_ENDPROC
244 END(nmi)
245
246 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
247 index 7ac938a4bfab..39ba6914bbc6 100644
248 --- a/arch/x86/kernel/entry_64.S
249 +++ b/arch/x86/kernel/entry_64.S
250 @@ -58,6 +58,7 @@
251 #include <asm/asm.h>
252 #include <asm/context_tracking.h>
253 #include <asm/smap.h>
254 +#include <asm/pgtable_types.h>
255 #include <linux/err.h>
256
257 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
258 @@ -1056,12 +1057,45 @@ restore_args:
259
260 irq_return:
261 INTERRUPT_RETURN
262 - _ASM_EXTABLE(irq_return, bad_iret)
263
264 -#ifdef CONFIG_PARAVIRT
265 ENTRY(native_iret)
266 + /*
267 + * Are we returning to a stack segment from the LDT? Note: in
268 + * 64-bit mode SS:RSP on the exception stack is always valid.
269 + */
270 +#ifdef CONFIG_X86_ESPFIX64
271 + testb $4,(SS-RIP)(%rsp)
272 + jnz native_irq_return_ldt
273 +#endif
274 +
275 +native_irq_return_iret:
276 iretq
277 - _ASM_EXTABLE(native_iret, bad_iret)
278 + _ASM_EXTABLE(native_irq_return_iret, bad_iret)
279 +
280 +#ifdef CONFIG_X86_ESPFIX64
281 +native_irq_return_ldt:
282 + pushq_cfi %rax
283 + pushq_cfi %rdi
284 + SWAPGS
285 + movq PER_CPU_VAR(espfix_waddr),%rdi
286 + movq %rax,(0*8)(%rdi) /* RAX */
287 + movq (2*8)(%rsp),%rax /* RIP */
288 + movq %rax,(1*8)(%rdi)
289 + movq (3*8)(%rsp),%rax /* CS */
290 + movq %rax,(2*8)(%rdi)
291 + movq (4*8)(%rsp),%rax /* RFLAGS */
292 + movq %rax,(3*8)(%rdi)
293 + movq (6*8)(%rsp),%rax /* SS */
294 + movq %rax,(5*8)(%rdi)
295 + movq (5*8)(%rsp),%rax /* RSP */
296 + movq %rax,(4*8)(%rdi)
297 + andl $0xffff0000,%eax
298 + popq_cfi %rdi
299 + orq PER_CPU_VAR(espfix_stack),%rax
300 + SWAPGS
301 + movq %rax,%rsp
302 + popq_cfi %rax
303 + jmp native_irq_return_iret
304 #endif
305
306 .section .fixup,"ax"
307 @@ -1127,9 +1161,40 @@ ENTRY(retint_kernel)
308 call preempt_schedule_irq
309 jmp exit_intr
310 #endif
311 -
312 CFI_ENDPROC
313 END(common_interrupt)
314 +
315 + /*
316 + * If IRET takes a fault on the espfix stack, then we
317 + * end up promoting it to a doublefault. In that case,
318 + * modify the stack to make it look like we just entered
319 + * the #GP handler from user space, similar to bad_iret.
320 + */
321 +#ifdef CONFIG_X86_ESPFIX64
322 + ALIGN
323 +__do_double_fault:
324 + XCPT_FRAME 1 RDI+8
325 + movq RSP(%rdi),%rax /* Trap on the espfix stack? */
326 + sarq $PGDIR_SHIFT,%rax
327 + cmpl $ESPFIX_PGD_ENTRY,%eax
328 + jne do_double_fault /* No, just deliver the fault */
329 + cmpl $__KERNEL_CS,CS(%rdi)
330 + jne do_double_fault
331 + movq RIP(%rdi),%rax
332 + cmpq $native_irq_return_iret,%rax
333 + jne do_double_fault /* This shouldn't happen... */
334 + movq PER_CPU_VAR(kernel_stack),%rax
335 + subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
336 + movq %rax,RSP(%rdi)
337 + movq $0,(%rax) /* Missing (lost) #GP error code */
338 + movq $general_protection,RIP(%rdi)
339 + retq
340 + CFI_ENDPROC
341 +END(__do_double_fault)
342 +#else
343 +# define __do_double_fault do_double_fault
344 +#endif
345 +
346 /*
347 * End of kprobes section
348 */
349 @@ -1298,7 +1363,7 @@ zeroentry overflow do_overflow
350 zeroentry bounds do_bounds
351 zeroentry invalid_op do_invalid_op
352 zeroentry device_not_available do_device_not_available
353 -paranoiderrorentry double_fault do_double_fault
354 +paranoiderrorentry double_fault __do_double_fault
355 zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
356 errorentry invalid_TSS do_invalid_TSS
357 errorentry segment_not_present do_segment_not_present
358 @@ -1585,7 +1650,7 @@ error_sti:
359 */
360 error_kernelspace:
361 incl %ebx
362 - leaq irq_return(%rip),%rcx
363 + leaq native_irq_return_iret(%rip),%rcx
364 cmpq %rcx,RIP+8(%rsp)
365 je error_swapgs
366 movl %ecx,%eax /* zero extend */
367 diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
368 new file mode 100644
369 index 000000000000..94d857fb1033
370 --- /dev/null
371 +++ b/arch/x86/kernel/espfix_64.c
372 @@ -0,0 +1,208 @@
373 +/* ----------------------------------------------------------------------- *
374 + *
375 + * Copyright 2014 Intel Corporation; author: H. Peter Anvin
376 + *
377 + * This program is free software; you can redistribute it and/or modify it
378 + * under the terms and conditions of the GNU General Public License,
379 + * version 2, as published by the Free Software Foundation.
380 + *
381 + * This program is distributed in the hope it will be useful, but WITHOUT
382 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
383 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
384 + * more details.
385 + *
386 + * ----------------------------------------------------------------------- */
387 +
388 +/*
389 + * The IRET instruction, when returning to a 16-bit segment, only
390 + * restores the bottom 16 bits of the user space stack pointer. This
391 + * causes some 16-bit software to break, but it also leaks kernel state
392 + * to user space.
393 + *
394 + * This works around this by creating percpu "ministacks", each of which
395 + * is mapped 2^16 times 64K apart. When we detect that the return SS is
396 + * on the LDT, we copy the IRET frame to the ministack and use the
397 + * relevant alias to return to userspace. The ministacks are mapped
398 + * readonly, so if the IRET fault we promote #GP to #DF which is an IST
399 + * vector and thus has its own stack; we then do the fixup in the #DF
400 + * handler.
401 + *
402 + * This file sets up the ministacks and the related page tables. The
403 + * actual ministack invocation is in entry_64.S.
404 + */
405 +
406 +#include <linux/init.h>
407 +#include <linux/init_task.h>
408 +#include <linux/kernel.h>
409 +#include <linux/percpu.h>
410 +#include <linux/gfp.h>
411 +#include <linux/random.h>
412 +#include <asm/pgtable.h>
413 +#include <asm/pgalloc.h>
414 +#include <asm/setup.h>
415 +#include <asm/espfix.h>
416 +
417 +/*
418 + * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
419 + * it up to a cache line to avoid unnecessary sharing.
420 + */
421 +#define ESPFIX_STACK_SIZE (8*8UL)
422 +#define ESPFIX_STACKS_PER_PAGE (PAGE_SIZE/ESPFIX_STACK_SIZE)
423 +
424 +/* There is address space for how many espfix pages? */
425 +#define ESPFIX_PAGE_SPACE (1UL << (PGDIR_SHIFT-PAGE_SHIFT-16))
426 +
427 +#define ESPFIX_MAX_CPUS (ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
428 +#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
429 +# error "Need more than one PGD for the ESPFIX hack"
430 +#endif
431 +
432 +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
433 +
434 +/* This contains the *bottom* address of the espfix stack */
435 +DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
436 +DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
437 +
438 +/* Initialization mutex - should this be a spinlock? */
439 +static DEFINE_MUTEX(espfix_init_mutex);
440 +
441 +/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
442 +#define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
443 +static void *espfix_pages[ESPFIX_MAX_PAGES];
444 +
445 +static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
446 + __aligned(PAGE_SIZE);
447 +
448 +static unsigned int page_random, slot_random;
449 +
450 +/*
451 + * This returns the bottom address of the espfix stack for a specific CPU.
452 + * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
453 + * we have to account for some amount of padding at the end of each page.
454 + */
455 +static inline unsigned long espfix_base_addr(unsigned int cpu)
456 +{
457 + unsigned long page, slot;
458 + unsigned long addr;
459 +
460 + page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
461 + slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
462 + addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
463 + addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
464 + addr += ESPFIX_BASE_ADDR;
465 + return addr;
466 +}
467 +
468 +#define PTE_STRIDE (65536/PAGE_SIZE)
469 +#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
470 +#define ESPFIX_PMD_CLONES PTRS_PER_PMD
471 +#define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
472 +
473 +#define PGTABLE_PROT ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)
474 +
475 +static void init_espfix_random(void)
476 +{
477 + unsigned long rand;
478 +
479 + /*
480 + * This is run before the entropy pools are initialized,
481 + * but this is hopefully better than nothing.
482 + */
483 + if (!arch_get_random_long(&rand)) {
484 + /* The constant is an arbitrary large prime */
485 + rdtscll(rand);
486 + rand *= 0xc345c6b72fd16123UL;
487 + }
488 +
489 + slot_random = rand % ESPFIX_STACKS_PER_PAGE;
490 + page_random = (rand / ESPFIX_STACKS_PER_PAGE)
491 + & (ESPFIX_PAGE_SPACE - 1);
492 +}
493 +
494 +void __init init_espfix_bsp(void)
495 +{
496 + pgd_t *pgd_p;
497 + pteval_t ptemask;
498 +
499 + ptemask = __supported_pte_mask;
500 +
501 + /* Install the espfix pud into the kernel page directory */
502 + pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
503 + pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
504 +
505 + /* Randomize the locations */
506 + init_espfix_random();
507 +
508 + /* The rest is the same as for any other processor */
509 + init_espfix_ap();
510 +}
511 +
512 +void init_espfix_ap(void)
513 +{
514 + unsigned int cpu, page;
515 + unsigned long addr;
516 + pud_t pud, *pud_p;
517 + pmd_t pmd, *pmd_p;
518 + pte_t pte, *pte_p;
519 + int n;
520 + void *stack_page;
521 + pteval_t ptemask;
522 +
523 + /* We only have to do this once... */
524 + if (likely(this_cpu_read(espfix_stack)))
525 + return; /* Already initialized */
526 +
527 + cpu = smp_processor_id();
528 + addr = espfix_base_addr(cpu);
529 + page = cpu/ESPFIX_STACKS_PER_PAGE;
530 +
531 + /* Did another CPU already set this up? */
532 + stack_page = ACCESS_ONCE(espfix_pages[page]);
533 + if (likely(stack_page))
534 + goto done;
535 +
536 + mutex_lock(&espfix_init_mutex);
537 +
538 + /* Did we race on the lock? */
539 + stack_page = ACCESS_ONCE(espfix_pages[page]);
540 + if (stack_page)
541 + goto unlock_done;
542 +
543 + ptemask = __supported_pte_mask;
544 +
545 + pud_p = &espfix_pud_page[pud_index(addr)];
546 + pud = *pud_p;
547 + if (!pud_present(pud)) {
548 + pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
549 + pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
550 + paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
551 + for (n = 0; n < ESPFIX_PUD_CLONES; n++)
552 + set_pud(&pud_p[n], pud);
553 + }
554 +
555 + pmd_p = pmd_offset(&pud, addr);
556 + pmd = *pmd_p;
557 + if (!pmd_present(pmd)) {
558 + pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
559 + pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
560 + paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
561 + for (n = 0; n < ESPFIX_PMD_CLONES; n++)
562 + set_pmd(&pmd_p[n], pmd);
563 + }
564 +
565 + pte_p = pte_offset_kernel(&pmd, addr);
566 + stack_page = (void *)__get_free_page(GFP_KERNEL);
567 + pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
568 + for (n = 0; n < ESPFIX_PTE_CLONES; n++)
569 + set_pte(&pte_p[n*PTE_STRIDE], pte);
570 +
571 + /* Job is done for this CPU and any CPU which shares this page */
572 + ACCESS_ONCE(espfix_pages[page]) = stack_page;
573 +
574 +unlock_done:
575 + mutex_unlock(&espfix_init_mutex);
576 +done:
577 + this_cpu_write(espfix_stack, addr);
578 + this_cpu_write(espfix_waddr, (unsigned long)stack_page
579 + + (addr & ~PAGE_MASK));
580 +}
581 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
582 index dcbbaa165bde..c37886d759cc 100644
583 --- a/arch/x86/kernel/ldt.c
584 +++ b/arch/x86/kernel/ldt.c
585 @@ -20,8 +20,6 @@
586 #include <asm/mmu_context.h>
587 #include <asm/syscalls.h>
588
589 -int sysctl_ldt16 = 0;
590 -
591 #ifdef CONFIG_SMP
592 static void flush_ldt(void *current_mm)
593 {
594 @@ -231,16 +229,10 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
595 }
596 }
597
598 - /*
599 - * On x86-64 we do not support 16-bit segments due to
600 - * IRET leaking the high bits of the kernel stack address.
601 - */
602 -#ifdef CONFIG_X86_64
603 - if (!ldt_info.seg_32bit && !sysctl_ldt16) {
604 + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
605 error = -EINVAL;
606 goto out_unlock;
607 }
608 -#endif
609
610 fill_ldt(&ldt, &ldt_info);
611 if (oldmode)
612 diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
613 index 3f08f34f93eb..a1da6737ba5b 100644
614 --- a/arch/x86/kernel/paravirt_patch_64.c
615 +++ b/arch/x86/kernel/paravirt_patch_64.c
616 @@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
617 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
618 DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
619 DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
620 -DEF_NATIVE(pv_cpu_ops, iret, "iretq");
621 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
622 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
623 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
624 @@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
625 PATCH_SITE(pv_irq_ops, save_fl);
626 PATCH_SITE(pv_irq_ops, irq_enable);
627 PATCH_SITE(pv_irq_ops, irq_disable);
628 - PATCH_SITE(pv_cpu_ops, iret);
629 PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
630 PATCH_SITE(pv_cpu_ops, usergs_sysret32);
631 PATCH_SITE(pv_cpu_ops, usergs_sysret64);
632 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
633 index bfd348e99369..fe862750583b 100644
634 --- a/arch/x86/kernel/smpboot.c
635 +++ b/arch/x86/kernel/smpboot.c
636 @@ -265,6 +265,13 @@ notrace static void __cpuinit start_secondary(void *unused)
637 check_tsc_sync_target();
638
639 /*
640 + * Enable the espfix hack for this CPU
641 + */
642 +#ifdef CONFIG_X86_ESPFIX64
643 + init_espfix_ap();
644 +#endif
645 +
646 + /*
647 * We need to hold vector_lock so there the set of online cpus
648 * does not change while we are assigning vectors to cpus. Holding
649 * this lock ensures we don't half assign or remove an irq from a cpu.
650 diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
651 index 0002a3a33081..e04e67753238 100644
652 --- a/arch/x86/mm/dump_pagetables.c
653 +++ b/arch/x86/mm/dump_pagetables.c
654 @@ -30,11 +30,13 @@ struct pg_state {
655 unsigned long start_address;
656 unsigned long current_address;
657 const struct addr_marker *marker;
658 + unsigned long lines;
659 };
660
661 struct addr_marker {
662 unsigned long start_address;
663 const char *name;
664 + unsigned long max_lines;
665 };
666
667 /* indices for address_markers; keep sync'd w/ address_markers below */
668 @@ -45,6 +47,7 @@ enum address_markers_idx {
669 LOW_KERNEL_NR,
670 VMALLOC_START_NR,
671 VMEMMAP_START_NR,
672 + ESPFIX_START_NR,
673 HIGH_KERNEL_NR,
674 MODULES_VADDR_NR,
675 MODULES_END_NR,
676 @@ -67,6 +70,7 @@ static struct addr_marker address_markers[] = {
677 { PAGE_OFFSET, "Low Kernel Mapping" },
678 { VMALLOC_START, "vmalloc() Area" },
679 { VMEMMAP_START, "Vmemmap" },
680 + { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
681 { __START_KERNEL_map, "High Kernel Mapping" },
682 { MODULES_VADDR, "Modules" },
683 { MODULES_END, "End Modules" },
684 @@ -163,7 +167,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
685 pgprot_t new_prot, int level)
686 {
687 pgprotval_t prot, cur;
688 - static const char units[] = "KMGTPE";
689 + static const char units[] = "BKMGTPE";
690
691 /*
692 * If we have a "break" in the series, we need to flush the state that
693 @@ -178,6 +182,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
694 st->current_prot = new_prot;
695 st->level = level;
696 st->marker = address_markers;
697 + st->lines = 0;
698 seq_printf(m, "---[ %s ]---\n", st->marker->name);
699 } else if (prot != cur || level != st->level ||
700 st->current_address >= st->marker[1].start_address) {
701 @@ -188,17 +193,21 @@ static void note_page(struct seq_file *m, struct pg_state *st,
702 /*
703 * Now print the actual finished series
704 */
705 - seq_printf(m, "0x%0*lx-0x%0*lx ",
706 - width, st->start_address,
707 - width, st->current_address);
708 -
709 - delta = (st->current_address - st->start_address) >> 10;
710 - while (!(delta & 1023) && unit[1]) {
711 - delta >>= 10;
712 - unit++;
713 + if (!st->marker->max_lines ||
714 + st->lines < st->marker->max_lines) {
715 + seq_printf(m, "0x%0*lx-0x%0*lx ",
716 + width, st->start_address,
717 + width, st->current_address);
718 +
719 + delta = (st->current_address - st->start_address);
720 + while (!(delta & 1023) && unit[1]) {
721 + delta >>= 10;
722 + unit++;
723 + }
724 + seq_printf(m, "%9lu%c ", delta, *unit);
725 + printk_prot(m, st->current_prot, st->level);
726 }
727 - seq_printf(m, "%9lu%c ", delta, *unit);
728 - printk_prot(m, st->current_prot, st->level);
729 + st->lines++;
730
731 /*
732 * We print markers for special areas of address space,
733 @@ -206,7 +215,15 @@ static void note_page(struct seq_file *m, struct pg_state *st,
734 * This helps in the interpretation.
735 */
736 if (st->current_address >= st->marker[1].start_address) {
737 + if (st->marker->max_lines &&
738 + st->lines > st->marker->max_lines) {
739 + unsigned long nskip =
740 + st->lines - st->marker->max_lines;
741 + seq_printf(m, "... %lu entr%s skipped ... \n",
742 + nskip, nskip == 1 ? "y" : "ies");
743 + }
744 st->marker++;
745 + st->lines = 0;
746 seq_printf(m, "---[ %s ]---\n", st->marker->name);
747 }
748
749 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
750 index 0f134c7cfc24..0faad646f5fd 100644
751 --- a/arch/x86/vdso/vdso32-setup.c
752 +++ b/arch/x86/vdso/vdso32-setup.c
753 @@ -41,7 +41,6 @@ enum {
754 #ifdef CONFIG_X86_64
755 #define vdso_enabled sysctl_vsyscall32
756 #define arch_setup_additional_pages syscall32_setup_pages
757 -extern int sysctl_ldt16;
758 #endif
759
760 /*
761 @@ -381,13 +380,6 @@ static ctl_table abi_table2[] = {
762 .mode = 0644,
763 .proc_handler = proc_dointvec
764 },
765 - {
766 - .procname = "ldt16",
767 - .data = &sysctl_ldt16,
768 - .maxlen = sizeof(int),
769 - .mode = 0644,
770 - .proc_handler = proc_dointvec
771 - },
772 {}
773 };
774
775 diff --git a/crypto/af_alg.c b/crypto/af_alg.c
776 index ac33d5f30778..bf948e134981 100644
777 --- a/crypto/af_alg.c
778 +++ b/crypto/af_alg.c
779 @@ -21,6 +21,7 @@
780 #include <linux/module.h>
781 #include <linux/net.h>
782 #include <linux/rwsem.h>
783 +#include <linux/security.h>
784
785 struct alg_type_list {
786 const struct af_alg_type *type;
787 @@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
788
789 sock_init_data(newsock, sk2);
790 sock_graft(sk2, newsock);
791 + security_sk_clone(sk, sk2);
792
793 err = type->accept(ask->private, sk2);
794 if (err) {
795 diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
796 index d344cf3ac9e3..e13c5f4b12cb 100644
797 --- a/drivers/iio/industrialio-buffer.c
798 +++ b/drivers/iio/industrialio-buffer.c
799 @@ -849,7 +849,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
800
801 /* Now we have the two masks, work from least sig and build up sizes */
802 for_each_set_bit(out_ind,
803 - indio_dev->active_scan_mask,
804 + buffer->scan_mask,
805 indio_dev->masklength) {
806 in_ind = find_next_bit(indio_dev->active_scan_mask,
807 indio_dev->masklength,
808 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
809 index 658613021919..f8821ce27802 100644
810 --- a/drivers/net/ethernet/marvell/mvneta.c
811 +++ b/drivers/net/ethernet/marvell/mvneta.c
812 @@ -99,16 +99,56 @@
813 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
814 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
815 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
816 +
817 +/* Exception Interrupt Port/Queue Cause register */
818 +
819 #define MVNETA_INTR_NEW_CAUSE 0x25a0
820 -#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
821 #define MVNETA_INTR_NEW_MASK 0x25a4
822 +
823 +/* bits 0..7 = TXQ SENT, one bit per queue.
824 + * bits 8..15 = RXQ OCCUP, one bit per queue.
825 + * bits 16..23 = RXQ FREE, one bit per queue.
826 + * bit 29 = OLD_REG_SUM, see old reg ?
827 + * bit 30 = TX_ERR_SUM, one bit for 4 ports
828 + * bit 31 = MISC_SUM, one bit for 4 ports
829 + */
830 +#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
831 +#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
832 +#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
833 +#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
834 +
835 #define MVNETA_INTR_OLD_CAUSE 0x25a8
836 #define MVNETA_INTR_OLD_MASK 0x25ac
837 +
838 +/* Data Path Port/Queue Cause Register */
839 #define MVNETA_INTR_MISC_CAUSE 0x25b0
840 #define MVNETA_INTR_MISC_MASK 0x25b4
841 +
842 +#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
843 +#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
844 +#define MVNETA_CAUSE_PTP BIT(4)
845 +
846 +#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
847 +#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
848 +#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
849 +#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
850 +#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
851 +#define MVNETA_CAUSE_PRBS_ERR BIT(12)
852 +#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
853 +#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
854 +
855 +#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
856 +#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
857 +#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
858 +
859 +#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
860 +#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
861 +#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
862 +
863 #define MVNETA_INTR_ENABLE 0x25b8
864 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
865 -#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
866 +#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF
867 +
868 #define MVNETA_RXQ_CMD 0x2680
869 #define MVNETA_RXQ_DISABLE_SHIFT 8
870 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
871 @@ -174,9 +214,6 @@
872 #define MVNETA_RX_COAL_PKTS 32
873 #define MVNETA_RX_COAL_USEC 100
874
875 -/* Timer */
876 -#define MVNETA_TX_DONE_TIMER_PERIOD 10
877 -
878 /* Napi polling weight */
879 #define MVNETA_RX_POLL_WEIGHT 64
880
881 @@ -219,10 +256,12 @@
882
883 #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
884
885 -struct mvneta_stats {
886 +struct mvneta_pcpu_stats {
887 struct u64_stats_sync syncp;
888 - u64 packets;
889 - u64 bytes;
890 + u64 rx_packets;
891 + u64 rx_bytes;
892 + u64 tx_packets;
893 + u64 tx_bytes;
894 };
895
896 struct mvneta_port {
897 @@ -230,16 +269,11 @@ struct mvneta_port {
898 void __iomem *base;
899 struct mvneta_rx_queue *rxqs;
900 struct mvneta_tx_queue *txqs;
901 - struct timer_list tx_done_timer;
902 struct net_device *dev;
903
904 u32 cause_rx_tx;
905 struct napi_struct napi;
906
907 - /* Flags */
908 - unsigned long flags;
909 -#define MVNETA_F_TX_DONE_TIMER_BIT 0
910 -
911 /* Napi weight */
912 int weight;
913
914 @@ -248,8 +282,7 @@ struct mvneta_port {
915 u8 mcast_count[256];
916 u16 tx_ring_size;
917 u16 rx_ring_size;
918 - struct mvneta_stats tx_stats;
919 - struct mvneta_stats rx_stats;
920 + struct mvneta_pcpu_stats *stats;
921
922 struct mii_bus *mii_bus;
923 struct phy_device *phy_dev;
924 @@ -428,21 +461,29 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
925 {
926 struct mvneta_port *pp = netdev_priv(dev);
927 unsigned int start;
928 + int cpu;
929
930 - memset(stats, 0, sizeof(struct rtnl_link_stats64));
931 -
932 - do {
933 - start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
934 - stats->rx_packets = pp->rx_stats.packets;
935 - stats->rx_bytes = pp->rx_stats.bytes;
936 - } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
937 + for_each_possible_cpu(cpu) {
938 + struct mvneta_pcpu_stats *cpu_stats;
939 + u64 rx_packets;
940 + u64 rx_bytes;
941 + u64 tx_packets;
942 + u64 tx_bytes;
943
944 + cpu_stats = per_cpu_ptr(pp->stats, cpu);
945 + do {
946 + start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
947 + rx_packets = cpu_stats->rx_packets;
948 + rx_bytes = cpu_stats->rx_bytes;
949 + tx_packets = cpu_stats->tx_packets;
950 + tx_bytes = cpu_stats->tx_bytes;
951 + } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
952
953 - do {
954 - start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
955 - stats->tx_packets = pp->tx_stats.packets;
956 - stats->tx_bytes = pp->tx_stats.bytes;
957 - } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
958 + stats->rx_packets += rx_packets;
959 + stats->rx_bytes += rx_bytes;
960 + stats->tx_packets += tx_packets;
961 + stats->tx_bytes += tx_bytes;
962 + }
963
964 stats->rx_errors = dev->stats.rx_errors;
965 stats->rx_dropped = dev->stats.rx_dropped;
966 @@ -1063,17 +1104,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
967 txq->done_pkts_coal = value;
968 }
969
970 -/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
971 -static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
972 -{
973 - if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
974 - pp->tx_done_timer.expires = jiffies +
975 - msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
976 - add_timer(&pp->tx_done_timer);
977 - }
978 -}
979 -
980 -
981 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
982 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
983 u32 phys_addr, u32 cookie)
984 @@ -1354,6 +1384,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
985 {
986 struct net_device *dev = pp->dev;
987 int rx_done, rx_filled;
988 + u32 rcvd_pkts = 0;
989 + u32 rcvd_bytes = 0;
990
991 /* Get number of received packets */
992 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
993 @@ -1391,10 +1423,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
994
995 rx_bytes = rx_desc->data_size -
996 (ETH_FCS_LEN + MVNETA_MH_SIZE);
997 - u64_stats_update_begin(&pp->rx_stats.syncp);
998 - pp->rx_stats.packets++;
999 - pp->rx_stats.bytes += rx_bytes;
1000 - u64_stats_update_end(&pp->rx_stats.syncp);
1001 + rcvd_pkts++;
1002 + rcvd_bytes += rx_bytes;
1003
1004 /* Linux processing */
1005 skb_reserve(skb, MVNETA_MH_SIZE);
1006 @@ -1415,6 +1445,15 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1007 }
1008 }
1009
1010 + if (rcvd_pkts) {
1011 + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1012 +
1013 + u64_stats_update_begin(&stats->syncp);
1014 + stats->rx_packets += rcvd_pkts;
1015 + stats->rx_bytes += rcvd_bytes;
1016 + u64_stats_update_end(&stats->syncp);
1017 + }
1018 +
1019 /* Update rxq management counters */
1020 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
1021
1022 @@ -1545,25 +1584,17 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1023
1024 out:
1025 if (frags > 0) {
1026 - u64_stats_update_begin(&pp->tx_stats.syncp);
1027 - pp->tx_stats.packets++;
1028 - pp->tx_stats.bytes += skb->len;
1029 - u64_stats_update_end(&pp->tx_stats.syncp);
1030 + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1031
1032 + u64_stats_update_begin(&stats->syncp);
1033 + stats->tx_packets++;
1034 + stats->tx_bytes += skb->len;
1035 + u64_stats_update_end(&stats->syncp);
1036 } else {
1037 dev->stats.tx_dropped++;
1038 dev_kfree_skb_any(skb);
1039 }
1040
1041 - if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
1042 - mvneta_txq_done(pp, txq);
1043 -
1044 - /* If after calling mvneta_txq_done, count equals
1045 - * frags, we need to set the timer
1046 - */
1047 - if (txq->count == frags && frags > 0)
1048 - mvneta_add_tx_done_timer(pp);
1049 -
1050 return NETDEV_TX_OK;
1051 }
1052
1053 @@ -1839,14 +1870,22 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
1054
1055 /* Read cause register */
1056 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
1057 - MVNETA_RX_INTR_MASK(rxq_number);
1058 + (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
1059 +
1060 + /* Release Tx descriptors */
1061 + if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
1062 + int tx_todo = 0;
1063 +
1064 + mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL), &tx_todo);
1065 + cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
1066 + }
1067
1068 /* For the case where the last mvneta_poll did not process all
1069 * RX packets
1070 */
1071 cause_rx_tx |= pp->cause_rx_tx;
1072 if (rxq_number > 1) {
1073 - while ((cause_rx_tx != 0) && (budget > 0)) {
1074 + while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
1075 int count;
1076 struct mvneta_rx_queue *rxq;
1077 /* get rx queue number from cause_rx_tx */
1078 @@ -1878,7 +1917,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
1079 napi_complete(napi);
1080 local_irq_save(flags);
1081 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1082 - MVNETA_RX_INTR_MASK(rxq_number));
1083 + MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
1084 local_irq_restore(flags);
1085 }
1086
1087 @@ -1886,26 +1925,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
1088 return rx_done;
1089 }
1090
1091 -/* tx done timer callback */
1092 -static void mvneta_tx_done_timer_callback(unsigned long data)
1093 -{
1094 - struct net_device *dev = (struct net_device *)data;
1095 - struct mvneta_port *pp = netdev_priv(dev);
1096 - int tx_done = 0, tx_todo = 0;
1097 -
1098 - if (!netif_running(dev))
1099 - return ;
1100 -
1101 - clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
1102 -
1103 - tx_done = mvneta_tx_done_gbe(pp,
1104 - (((1 << txq_number) - 1) &
1105 - MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
1106 - &tx_todo);
1107 - if (tx_todo > 0)
1108 - mvneta_add_tx_done_timer(pp);
1109 -}
1110 -
1111 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
1112 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
1113 int num)
1114 @@ -2155,7 +2174,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
1115
1116 /* Unmask interrupts */
1117 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1118 - MVNETA_RX_INTR_MASK(rxq_number));
1119 + MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
1120
1121 phy_start(pp->phy_dev);
1122 netif_tx_start_all_queues(pp->dev);
1123 @@ -2188,16 +2207,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
1124 mvneta_rx_reset(pp);
1125 }
1126
1127 -/* tx timeout callback - display a message and stop/start the network device */
1128 -static void mvneta_tx_timeout(struct net_device *dev)
1129 -{
1130 - struct mvneta_port *pp = netdev_priv(dev);
1131 -
1132 - netdev_info(dev, "tx timeout\n");
1133 - mvneta_stop_dev(pp);
1134 - mvneta_start_dev(pp);
1135 -}
1136 -
1137 /* Return positive if MTU is valid */
1138 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
1139 {
1140 @@ -2426,8 +2435,6 @@ static int mvneta_stop(struct net_device *dev)
1141 free_irq(dev->irq, pp);
1142 mvneta_cleanup_rxqs(pp);
1143 mvneta_cleanup_txqs(pp);
1144 - del_timer(&pp->tx_done_timer);
1145 - clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
1146
1147 return 0;
1148 }
1149 @@ -2548,7 +2555,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
1150 .ndo_set_rx_mode = mvneta_set_rx_mode,
1151 .ndo_set_mac_address = mvneta_set_mac_addr,
1152 .ndo_change_mtu = mvneta_change_mtu,
1153 - .ndo_tx_timeout = mvneta_tx_timeout,
1154 .ndo_get_stats64 = mvneta_get_stats64,
1155 };
1156
1157 @@ -2729,10 +2735,6 @@ static int mvneta_probe(struct platform_device *pdev)
1158
1159 pp = netdev_priv(dev);
1160
1161 - pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
1162 - init_timer(&pp->tx_done_timer);
1163 - clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
1164 -
1165 pp->weight = MVNETA_RX_POLL_WEIGHT;
1166 pp->phy_node = phy_node;
1167 pp->phy_interface = phy_mode;
1168 @@ -2751,7 +2753,12 @@ static int mvneta_probe(struct platform_device *pdev)
1169
1170 clk_prepare_enable(pp->clk);
1171
1172 - pp->tx_done_timer.data = (unsigned long)dev;
1173 + /* Alloc per-cpu stats */
1174 + pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
1175 + if (!pp->stats) {
1176 + err = -ENOMEM;
1177 + goto err_clk;
1178 + }
1179
1180 pp->tx_ring_size = MVNETA_MAX_TXD;
1181 pp->rx_ring_size = MVNETA_MAX_RXD;
1182 @@ -2762,7 +2769,7 @@ static int mvneta_probe(struct platform_device *pdev)
1183 err = mvneta_init(pp, phy_addr);
1184 if (err < 0) {
1185 dev_err(&pdev->dev, "can't init eth hal\n");
1186 - goto err_clk;
1187 + goto err_free_stats;
1188 }
1189 mvneta_port_power_up(pp, phy_mode);
1190
1191 @@ -2791,6 +2798,8 @@ static int mvneta_probe(struct platform_device *pdev)
1192
1193 err_deinit:
1194 mvneta_deinit(pp);
1195 +err_free_stats:
1196 + free_percpu(pp->stats);
1197 err_clk:
1198 clk_disable_unprepare(pp->clk);
1199 err_unmap:
1200 @@ -2811,6 +2820,7 @@ static int mvneta_remove(struct platform_device *pdev)
1201 unregister_netdev(dev);
1202 mvneta_deinit(pp);
1203 clk_disable_unprepare(pp->clk);
1204 + free_percpu(pp->stats);
1205 iounmap(pp->base);
1206 irq_dispose_mapping(dev->irq);
1207 free_netdev(dev);
1208 diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
1209 index 91245f5dbe81..47257b6eea84 100644
1210 --- a/drivers/rapidio/devices/tsi721_dma.c
1211 +++ b/drivers/rapidio/devices/tsi721_dma.c
1212 @@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
1213 "desc %p not ACKed\n", tx_desc);
1214 }
1215
1216 + if (ret == NULL) {
1217 + dev_dbg(bdma_chan->dchan.device->dev,
1218 + "%s: unable to obtain tx descriptor\n", __func__);
1219 + goto err_out;
1220 + }
1221 +
1222 i = bdma_chan->wr_count_next % bdma_chan->bd_num;
1223 if (i == bdma_chan->bd_num - 1) {
1224 i = 0;
1225 @@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
1226 tx_desc->txd.phys = bdma_chan->bd_phys +
1227 i * sizeof(struct tsi721_dma_desc);
1228 tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
1229 -
1230 +err_out:
1231 spin_unlock_bh(&bdma_chan->lock);
1232
1233 return ret;
1234 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
1235 index 86d522004a20..e5953c8018c5 100644
1236 --- a/drivers/scsi/scsi_lib.c
1237 +++ b/drivers/scsi/scsi_lib.c
1238 @@ -815,6 +815,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1239 scsi_next_command(cmd);
1240 return;
1241 }
1242 + } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
1243 + /*
1244 + * Certain non BLOCK_PC requests are commands that don't
1245 + * actually transfer anything (FLUSH), so cannot use
1246 + * good_bytes != blk_rq_bytes(req) as the signal for an error.
1247 + * This sets the error explicitly for the problem case.
1248 + */
1249 + error = __scsi_error_from_host_byte(cmd, result);
1250 }
1251
1252 /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
1253 diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
1254 index f983915168b7..3496a77612ba 100644
1255 --- a/drivers/staging/vt6655/bssdb.c
1256 +++ b/drivers/staging/vt6655/bssdb.c
1257 @@ -1026,7 +1026,7 @@ start:
1258 pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
1259 }
1260
1261 - {
1262 + if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
1263 pDevice->byReAssocCount++;
1264 if ((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) { //10 sec timeout
1265 printk("Re-association timeout!!!\n");
1266 diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
1267 index 08b250f01dae..d170b6f9db7c 100644
1268 --- a/drivers/staging/vt6655/device_main.c
1269 +++ b/drivers/staging/vt6655/device_main.c
1270 @@ -2434,6 +2434,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
1271 int handled = 0;
1272 unsigned char byData = 0;
1273 int ii = 0;
1274 + unsigned long flags;
1275 // unsigned char byRSSI;
1276
1277 MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
1278 @@ -2459,7 +2460,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
1279
1280 handled = 1;
1281 MACvIntDisable(pDevice->PortOffset);
1282 - spin_lock_irq(&pDevice->lock);
1283 +
1284 + spin_lock_irqsave(&pDevice->lock, flags);
1285
1286 //Make sure current page is 0
1287 VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
1288 @@ -2700,7 +2702,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
1289 MACvSelectPage1(pDevice->PortOffset);
1290 }
1291
1292 - spin_unlock_irq(&pDevice->lock);
1293 + spin_unlock_irqrestore(&pDevice->lock, flags);
1294 +
1295 MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
1296
1297 return IRQ_RETVAL(handled);
1298 diff --git a/include/linux/printk.h b/include/linux/printk.h
1299 index 22c7052e9372..708b8a84f6c0 100644
1300 --- a/include/linux/printk.h
1301 +++ b/include/linux/printk.h
1302 @@ -124,9 +124,9 @@ asmlinkage __printf(1, 2) __cold
1303 int printk(const char *fmt, ...);
1304
1305 /*
1306 - * Special printk facility for scheduler use only, _DO_NOT_USE_ !
1307 + * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
1308 */
1309 -__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
1310 +__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
1311
1312 /*
1313 * Please don't use printk_ratelimit(), because it shares ratelimiting state
1314 @@ -161,7 +161,7 @@ int printk(const char *s, ...)
1315 return 0;
1316 }
1317 static inline __printf(1, 2) __cold
1318 -int printk_sched(const char *s, ...)
1319 +int printk_deferred(const char *s, ...)
1320 {
1321 return 0;
1322 }
1323 diff --git a/init/main.c b/init/main.c
1324 index e83ac04fda97..2132ffd5e031 100644
1325 --- a/init/main.c
1326 +++ b/init/main.c
1327 @@ -606,6 +606,10 @@ asmlinkage void __init start_kernel(void)
1328 if (efi_enabled(EFI_RUNTIME_SERVICES))
1329 efi_enter_virtual_mode();
1330 #endif
1331 +#ifdef CONFIG_X86_ESPFIX64
1332 + /* Should be run before the first non-init thread is created */
1333 + init_espfix_bsp();
1334 +#endif
1335 thread_info_cache_init();
1336 cred_init();
1337 fork_init(totalram_pages);
1338 diff --git a/kernel/printk.c b/kernel/printk.c
1339 index d37d45c90ae6..f7aff4bd5454 100644
1340 --- a/kernel/printk.c
1341 +++ b/kernel/printk.c
1342 @@ -2485,7 +2485,7 @@ void wake_up_klogd(void)
1343 preempt_enable();
1344 }
1345
1346 -int printk_sched(const char *fmt, ...)
1347 +int printk_deferred(const char *fmt, ...)
1348 {
1349 unsigned long flags;
1350 va_list args;
1351 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1352 index 2672eca82a2b..c771f2547bef 100644
1353 --- a/kernel/sched/core.c
1354 +++ b/kernel/sched/core.c
1355 @@ -1235,7 +1235,7 @@ out:
1356 * leave kernel.
1357 */
1358 if (p->mm && printk_ratelimit()) {
1359 - printk_sched("process %d (%s) no longer affine to cpu%d\n",
1360 + printk_deferred("process %d (%s) no longer affine to cpu%d\n",
1361 task_pid_nr(p), p->comm, cpu);
1362 }
1363 }
1364 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
1365 index 15334e6de832..2dffc7b5d469 100644
1366 --- a/kernel/sched/rt.c
1367 +++ b/kernel/sched/rt.c
1368 @@ -892,7 +892,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
1369
1370 if (!once) {
1371 once = true;
1372 - printk_sched("sched: RT throttling activated\n");
1373 + printk_deferred("sched: RT throttling activated\n");
1374 }
1375 } else {
1376 /*
1377 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
1378 index 9df0e3b19f09..58e8430165b5 100644
1379 --- a/kernel/time/clockevents.c
1380 +++ b/kernel/time/clockevents.c
1381 @@ -138,7 +138,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
1382 {
1383 /* Nothing to do if we already reached the limit */
1384 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
1385 - printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
1386 + printk_deferred(KERN_WARNING
1387 + "CE: Reprogramming failure. Giving up\n");
1388 dev->next_event.tv64 = KTIME_MAX;
1389 return -ETIME;
1390 }
1391 @@ -151,9 +152,10 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
1392 if (dev->min_delta_ns > MIN_DELTA_LIMIT)
1393 dev->min_delta_ns = MIN_DELTA_LIMIT;
1394
1395 - printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
1396 - dev->name ? dev->name : "?",
1397 - (unsigned long long) dev->min_delta_ns);
1398 + printk_deferred(KERN_WARNING
1399 + "CE: %s increased min_delta_ns to %llu nsec\n",
1400 + dev->name ? dev->name : "?",
1401 + (unsigned long long) dev->min_delta_ns);
1402 return 0;
1403 }
1404
1405 diff --git a/lib/btree.c b/lib/btree.c
1406 index f9a484676cb6..4264871ea1a0 100644
1407 --- a/lib/btree.c
1408 +++ b/lib/btree.c
1409 @@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
1410
1411 void btree_destroy(struct btree_head *head)
1412 {
1413 + mempool_free(head->node, head->mempool);
1414 mempool_destroy(head->mempool);
1415 head->mempool = NULL;
1416 }
1417 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1418 index 0ab02fb8e9b1..71305c6aba5b 100644
1419 --- a/mm/page_alloc.c
1420 +++ b/mm/page_alloc.c
1421 @@ -2339,7 +2339,7 @@ static inline int
1422 gfp_to_alloc_flags(gfp_t gfp_mask)
1423 {
1424 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1425 - const gfp_t wait = gfp_mask & __GFP_WAIT;
1426 + const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
1427
1428 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1429 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
1430 @@ -2348,20 +2348,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
1431 * The caller may dip into page reserves a bit more if the caller
1432 * cannot run direct reclaim, or if the caller has realtime scheduling
1433 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1434 - * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1435 + * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
1436 */
1437 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
1438
1439 - if (!wait) {
1440 + if (atomic) {
1441 /*
1442 - * Not worth trying to allocate harder for
1443 - * __GFP_NOMEMALLOC even if it can't schedule.
1444 + * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
1445 + * if it can't schedule.
1446 */
1447 - if (!(gfp_mask & __GFP_NOMEMALLOC))
1448 + if (!(gfp_mask & __GFP_NOMEMALLOC))
1449 alloc_flags |= ALLOC_HARDER;
1450 /*
1451 - * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1452 - * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1453 + * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
1454 + * comment for __cpuset_node_allowed_softwall().
1455 */
1456 alloc_flags &= ~ALLOC_CPUSET;
1457 } else if (unlikely(rt_task(current)) && !in_interrupt())
1458 diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
1459 index 9a0e5874e73e..164fa9dcd97d 100644
1460 --- a/net/l2tp/l2tp_ppp.c
1461 +++ b/net/l2tp/l2tp_ppp.c
1462 @@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1463 int err;
1464
1465 if (level != SOL_PPPOL2TP)
1466 - return udp_prot.setsockopt(sk, level, optname, optval, optlen);
1467 + return -EINVAL;
1468
1469 if (optlen < sizeof(int))
1470 return -EINVAL;
1471 @@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
1472 struct pppol2tp_session *ps;
1473
1474 if (level != SOL_PPPOL2TP)
1475 - return udp_prot.getsockopt(sk, level, optname, optval, optlen);
1476 + return -EINVAL;
1477
1478 if (get_user(len, optlen))
1479 return -EFAULT;
1480 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
1481 index d566cdba24ec..10eea2326022 100644
1482 --- a/net/mac80211/tx.c
1483 +++ b/net/mac80211/tx.c
1484 @@ -398,6 +398,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
1485 if (ieee80211_has_order(hdr->frame_control))
1486 return TX_CONTINUE;
1487
1488 + if (ieee80211_is_probe_req(hdr->frame_control))
1489 + return TX_CONTINUE;
1490 +
1491 /* no stations in PS mode */
1492 if (!atomic_read(&ps->num_sta_ps))
1493 return TX_CONTINUE;
1494 @@ -447,6 +450,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
1495 {
1496 struct sta_info *sta = tx->sta;
1497 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
1498 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
1499 struct ieee80211_local *local = tx->local;
1500
1501 if (unlikely(!sta))
1502 @@ -457,6 +461,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
1503 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
1504 int ac = skb_get_queue_mapping(tx->skb);
1505
1506 + /* only deauth, disassoc and action are bufferable MMPDUs */
1507 + if (ieee80211_is_mgmt(hdr->frame_control) &&
1508 + !ieee80211_is_deauth(hdr->frame_control) &&
1509 + !ieee80211_is_disassoc(hdr->frame_control) &&
1510 + !ieee80211_is_action(hdr->frame_control)) {
1511 + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1512 + return TX_CONTINUE;
1513 + }
1514 +
1515 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
1516 sta->sta.addr, sta->sta.aid, ac);
1517 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
1518 @@ -514,22 +527,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
1519 static ieee80211_tx_result debug_noinline
1520 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
1521 {
1522 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
1523 - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
1524 -
1525 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
1526 return TX_CONTINUE;
1527 -
1528 - /* only deauth, disassoc and action are bufferable MMPDUs */
1529 - if (ieee80211_is_mgmt(hdr->frame_control) &&
1530 - !ieee80211_is_deauth(hdr->frame_control) &&
1531 - !ieee80211_is_disassoc(hdr->frame_control) &&
1532 - !ieee80211_is_action(hdr->frame_control)) {
1533 - if (tx->flags & IEEE80211_TX_UNICAST)
1534 - info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1535 - return TX_CONTINUE;
1536 - }
1537 -
1538 if (tx->flags & IEEE80211_TX_UNICAST)
1539 return ieee80211_tx_h_unicast_ps_buf(tx);
1540 else
1541 diff --git a/net/wireless/trace.h b/net/wireless/trace.h
1542 index 5755bc14abbd..bc5a75b1aef8 100644
1543 --- a/net/wireless/trace.h
1544 +++ b/net/wireless/trace.h
1545 @@ -1972,7 +1972,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
1546 MAC_ASSIGN(addr, addr);
1547 __entry->key_type = key_type;
1548 __entry->key_id = key_id;
1549 - memcpy(__entry->tsc, tsc, 6);
1550 + if (tsc)
1551 + memcpy(__entry->tsc, tsc, 6);
1552 ),
1553 TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
1554 NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,