Magellan Linux

Diff of /trunk/kernel-magellan/patches-3.15/0108-3.15.9-all-fixes.patch


revision 2493 by niro, Thu Sep 4 19:55:25 2014 UTC  →  revision 2496 by niro, Thu Sep 4 20:16:04 2014 UTC

Content present only in the old revision appears below under "[revision 2493]"; content present only in the new revision under "[revision 2496]".
[revision 2496]
1    diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
2    index c584a51add15..afe68ddbe6a4 100644
3    --- a/Documentation/x86/x86_64/mm.txt
4    +++ b/Documentation/x86/x86_64/mm.txt
5    @@ -12,6 +12,8 @@ ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
6     ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
7     ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
8     ... unused hole ...
9    +ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
10    +... unused hole ...
11     ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
12     ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
13     ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
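
Note: the "%esp fixup stacks" range added above matches the espfix constants introduced later in this patch (a sketch of the arithmetic, assuming PGDIR_SHIFT == 39):
    ESPFIX_PGD_ENTRY = -2
    ESPFIX_BASE_ADDR = (-2UL) << 39 = 0xffffff0000000000
and one PGD slot spans 1UL << 39 bytes, so the region ends at 0xffffff7fffffffff.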
[revision 2493]
diff --git a/Makefile b/Makefile
index 25b85aba1e2e..76b75f7b8485 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 15
-SUBLEVEL = 9
+SUBLEVEL = 10
 EXTRAVERSION =
 NAME = Double Funky Skunk

[revision 2496]
diff --git a/Makefile b/Makefile
index d5d9a22a404a..25b85aba1e2e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 15
-SUBLEVEL = 8
+SUBLEVEL = 9
 EXTRAVERSION =
 NAME = Double Funky Skunk

[revision 2493]
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 3c3c89f52643..7f9bab26a499 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -34,6 +34,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 {
 }
 
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
 extern void flush_tlb_pending(void);
@@ -48,11 +50,6 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
 
-#define flush_tlb_kernel_range(start,end) \
-do { flush_tsb_kernel_range(start,end); \
- __flush_tlb_kernel_range(start,end); \
-} while (0)
-
 static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
 {
 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
@@ -63,11 +60,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
-#define flush_tlb_kernel_range(start, end) \
-do { flush_tsb_kernel_range(start,end); \
- smp_flush_tlb_kernel_range(start, end); \
-} while (0)
-
 #define global_flush_tlb_page(mm, vaddr) \
 smp_flush_tlb_page(mm, vaddr)

[revision 2496]
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 5babba0a3a75..904dcf5973f3 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -182,6 +182,7 @@
 regulator-name = "ldo3";
 regulator-min-microvolt = <1800000>;
 regulator-max-microvolt = <1800000>;
+ regulator-always-on;
 regulator-boot-on;
 };

38    diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
39    index ab1116d086be..83a5b8685bd9 100644
40    --- a/arch/arm/boot/dts/hi3620.dtsi
41    +++ b/arch/arm/boot/dts/hi3620.dtsi
42    @@ -73,7 +73,7 @@
43    
44     L2: l2-cache {
45     compatible = "arm,pl310-cache";
46    - reg = <0xfc10000 0x100000>;
47    + reg = <0x100000 0x100000>;
48     interrupts = <0 15 4>;
49     cache-unified;
50     cache-level = <2>;
51    diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
52    index 4522366da759..15468fbbdea3 100644
53    --- a/arch/arm/crypto/aesbs-glue.c
54    +++ b/arch/arm/crypto/aesbs-glue.c
55    @@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
56     dst += AES_BLOCK_SIZE;
57     } while (--blocks);
58     }
59    - err = blkcipher_walk_done(desc, &walk, 0);
60    + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
61     }
62     return err;
63   }
64    @@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
65   bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
66      walk.nbytes, &ctx->dec, walk.iv);
67     kernel_neon_end();
68    - err = blkcipher_walk_done(desc, &walk, 0);
69    + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
70     }
71     while (walk.nbytes) {
72     u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
73    @@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
74     dst += AES_BLOCK_SIZE;
75     src += AES_BLOCK_SIZE;
76     } while (--blocks);
77    - err = blkcipher_walk_done(desc, &walk, 0);
78    + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
79     }
80     return err;
81     }
82    @@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
83     bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
84      walk.nbytes, &ctx->enc, walk.iv);
85     kernel_neon_end();
86    - err = blkcipher_walk_done(desc, &walk, 0);
87    + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
88     }
89     return err;
90     }
91    @@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
92     bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
93      walk.nbytes, &ctx->dec, walk.iv);
94     kernel_neon_end();
95    - err = blkcipher_walk_done(desc, &walk, 0);
96    + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
97     }
98     return err;
99     }
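
Note: in the blkcipher walk API used here, the third argument of blkcipher_walk_done() is the number of bytes left unprocessed. A hedged sketch of the loop shape these four hunks fix:

    while ((nbytes = walk.nbytes)) {
        /* process nbytes / AES_BLOCK_SIZE whole blocks ... */
        err = blkcipher_walk_done(desc, &walk, nbytes % AES_BLOCK_SIZE);
    }

Passing a hard-coded 0 claimed every byte had been consumed even when the chunk was not a whole number of AES blocks.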
100    diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
101    index 17cd39360afe..93914d220069 100644
102    --- a/arch/arm/mach-omap2/gpmc-nand.c
103    +++ b/arch/arm/mach-omap2/gpmc-nand.c
104    @@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
105     soc_is_omap54xx() || soc_is_dra7xx())
106     return 1;
107    
108    + if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
109    + ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
110    + if (cpu_is_omap24xx())
111    + return 0;
112    + else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
113    + return 0;
114    + else
115    + return 1;
116    + }
117  +
118   /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
119     * which require H/W based ECC error detection */
120     if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
121    @@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
122     (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
123     return 0;
124    
125  - /*
126  - * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
127    - * and AM33xx derivates. Other chips may be added if confirmed to work.
128    - */
129    - if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
130    -    (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
131    - return 0;
132    -
133     /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
134     if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
135     return 1;
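
Note: after this hunk the BCH4/BCH8 "hardware detection, software correction" case is decided up front: OMAP2 (no hardware ECC detection) and OMAP3630 ES1.0 return 0 (unsupported) and everything else returns 1, before the ELM-engine checks below are reached.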
136    diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
137    index 8e0e52eb76b5..d7a0ee898d24 100644
138    --- a/arch/arm/mm/idmap.c
139    +++ b/arch/arm/mm/idmap.c
140    @@ -25,6 +25,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
141     pr_warning("Failed to allocate identity pmd.\n");
142     return;
143     }
144    + /*
145    + * Copy the original PMD to ensure that the PMD entries for
146    + * the kernel image are preserved.
147    + */
148    + if (!pud_none(*pud))
149    + memcpy(pmd, pmd_offset(pud, 0),
150    +       PTRS_PER_PMD * sizeof(pmd_t));
151     pud_populate(&init_mm, pud, pmd);
152     pmd += pmd_index(addr);
153     } else
154    diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
155    index b68c6b22e1c8..f15c22e8bcd5 100644
156    --- a/arch/arm/mm/mmu.c
157    +++ b/arch/arm/mm/mmu.c
158    @@ -1436,8 +1436,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
159     return;
160    
161   /* remap kernel code and data */
162    - map_start = init_mm.start_code;
163    - map_end   = init_mm.brk;
164    + map_start = init_mm.start_code & PMD_MASK;
165    + map_end   = ALIGN(init_mm.brk, PMD_SIZE);
166    
167     /* get a handle on things... */
168     pgd0 = pgd_offset_k(0);
169    @@ -1472,7 +1472,7 @@ void __init early_paging_init(const struct machine_desc *mdesc,
170     }
171    
172   /* remap pmds for kernel mapping */
173  - phys = __pa(map_start) & PMD_MASK;
174  + phys = __pa(map_start);
175   do {
176   *pmdk++ = __pmd(phys | pmdprot);
177   phys += PMD_SIZE;
178  diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
179  index 6b0641c3f03f..fe52db2eea6a 100644
180  --- a/arch/powerpc/perf/core-book3s.c
181  +++ b/arch/powerpc/perf/core-book3s.c
182  @@ -1307,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
183   out_enable:
184   pmao_restore_workaround(ebb);
185  
186  + if (ppmu->flags & PPMU_ARCH_207S)
187  + mtspr(SPRN_MMCR2, 0);
188    +
189     mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
190    
191     mb();
192    @@ -1315,9 +1318,6 @@ static void power_pmu_enable(struct pmu *pmu)
193    
194     write_mmcr0(cpuhw, mmcr0);
195    
196    - if (ppmu->flags & PPMU_ARCH_207S)
197    - mtspr(SPRN_MMCR2, 0);
198  -
199   /*
200   * Enable instruction sampling if necessary
201     */
202    diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
203    index 512e45f0c204..1dd140824b49 100644
204    --- a/arch/x86/Kconfig
205    +++ b/arch/x86/Kconfig
206    @@ -910,10 +910,27 @@ config VM86
207     default y
208     depends on X86_32
209     ---help---
210    -  This option is required by programs like DOSEMU to run 16-bit legacy
211    -  code on X86 processors. It also may be needed by software like
212    -  XFree86 to initialize some video cards via BIOS. Disabling this
213    -  option saves about 6k.
214    +  This option is required by programs like DOSEMU to run
215    +  16-bit real mode legacy code on x86 processors. It also may
216    +  be needed by software like XFree86 to initialize some video
217    +  cards via BIOS. Disabling this option saves about 6K.
218    +
219    +config X86_16BIT
220    + bool "Enable support for 16-bit segments" if EXPERT
221    + default y
222    + ---help---
223    +  This option is required by programs like Wine to run 16-bit
224    +  protected mode legacy code on x86 processors.  Disabling
225    +  this option saves about 300 bytes on i386, or around 6K text
226  +  plus 16K runtime memory on x86-64.
227    +
228    +config X86_ESPFIX32
229    + def_bool y
230    + depends on X86_16BIT && X86_32
231    +
232    +config X86_ESPFIX64
233    + def_bool y
234    + depends on X86_16BIT && X86_64
235    
236     config TOSHIBA
237     tristate "Toshiba Laptop support"
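
Note: the two derived symbols above are plain def_bool entries, so a 16-bit-capable kernel gets exactly one of them (X86_ESPFIX32 on 32-bit builds, X86_ESPFIX64 on 64-bit builds), and both vanish when X86_16BIT is disabled.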
238    diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
239    new file mode 100644
240    index 000000000000..99efebb2f69d
241    --- /dev/null
242    +++ b/arch/x86/include/asm/espfix.h
243    @@ -0,0 +1,16 @@
244    +#ifndef _ASM_X86_ESPFIX_H
245    +#define _ASM_X86_ESPFIX_H
246    +
247    +#ifdef CONFIG_X86_64
248    +
249    +#include <asm/percpu.h>
250    +
251    +DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
252    +DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
253    +
254    +extern void init_espfix_bsp(void);
255    +extern void init_espfix_ap(void);
256    +
257    +#endif /* CONFIG_X86_64 */
258    +
259    +#endif /* _ASM_X86_ESPFIX_H */
260    diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
261    index bba3cf88e624..0a8b519226b8 100644
262    --- a/arch/x86/include/asm/irqflags.h
263    +++ b/arch/x86/include/asm/irqflags.h
264    @@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
265    
266     #define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */
267    
268    -#define INTERRUPT_RETURN iretq
269    +#define INTERRUPT_RETURN jmp native_iret
270     #define USERGS_SYSRET64 \
271     swapgs; \
272     sysretq;
273    diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
274    index c883bf726398..7166e25ecb57 100644
275    --- a/arch/x86/include/asm/pgtable_64_types.h
276    +++ b/arch/x86/include/asm/pgtable_64_types.h
277    @@ -61,6 +61,8 @@ typedef struct { pteval_t pte; } pte_t;
278     #define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
279     #define MODULES_END      _AC(0xffffffffff000000, UL)
280     #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
281    +#define ESPFIX_PGD_ENTRY _AC(-2, UL)
282    +#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
283    
284     #define EARLY_DYNAMIC_PAGE_TABLES 64
285    
286    diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
287    index 9264f04a4c55..ff4e7b236e21 100644
288    --- a/arch/x86/include/asm/setup.h
289    +++ b/arch/x86/include/asm/setup.h
290    @@ -59,6 +59,8 @@ static inline void x86_ce4100_early_setup(void) { }
291    
292   #ifndef _SETUP
293  
294  +#include <asm/espfix.h>
295  +
296   /*
297    * This is set up by the setup-routine at boot-time
298    */

[revision 2493]
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
 index b73274fb961a..42f2bca1d338 100644  
 --- a/arch/sparc/include/uapi/asm/unistd.h  
 +++ b/arch/sparc/include/uapi/asm/unistd.h  
 @@ -410,8 +410,9 @@  
  #define __NR_finit_module 342  
  #define __NR_sched_setattr 343  
  #define __NR_sched_getattr 344  
 +#define __NR_renameat2 345  
   
 -#define NR_syscalls 345  
 +#define NR_syscalls 346  
   
  /* Bitmask values returned from kern_features system call.  */  
  #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001  
 diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c  
 index e01d75d40329..66dacd56bb10 100644  
 --- a/arch/sparc/kernel/ldc.c  
 +++ b/arch/sparc/kernel/ldc.c  
 @@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)  
  if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||  
     !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||  
     lp->hs_state != LDC_HS_OPEN)  
 - err = -EINVAL;  
 + err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);  
  else  
  err = start_handshake(lp);  
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
 index d066eb18650c..f834224208ed 100644  
 --- a/arch/sparc/kernel/sys32.S  
 +++ b/arch/sparc/kernel/sys32.S  
 @@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)  
  SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)  
  SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)  
  SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)  
 +SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)  
   
  .globl sys32_mmap2  
  sys32_mmap2:  
 diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S  
 index 151ace8766cc..85fe9b1087cd 100644  
 --- a/arch/sparc/kernel/systbls_32.S  
 +++ b/arch/sparc/kernel/systbls_32.S  
 @@ -86,3 +86,4 @@ sys_call_table:  
  /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime  
  /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev  
  /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr  
 +/*345*/ .long sys_renameat2  
 diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S  
 index 4bd4e2bb26cf..33ecba2826ea 100644  
 --- a/arch/sparc/kernel/systbls_64.S  
 +++ b/arch/sparc/kernel/systbls_64.S  
 @@ -87,6 +87,7 @@ sys_call_table32:  
  /*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime  
  .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev  
  /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr  
 + .word sys32_renameat2  
   
  #endif /* CONFIG_COMPAT */  
   
 @@ -165,3 +166,4 @@ sys_call_table:  
  /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime  
  .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev  
  /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr  
 + .word sys_renameat2  
 diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c  
 index aa4d55b0bdf0..5ce8f2f64604 100644  
 --- a/arch/sparc/math-emu/math_32.c  
 +++ b/arch/sparc/math-emu/math_32.c  
 @@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)  
  case 0: fsr = *pfsr;  
  if (IR == -1) IR = 2;  
  /* fcc is always fcc0 */  
 - fsr &= ~0xc00; fsr |= (IR << 10); break;  
 + fsr &= ~0xc00; fsr |= (IR << 10);  
  *pfsr = fsr;  
  break;  
  case 1: rd->s = IR; break;  
 diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c  
 index ed3c969a5f4c..96862241b342 100644  
 --- a/arch/sparc/mm/init_64.c  
 +++ b/arch/sparc/mm/init_64.c  
 @@ -350,6 +350,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *  
   
  mm = vma->vm_mm;  
   
 + /* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */  
 + if (!pte_accessible(mm, pte))  
 + return;  
 +  
  spin_lock_irqsave(&mm->context.lock, flags);  
   
  #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)  
 @@ -2614,6 +2618,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,  
   
  pte = pmd_val(entry);  
   
 + /* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */  
 + if (!(pte & _PAGE_VALID))  
 + return;  
+
 /* We are fabricating 8MB pages using 4MB real hw pages.  */
 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
@@ -2694,3 +2702,26 @@ void hugetlb_setup(struct pt_regs *regs)
 }
 }
 #endif
+
+#ifdef CONFIG_SMP
+#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
+#else
+#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
+#endif
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
+ if (start < LOW_OBP_ADDRESS) {
+ flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
+ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
+ }
+ if (end > HI_OBP_ADDRESS) {
+ flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
+ do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
+ }
+ } else {
+ flush_tsb_kernel_range(start, end);
+ do_flush_tlb_kernel_range(start, end);
+ }
+}

[revision 2496]
299    diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
300    index f4d96000d33a..491ef3e59850 100644
301    --- a/arch/x86/kernel/Makefile
302    +++ b/arch/x86/kernel/Makefile
303    @@ -29,6 +29,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
304     obj-y += syscall_$(BITS).o vsyscall_gtod.o
305     obj-$(CONFIG_X86_64) += vsyscall_64.o
306     obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
307    +obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
308     obj-$(CONFIG_SYSFS) += ksysfs.o
309     obj-y += bootflag.o e820.o
310     obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
311    diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
312    index c87810b1b557..c5a9cb94dee6 100644
313    --- a/arch/x86/kernel/entry_32.S
314    +++ b/arch/x86/kernel/entry_32.S
315    @@ -529,6 +529,7 @@ syscall_exit:
316     restore_all:
317     TRACE_IRQS_IRET
318     restore_all_notrace:
319    +#ifdef CONFIG_X86_ESPFIX32
320     movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
321     # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
322     # are returning to the kernel.
323    @@ -539,6 +540,7 @@ restore_all_notrace:
324     cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
325     CFI_REMEMBER_STATE
326     je ldt_ss # returning to user-space with LDT SS
327    +#endif
328     restore_nocheck:
329     RESTORE_REGS 4 # skip orig_eax/error_code
330     irq_return:
331    @@ -551,6 +553,7 @@ ENTRY(iret_exc)
332     .previous
333     _ASM_EXTABLE(irq_return,iret_exc)
334    
335    +#ifdef CONFIG_X86_ESPFIX32
336     CFI_RESTORE_STATE
337     ldt_ss:
338     #ifdef CONFIG_PARAVIRT
339    @@ -594,6 +597,7 @@ ldt_ss:
340     lss (%esp), %esp /* switch to espfix segment */
341     CFI_ADJUST_CFA_OFFSET -8
342     jmp restore_nocheck
343    +#endif
344     CFI_ENDPROC
345     ENDPROC(system_call)
346    
347  @@ -706,6 +710,7 @@ END(syscall_badsys)
348    * the high word of the segment base from the GDT and swiches to the
349    * normal stack and adjusts ESP with the matching offset.
350      */
351    +#ifdef CONFIG_X86_ESPFIX32
352     /* fixup the stack */
353     mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
354     mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
355    @@ -715,8 +720,10 @@ END(syscall_badsys)
356     pushl_cfi %eax
357     lss (%esp), %esp /* switch to the normal stack segment */
358     CFI_ADJUST_CFA_OFFSET -8
359    +#endif
360     .endm
361     .macro UNWIND_ESPFIX_STACK
362    +#ifdef CONFIG_X86_ESPFIX32
363     movl %ss, %eax
364     /* see if on espfix stack */
365     cmpw $__ESPFIX_SS, %ax
366    @@ -727,6 +734,7 @@ END(syscall_badsys)
367     /* switch to normal stack */
368     FIXUP_ESPFIX_STACK
369     27:
370    +#endif
371     .endm
372    
373     /*
374    @@ -1357,11 +1365,13 @@ END(debug)
375     ENTRY(nmi)
376     RING0_INT_FRAME
377     ASM_CLAC
378    +#ifdef CONFIG_X86_ESPFIX32
379     pushl_cfi %eax
380     movl %ss, %eax
381     cmpw $__ESPFIX_SS, %ax
382     popl_cfi %eax
383     je nmi_espfix_stack
384    +#endif
385     cmpl $ia32_sysenter_target,(%esp)
386     je nmi_stack_fixup
387     pushl_cfi %eax
388    @@ -1401,6 +1411,7 @@ nmi_debug_stack_check:
389     FIX_STACK 24, nmi_stack_correct, 1
390     jmp nmi_stack_correct
391    
392    +#ifdef CONFIG_X86_ESPFIX32
393     nmi_espfix_stack:
394     /* We have a RING0_INT_FRAME here.
395     *
396    @@ -1422,6 +1433,7 @@ nmi_espfix_stack:
397     lss 12+4(%esp), %esp # back to espfix stack
398     CFI_ADJUST_CFA_OFFSET -24
399     jmp irq_return
400    +#endif
401     CFI_ENDPROC
402     END(nmi)
403    
404    diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
405    index 1e96c3628bf2..03cd2a8f6009 100644
406    --- a/arch/x86/kernel/entry_64.S
407    +++ b/arch/x86/kernel/entry_64.S
408    @@ -58,6 +58,7 @@
409     #include <asm/asm.h>
410     #include <asm/context_tracking.h>
411     #include <asm/smap.h>
412    +#include <asm/pgtable_types.h>
413     #include <linux/err.h>
414    
415     /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
416    @@ -1041,12 +1042,45 @@ restore_args:
417    
418     irq_return:
419     INTERRUPT_RETURN
420    - _ASM_EXTABLE(irq_return, bad_iret)
421    
422    -#ifdef CONFIG_PARAVIRT
423     ENTRY(native_iret)
424    + /*
425    + * Are we returning to a stack segment from the LDT?  Note: in
426    + * 64-bit mode SS:RSP on the exception stack is always valid.
427    + */
428    +#ifdef CONFIG_X86_ESPFIX64
429    + testb $4,(SS-RIP)(%rsp)
430    + jnz native_irq_return_ldt
431    +#endif
432    +
433    +native_irq_return_iret:
434     iretq
435    - _ASM_EXTABLE(native_iret, bad_iret)
436    + _ASM_EXTABLE(native_irq_return_iret, bad_iret)
437    +
438    +#ifdef CONFIG_X86_ESPFIX64
439    +native_irq_return_ldt:
440    + pushq_cfi %rax
441    + pushq_cfi %rdi
442    + SWAPGS
443    + movq PER_CPU_VAR(espfix_waddr),%rdi
444    + movq %rax,(0*8)(%rdi) /* RAX */
445    + movq (2*8)(%rsp),%rax /* RIP */
446    + movq %rax,(1*8)(%rdi)
447    + movq (3*8)(%rsp),%rax /* CS */
448    + movq %rax,(2*8)(%rdi)
449    + movq (4*8)(%rsp),%rax /* RFLAGS */
450    + movq %rax,(3*8)(%rdi)
451    + movq (6*8)(%rsp),%rax /* SS */
452    + movq %rax,(5*8)(%rdi)
453    + movq (5*8)(%rsp),%rax /* RSP */
454    + movq %rax,(4*8)(%rdi)
455    + andl $0xffff0000,%eax
456    + popq_cfi %rdi
457    + orq PER_CPU_VAR(espfix_stack),%rax
458    + SWAPGS
459    + movq %rax,%rsp
460    + popq_cfi %rax
461    + jmp native_irq_return_iret
462   #endif
463    
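
Note (a hedged reading of the asm above): native_irq_return_ldt copies the IRET frame plus a scratch RAX slot onto the per-CPU ministack, in 8-byte slots:
    (0*8) RAX save, (1*8) RIP, (2*8) CS, (3*8) RFLAGS, (4*8) RSP, (5*8) SS
and then rebuilds %rsp as espfix_stack | (user RSP & 0xffff0000), so the stack-pointer bits that survive a later 16-bit IRET are the user's own rather than kernel addresses.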
464     .section .fixup,"ax"
465    @@ -1110,9 +1144,40 @@ ENTRY(retint_kernel)
466     call preempt_schedule_irq
467     jmp exit_intr
468     #endif
469    -
470     CFI_ENDPROC
471     END(common_interrupt)
472  +
473  + /*
474  + * If IRET takes a fault on the espfix stack, then we
475    + * end up promoting it to a doublefault.  In that case,
476    + * modify the stack to make it look like we just entered
477    + * the #GP handler from user space, similar to bad_iret.
478    + */
479    +#ifdef CONFIG_X86_ESPFIX64
480    + ALIGN
481    +__do_double_fault:
482    + XCPT_FRAME 1 RDI+8
483    + movq RSP(%rdi),%rax /* Trap on the espfix stack? */
484    + sarq $PGDIR_SHIFT,%rax
485    + cmpl $ESPFIX_PGD_ENTRY,%eax
486    + jne do_double_fault /* No, just deliver the fault */
487    + cmpl $__KERNEL_CS,CS(%rdi)
488    + jne do_double_fault
489    + movq RIP(%rdi),%rax
490    + cmpq $native_irq_return_iret,%rax
491    + jne do_double_fault /* This shouldn't happen... */
492    + movq PER_CPU_VAR(kernel_stack),%rax
493    + subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
494    + movq %rax,RSP(%rdi)
495    + movq $0,(%rax) /* Missing (lost) #GP error code */
496    + movq $general_protection,RIP(%rdi)
497    + retq
498    + CFI_ENDPROC
499    +END(__do_double_fault)
500  +#else
501  +# define __do_double_fault do_double_fault
502  +#endif
503  +
504   /*
505      * End of kprobes section
506      */
507    @@ -1314,7 +1379,7 @@ zeroentry overflow do_overflow
508     zeroentry bounds do_bounds
509     zeroentry invalid_op do_invalid_op
510     zeroentry device_not_available do_device_not_available
511    -paranoiderrorentry double_fault do_double_fault
512    +paranoiderrorentry double_fault __do_double_fault
513     zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
514     errorentry invalid_TSS do_invalid_TSS
515     errorentry segment_not_present do_segment_not_present
516    @@ -1601,7 +1666,7 @@ error_sti:
517      */
518     error_kernelspace:
519     incl %ebx
520    - leaq irq_return(%rip),%rcx
521    + leaq native_irq_return_iret(%rip),%rcx
522     cmpq %rcx,RIP+8(%rsp)
523     je error_swapgs
524     movl %ecx,%eax /* zero extend */
525    diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
526    new file mode 100644
527    index 000000000000..94d857fb1033
528    --- /dev/null
529    +++ b/arch/x86/kernel/espfix_64.c
530    @@ -0,0 +1,208 @@
531    +/* ----------------------------------------------------------------------- *
532    + *
533    + *   Copyright 2014 Intel Corporation; author: H. Peter Anvin
534    + *
535    + *   This program is free software; you can redistribute it and/or modify it
536    + *   under the terms and conditions of the GNU General Public License,
537    + *   version 2, as published by the Free Software Foundation.
538    + *
539    + *   This program is distributed in the hope it will be useful, but WITHOUT
540    + *   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
541    + *   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
542    + *   more details.
543    + *
544    + * ----------------------------------------------------------------------- */
545    +
546    +/*
547    + * The IRET instruction, when returning to a 16-bit segment, only
548    + * restores the bottom 16 bits of the user space stack pointer.  This
549    + * causes some 16-bit software to break, but it also leaks kernel state
550    + * to user space.
551    + *
552    + * This works around this by creating percpu "ministacks", each of which
553    + * is mapped 2^16 times 64K apart.  When we detect that the return SS is
554    + * on the LDT, we copy the IRET frame to the ministack and use the
555    + * relevant alias to return to userspace.  The ministacks are mapped
556    + * readonly, so if the IRET fault we promote #GP to #DF which is an IST
557    + * vector and thus has its own stack; we then do the fixup in the #DF
558    + * handler.
559    + *
560    + * This file sets up the ministacks and the related page tables.  The
561    + * actual ministack invocation is in entry_64.S.
562    + */
563    +
564    +#include <linux/init.h>
565    +#include <linux/init_task.h>
566    +#include <linux/kernel.h>
567    +#include <linux/percpu.h>
568    +#include <linux/gfp.h>
569    +#include <linux/random.h>
570    +#include <asm/pgtable.h>
571    +#include <asm/pgalloc.h>
572    +#include <asm/setup.h>
573    +#include <asm/espfix.h>
574    +
575    +/*
576    + * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
577    + * it up to a cache line to avoid unnecessary sharing.
578    + */
579    +#define ESPFIX_STACK_SIZE (8*8UL)
580    +#define ESPFIX_STACKS_PER_PAGE (PAGE_SIZE/ESPFIX_STACK_SIZE)
581    +
582    +/* There is address space for how many espfix pages? */
583    +#define ESPFIX_PAGE_SPACE (1UL << (PGDIR_SHIFT-PAGE_SHIFT-16))
584    +
585    +#define ESPFIX_MAX_CPUS (ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
586    +#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
587    +# error "Need more than one PGD for the ESPFIX hack"
588    +#endif
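
Note: a quick check of the capacity math above, assuming 4 KiB pages:
    ESPFIX_STACKS_PER_PAGE = 4096 / 64           = 64
    ESPFIX_PAGE_SPACE      = 1 << (39 - 12 - 16) = 2048 pages
    ESPFIX_MAX_CPUS        = 64 * 2048           = 131072
so the single reserved PGD slot covers any realistic CONFIG_NR_CPUS.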
589    +
590    +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
591    +
592    +/* This contains the *bottom* address of the espfix stack */
593    +DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
594    +DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
595    +
596    +/* Initialization mutex - should this be a spinlock? */
597    +static DEFINE_MUTEX(espfix_init_mutex);
598    +
599    +/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
600    +#define ESPFIX_MAX_PAGES  DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
601    +static void *espfix_pages[ESPFIX_MAX_PAGES];
602    +
603    +static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
604    + __aligned(PAGE_SIZE);
605    +
606    +static unsigned int page_random, slot_random;
607    +
608    +/*
609    + * This returns the bottom address of the espfix stack for a specific CPU.
610    + * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
611    + * we have to account for some amount of padding at the end of each page.
612    + */
613    +static inline unsigned long espfix_base_addr(unsigned int cpu)
614  +{
615  + unsigned long page, slot;
616  + unsigned long addr;
617  +
618  + page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
619  + slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
620  + addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
621  + addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
622  + addr += ESPFIX_BASE_ADDR;
623  + return addr;
624  +}
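
Note (hedged): the bit-splice in espfix_base_addr(),
    addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
leaves bits 16-31 of every ministack address zero. Combined with the aliased mappings set up in init_espfix_ap(), each ministack is reachable at 2^16 addresses spaced 64 KiB apart (as the file's header comment says), which is what lets entry_64.S fold the user's RSP bits 16-31 into the address it switches to.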
[revision 2493]
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
 index 4d8f8aba0ea5..b87434c99f4d 100644  
 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h  
 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h  
 @@ -346,6 +346,7 @@ struct sw_tx_bd {  
  u8 flags;  
  /* Set on the first BD descriptor when there is a split BD */  
  #define BNX2X_TSO_SPLIT_BD (1<<0)  
 +#define BNX2X_HAS_SECOND_PBD (1<<1)  
  };  
   
  struct sw_rx_page {  
 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  
 index 0979967577a1..b2b0d2e684ef 100644  
 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  
 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  
 @@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,  
  --nbd;  
  bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));  
   
 + if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {  
 + /* Skip second parse bd... */  
 + --nbd;  
 + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));  
 + }  
+
 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -3877,6 +3883,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 /* set encapsulation flag in start BD */
 SET_FLAG(tx_start_bd->general_data,
 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+ tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
 nbd++;
 } else if (xmit_type & XMIT_CSUM) {
 /* Set PBD in checksum offload case w/o encapsulation */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0966bd04375f..837224639148 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1149,6 +1149,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 goto out;
 }
 
+ if (skb_padto(skb, ETH_ZLEN)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
 /* set the SKB transmit checksum */
 if (priv->desc_64b_en) {
 ret = bcmgenet_put_tx_csum(dev, skb);

[revision 2496]
625  +
626  +#define PTE_STRIDE        (65536/PAGE_SIZE)
627  +#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
628  +#define ESPFIX_PMD_CLONES PTRS_PER_PMD
629  +#define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
630  +
631  +#define PGTABLE_PROT  ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)
632  +
633  +static void init_espfix_random(void)
634  +{
635  + unsigned long rand;
636  +
637  + /*
638  + * This is run before the entropy pools are initialized,
639  + * but this is hopefully better than nothing.
640  + */
641  + if (!arch_get_random_long(&rand)) {
642  + /* The constant is an arbitrary large prime */
643  + rdtscll(rand);
644  + rand *= 0xc345c6b72fd16123UL;
645  + }
646  +
647  + slot_random = rand % ESPFIX_STACKS_PER_PAGE;
648  + page_random = (rand / ESPFIX_STACKS_PER_PAGE)
649  + & (ESPFIX_PAGE_SPACE - 1);
650  +}

[revision 2493]
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
 index 675550fe8ee9..ac1ebe0374be 100644  
 --- a/drivers/net/ethernet/brocade/bna/bnad.c  
 +++ b/drivers/net/ethernet/brocade/bna/bnad.c  
 @@ -600,9 +600,9 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)  
  prefetch(bnad->netdev);  
   
  cq = ccb->sw_q;  
 - cmpl = &cq[ccb->producer_index];  
   
  while (packets < budget) {  
 + cmpl = &cq[ccb->producer_index];  
  if (!cmpl->valid)  
  break;  
  /* The 'valid' field is set by the adapter, only after writing  
 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c  
 index 7eec598c5cb6..d650b911dae4 100644  
 --- a/drivers/net/macvlan.c  
 +++ b/drivers/net/macvlan.c  
 @@ -547,6 +547,7 @@ static int macvlan_init(struct net_device *dev)  
   (lowerdev->state & MACVLAN_STATE_MASK);  
  dev->features = lowerdev->features & MACVLAN_FEATURES;  
  dev->features |= ALWAYS_ON_FEATURES;  
 + dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;  
  dev->gso_max_size = lowerdev->gso_max_size;  
  dev->iflink = lowerdev->ifindex;  
  dev->hard_header_len = lowerdev->hard_header_len;  
 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c  
 index 4987a1c6dc52..b96c4a226624 100644  
 --- a/drivers/net/phy/phy_device.c  
 +++ b/drivers/net/phy/phy_device.c  
 @@ -354,7 +354,7 @@ int phy_device_register(struct phy_device *phydev)  
  phydev->bus->phy_map[phydev->addr] = phydev;  
   
  /* Run all of the fixups for this PHY */  
 - err = phy_init_hw(phydev);  
 + err = phy_scan_fixups(phydev);  
  if (err) {  
  pr_err("PHY %d failed to initialize\n", phydev->addr);  
  goto out;  
 diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c  
 index 01805319e1e0..1aff970be33e 100644  
 --- a/drivers/net/ppp/pptp.c  
 +++ b/drivers/net/ppp/pptp.c  
 @@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)  
  nf_reset(skb);  
   
  skb->ip_summed = CHECKSUM_NONE;  
 - ip_select_ident(skb, &rt->dst, NULL);  
 + ip_select_ident(skb, NULL);  
  ip_send_check(iph);  
   
  ip_local_out(skb);  
 diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c  
 index 160e7510aca6..0787b9756165 100644  
 --- a/drivers/sbus/char/bbc_envctrl.c  
 +++ b/drivers/sbus/char/bbc_envctrl.c  
 @@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op,  
  if (!tp)  
  return;  
   
 + INIT_LIST_HEAD(&tp->bp_list);  
 + INIT_LIST_HEAD(&tp->glob_list);  
+
 tp->client = bbc_i2c_attach(bp, op);
 if (!tp->client) {
 kfree(tp);
@@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op,
 if (!fp)
 return;
 
+ INIT_LIST_HEAD(&fp->bp_list);
+ INIT_LIST_HEAD(&fp->glob_list);
+
 fp->client = bbc_i2c_attach(bp, op);
 if (!fp->client) {
 kfree(fp);
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index c7763e482eb2..812b5f0361b6 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -300,13 +300,18 @@ static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index
 if (!bp)
 return NULL;
 
+ INIT_LIST_HEAD(&bp->temps);
+ INIT_LIST_HEAD(&bp->fans);
+
 bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs");
 if (!bp->i2c_control_regs)
 goto fail;
 
- bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
- if (!bp->i2c_bussel_reg)
- goto fail;
+ if (op->num_resources == 2) {
+ bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
+ if (!bp->i2c_bussel_reg)
+ goto fail;
+ }
 
 bp->waiting = 0;
 init_waitqueue_head(&bp->wq);
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 2f57df9a71d9..a1e09c0d46f2 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *up,
    (up->port.line == up->port.cons->index))
 saw_console_brk = 1;
 
+ if (count == 0) {
+ if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
+ stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
+     SAB82532_ISR0_FERR);
+ up->port.icount.brk++;
+ uart_handle_break(&up->port);
+ }
+ }
+
 for (i = 0; i < count; i++) {
 unsigned char ch = buf[i], flag;
 
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 2c4004475e71..84e0deb95abd 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -24,7 +24,8 @@ struct xfs_log_vec {
 struct xfs_log_iovec *lv_iovecp; /* iovec array */
 struct xfs_log_item *lv_item; /* owner */
 char *lv_buf; /* formatted buffer */
- int lv_buf_len; /* size of formatted buffer */
+ int lv_bytes; /* accounted space in buffer */
+ int lv_buf_len; /* aligned size of buffer */
 int lv_size; /* size of allocated lv */
 };
 
@@ -52,15 +53,21 @@ xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
 return vec->i_addr;
 }
 
+/*
+ * We need to make sure the next buffer is naturally aligned for the biggest
+ * basic data type we put into it.  We already accounted for this padding when
+ * sizing the buffer.
+ *
+ * However, this padding does not get written into the log, and hence we have to
+ * track the space used by the log vectors separately to prevent log space hangs
+ * due to inaccurate accounting (i.e. a leak) of the used log space through the
+ * CIL context ticket.
+ */
 static inline void
 xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
 {
- /*
- * We need to make sure the next buffer is naturally aligned for the
- * biggest basic data type we put into it.  We already accounted for
- * this when sizing the buffer.
- */
 lv->lv_buf_len += round_up(len, sizeof(uint64_t));
+ lv->lv_bytes += len;
 vec->i_len = len;
 }
 
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 7e5455391176..de835da6764d 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -97,7 +97,7 @@ xfs_cil_prepare_item(
 {
 /* Account for the new LV being passed in */
 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
- *diff_len += lv->lv_buf_len;
+ *diff_len += lv->lv_bytes;
 *diff_iovecs += lv->lv_niovecs;
 }
 
@@ -111,7 +111,7 @@ xfs_cil_prepare_item(
 else if (old_lv != lv) {
 ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
 
- *diff_len -= old_lv->lv_buf_len;
+ *diff_len -= old_lv->lv_bytes;
 *diff_iovecs -= old_lv->lv_niovecs;
 kmem_free(old_lv);
 }
@@ -239,7 +239,7 @@ xlog_cil_insert_format_items(
 * that the space reservation accounting is correct.
 */
 *diff_iovecs -= lv->lv_niovecs;
- *diff_len -= lv->lv_buf_len;
+ *diff_len -= lv->lv_bytes;
 } else {
 /* allocate new data chunk */
 lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
@@ -259,6 +259,7 @@ xlog_cil_insert_format_items(
 
 /* The allocated data region lies beyond the iovec region */
 lv->lv_buf_len = 0;
+ lv->lv_bytes = 0;
 lv->lv_buf = (char *)lv + buf_size - nbytes;
 ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
 

[revision 2496]
+
+void __init init_espfix_bsp(void)
+{
+ pgd_t *pgd_p;
+ pteval_t ptemask;
+
+ ptemask = __supported_pte_mask;
+
+ /* Install the espfix pud into the kernel page directory */
+ pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+ pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
+
+ /* Randomize the locations */
+ init_espfix_random();
+
+ /* The rest is the same as for any other processor */
+ init_espfix_ap();
+}
+
+void init_espfix_ap(void)
+{
+ unsigned int cpu, page;
+ unsigned long addr;
+ pud_t pud, *pud_p;
+ pmd_t pmd, *pmd_p;
+ pte_t pte, *pte_p;
+ int n;
+ void *stack_page;
+ pteval_t ptemask;
+
+ /* We only have to do this once... */
+ if (likely(this_cpu_read(espfix_stack)))
+ return; /* Already initialized */
+
+ cpu = smp_processor_id();
+ addr = espfix_base_addr(cpu);
+ page = cpu/ESPFIX_STACKS_PER_PAGE;
+
+ /* Did another CPU already set this up? */
+ stack_page = ACCESS_ONCE(espfix_pages[page]);
+ if (likely(stack_page))
+ goto done;
+
+ mutex_lock(&espfix_init_mutex);
+
+ /* Did we race on the lock? */
+ stack_page = ACCESS_ONCE(espfix_pages[page]);
+ if (stack_page)
+ goto unlock_done;
+
+ ptemask = __supported_pte_mask;
+
+ pud_p = &espfix_pud_page[pud_index(addr)];
+ pud = *pud_p;
+ if (!pud_present(pud)) {
+ pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
+ pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
+ paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+ for (n = 0; n < ESPFIX_PUD_CLONES; n++)
+ set_pud(&pud_p[n], pud);
+ }
+
+ pmd_p = pmd_offset(&pud, addr);
+ pmd = *pmd_p;
+ if (!pmd_present(pmd)) {
+ pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
+ pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
+ paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+ for (n = 0; n < ESPFIX_PMD_CLONES; n++)
+ set_pmd(&pmd_p[n], pmd);
+ }
+
+ pte_p = pte_offset_kernel(&pmd, addr);
+ stack_page = (void *)__get_free_page(GFP_KERNEL);
+ pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
+ for (n = 0; n < ESPFIX_PTE_CLONES; n++)
+ set_pte(&pte_p[n*PTE_STRIDE], pte);
+
+ /* Job is done for this CPU and any CPU which shares this page */
+ ACCESS_ONCE(espfix_pages[page]) = stack_page;
+
+unlock_done:
+ mutex_unlock(&espfix_init_mutex);
+done:
+ this_cpu_write(espfix_stack, addr);
+ this_cpu_write(espfix_waddr, (unsigned long)stack_page
+       + (addr & ~PAGE_MASK));
+}
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index dcbbaa165bde..c37886d759cc 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -20,8 +20,6 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
-int sysctl_ldt16 = 0;
-
 #ifdef CONFIG_SMP
 static void flush_ldt(void *current_mm)
 {
@@ -231,16 +229,10 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 }
 }
 
- /*
- * On x86-64 we do not support 16-bit segments due to
- * IRET leaking the high bits of the kernel stack address.
- */
-#ifdef CONFIG_X86_64
- if (!ldt_info.seg_32bit && !sysctl_ldt16) {
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
 error = -EINVAL;
 goto out_unlock;
 }
-#endif
 
 fill_ldt(&ldt, &ldt_info);
 if (oldmode)

[revision 2493]
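
Note: of the two espfix entry points above, init_espfix_bsp() runs once on the boot CPU (installing the shared PUD, randomizing the layout, then calling init_espfix_ap() for itself), while every secondary CPU calls init_espfix_ap() from start_secondary(), as the smpboot.c hunk below shows.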
 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h  
 index 058271bde27a..823ec7bb9c67 100644  
 --- a/include/net/inetpeer.h  
 +++ b/include/net/inetpeer.h  
 @@ -41,14 +41,13 @@ struct inet_peer {  
  struct rcu_head     gc_rcu;  
  };  
  /*  
 - * Once inet_peer is queued for deletion (refcnt == -1), following fields  
 - * are not available: rid, ip_id_count  
 + * Once inet_peer is queued for deletion (refcnt == -1), following field  
 + * is not available: rid  
  * We can share memory with rcu_head to help keep inet_peer small.  
  */  
  union {  
  struct {  
  atomic_t rid; /* Frag reception counter */  
 - atomic_t ip_id_count; /* IP ID for the next packet */  
  };  
  struct rcu_head         rcu;  
  struct inet_peer *gc_next;  
 @@ -165,7 +164,7 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);  
  void inetpeer_invalidate_tree(struct inet_peer_base *);  
767    
768   /*   fill_ldt(&ldt, &ldt_info);
769  - * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,   if (oldmode)
770  + * temporary check to make sure we dont access rid, tcp_ts,  diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
771    * tcp_ts_stamp if no refcount is taken on inet_peer  index 3f08f34f93eb..a1da6737ba5b 100644
772    */  --- a/arch/x86/kernel/paravirt_patch_64.c
773   static inline void inet_peer_refcheck(const struct inet_peer *p)  +++ b/arch/x86/kernel/paravirt_patch_64.c
774  @@ -173,13 +172,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)  @@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
775   WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);   DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
776   }   DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
777     DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
778    -DEF_NATIVE(pv_cpu_ops, iret, "iretq");
779     DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
780     DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
781     DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
782    @@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
783     PATCH_SITE(pv_irq_ops, save_fl);
784     PATCH_SITE(pv_irq_ops, irq_enable);
785     PATCH_SITE(pv_irq_ops, irq_disable);
786    - PATCH_SITE(pv_cpu_ops, iret);
787     PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
788     PATCH_SITE(pv_cpu_ops, usergs_sysret32);
789     PATCH_SITE(pv_cpu_ops, usergs_sysret64);
790    diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
791    index ae2fd975b782..5492798930ef 100644
792    --- a/arch/x86/kernel/smpboot.c
793    +++ b/arch/x86/kernel/smpboot.c
794    @@ -244,6 +244,13 @@ static void notrace start_secondary(void *unused)
795     check_tsc_sync_target();
796    
797  -   /*
798  -/* can be called with or without local BH being disabled */  + * Enable the espfix hack for this CPU
799  -static inline int inet_getid(struct inet_peer *p, int more)  + */
800  -{  +#ifdef CONFIG_X86_ESPFIX64
801  - more++;  + init_espfix_ap();
802  - inet_peer_refcheck(p);  +#endif
803  - return atomic_add_return(more, &p->ip_id_count) - more;  +
804  -}  + /*
805  -   * We need to hold vector_lock so there the set of online cpus
806   #endif /* _NET_INETPEER_H */   * does not change while we are assigning vectors to cpus.  Holding
807  diff --git a/include/net/ip.h b/include/net/ip.h   * this lock ensures we don't half assign or remove an irq from a cpu.
808  index 3ec2b0fb9d83..54de0292ac53 100644  diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
809  --- a/include/net/ip.h  index 20621d753d5f..167ffcac16ed 100644
810  +++ b/include/net/ip.h  --- a/arch/x86/mm/dump_pagetables.c
811  @@ -310,9 +310,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)  +++ b/arch/x86/mm/dump_pagetables.c
812   }  @@ -30,12 +30,14 @@ struct pg_state {
813   }   unsigned long start_address;
814     unsigned long current_address;
815     const struct addr_marker *marker;
816    + unsigned long lines;
817     bool to_dmesg;
818     };
819    
820  -void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);   struct addr_marker {
821  +u32 ip_idents_reserve(u32 hash, int segs);   unsigned long start_address;
822  +void __ip_select_ident(struct iphdr *iph, int segs);   const char *name;
823    + unsigned long max_lines;
824     };
825    
826  -static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)   /* indices for address_markers; keep sync'd w/ address_markers below */
827  +static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)  @@ -46,6 +48,7 @@ enum address_markers_idx {
828     LOW_KERNEL_NR,
829     VMALLOC_START_NR,
830     VMEMMAP_START_NR,
831    + ESPFIX_START_NR,
832     HIGH_KERNEL_NR,
833     MODULES_VADDR_NR,
834     MODULES_END_NR,
835    @@ -68,6 +71,7 @@ static struct addr_marker address_markers[] = {
836     { PAGE_OFFSET, "Low Kernel Mapping" },
837     { VMALLOC_START,        "vmalloc() Area" },
838     { VMEMMAP_START,        "Vmemmap" },
839    + { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
840     { __START_KERNEL_map,   "High Kernel Mapping" },
841     { MODULES_VADDR,        "Modules" },
842     { MODULES_END,          "End Modules" },
843    @@ -182,7 +186,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
844          pgprot_t new_prot, int level)
845   {   {
846   struct iphdr *iph = ip_hdr(skb);   pgprotval_t prot, cur;
847    - static const char units[] = "KMGTPE";
848    + static const char units[] = "BKMGTPE";
849    
850  @@ -322,24 +323,20 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s   /*
851   * does not change, they drop every other packet in   * If we have a "break" in the series, we need to flush the state that
852   * a TCP stream using header compression.  @@ -197,6 +201,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
853     st->current_prot = new_prot;
854     st->level = level;
855     st->marker = address_markers;
856    + st->lines = 0;
857     pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
858       st->marker->name);
859     } else if (prot != cur || level != st->level ||
860    @@ -208,17 +213,24 @@ static void note_page(struct seq_file *m, struct pg_state *st,
861     /*
862     * Now print the actual finished series
863   */   */
864  - iph->id = (sk && inet_sk(sk)->inet_daddr) ?  - pt_dump_seq_printf(m, st->to_dmesg,  "0x%0*lx-0x%0*lx   ",
865  - htons(inet_sk(sk)->inet_id++) : 0;  -   width, st->start_address,
866  - } else  -   width, st->current_address);
 - __ip_select_ident(iph, dst, 0);  
 -}  
867  -  -
868  -static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)  - delta = (st->current_address - st->start_address) >> 10;
869  -{  - while (!(delta & 1023) && unit[1]) {
870  - struct iphdr *iph = ip_hdr(skb);  - delta >>= 10;
871  -  - unit++;
872  - if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {  + if (!st->marker->max_lines ||
873   if (sk && inet_sk(sk)->inet_daddr) {  +    st->lines < st->marker->max_lines) {
874   iph->id = htons(inet_sk(sk)->inet_id);  + pt_dump_seq_printf(m, st->to_dmesg,
875  - inet_sk(sk)->inet_id += 1 + more;  +   "0x%0*lx-0x%0*lx   ",
876  - } else  +   width, st->start_address,
877  + inet_sk(sk)->inet_id += segs;  +   width, st->current_address);
878  + } else {  +
879   iph->id = 0;  + delta = st->current_address - st->start_address;
880  - } else  + while (!(delta & 1023) && unit[1]) {
881  - __ip_select_ident(iph, dst, more);  + delta >>= 10;
882  + }  + unit++;
883  + } else {  + }
884  + __ip_select_ident(iph, segs);  + pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
885  + }  +    delta, *unit);
886  +}  + printk_prot(m, st->current_prot, st->level,
887  +  +    st->to_dmesg);
888  +static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)   }
889  +{  - pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ", delta, *unit);
890  + ip_select_ident_segs(skb, sk, 1);  - printk_prot(m, st->current_prot, st->level, st->to_dmesg);
891   }  + st->lines++;
892    
893     /*
894     * We print markers for special areas of address space,
895    @@ -226,7 +238,17 @@ static void note_page(struct seq_file *m, struct pg_state *st,
896     * This helps in the interpretation.
897     */
898     if (st->current_address >= st->marker[1].start_address) {
899    + if (st->marker->max_lines &&
900    +    st->lines > st->marker->max_lines) {
901    + unsigned long nskip =
902    + st->lines - st->marker->max_lines;
903    + pt_dump_seq_printf(m, st->to_dmesg,
904    +   "... %lu entr%s skipped ... \n",
905    +   nskip,
906    +   nskip == 1 ? "y" : "ies");
907    + }
908     st->marker++;
909    + st->lines = 0;
910     pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
911       st->marker->name);
912     }
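
Note: with max_lines set to 16 for the "ESPfix Area" marker above, the dump prints at most 16 mapping runs for that region and then one "... N entries skipped ..." line, rather than listing every aliased ministack mapping.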
913    diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
914    index 310c5f0dbef1..3adf2e6ad7d6 100644
915    --- a/arch/x86/vdso/vdso32-setup.c
916    +++ b/arch/x86/vdso/vdso32-setup.c
917    @@ -39,7 +39,6 @@
918     #ifdef CONFIG_X86_64
919     #define vdso_enabled sysctl_vsyscall32
920     #define arch_setup_additional_pages syscall32_setup_pages
921    -extern int sysctl_ldt16;
922     #endif
923    
 /*
@@ -251,13 +250,6 @@ static struct ctl_table abi_table2[] = {
 .mode = 0644,
 .proc_handler = proc_dointvec
 },
- {
- .procname = "ldt16",
- .data = &sysctl_ldt16,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
 {}
 };
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 0982233b9b84..a6a72ce8630f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -574,13 +574,7 @@ void xen_enable_syscall(void)
 }
 #endif /* CONFIG_X86_64 */
 }
-void xen_enable_nmi(void)
-{
-#ifdef CONFIG_X86_64
- if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
- BUG();
-#endif
-}
+
 void __init xen_pvmmu_arch_setup(void)
 {
 HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
@@ -595,7 +589,6 @@ void __init xen_pvmmu_arch_setup(void)
 
 xen_enable_sysenter();
 xen_enable_syscall();
- xen_enable_nmi();
 }
 
 /* This function is not called for HVM domains */
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index f9e1ec346e35..8453e6e39895 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
 beqz a2, 1f # if at start of vector, don't restore
 
 addi a0, a0, -128
- bbsi a0, 8, 1f # don't restore except for overflow 8 and 12
- bbsi a0, 7, 2f
+ bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
+
+ /*
+ * This fixup handler is for the extremely unlikely case where the
+ * overflow handler's reference thru a0 gets a hardware TLB refill
+ * that bumps out the (distinct, aliasing) TLB entry that mapped its
+ * prior references thru a9/a13, and where our reference now thru
+ * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+ */
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ bbsi.l a0, 7, 2f
 
 /*
 * Restore a0 as saved by _WindowOverflow8().
- *
- * FIXME:  we really need a fixup handler for this L32E,
- * for the extremely unlikely case where the overflow handler's
- * reference thru a0 gets a hardware TLB refill that bumps out
- * the (distinct, aliasing) TLB entry that mapped its prior
- * references thru a9, and where our reference now thru a9
- * gets a 2nd-level miss exception (not hardware TLB refill).
 */
 
- l32e a2, a9, -16
- wsr a2, depc # replace the saved a0
- j 1f
+ l32e a0, a9, -16
+ wsr a0, depc # replace the saved a0
+ j 3f

[revision 2493]
diff --git a/include/net/ip.h b/include/net/ip.h
index 3ec2b0fb9d83..54de0292ac53 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -310,9 +310,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
 }
 }
 
-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+u32 ip_idents_reserve(u32 hash, int segs);
+void __ip_select_ident(struct iphdr *iph, int segs);
 
-static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
+static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
 {
 struct iphdr *iph = ip_hdr(skb);
 
@@ -322,24 +323,20 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s
 * does not change, they drop every other packet in
 * a TCP stream using header compression.
 */
- iph->id = (sk && inet_sk(sk)->inet_daddr) ?
- htons(inet_sk(sk)->inet_id++) : 0;
- } else
- __ip_select_ident(iph, dst, 0);
-}
-
-static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
-{
-
- if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
 if (sk && inet_sk(sk)->inet_daddr) {
 iph->id = htons(inet_sk(sk)->inet_id);
- inet_sk(sk)->inet_id += 1 + more;
- } else
+ inet_sk(sk)->inet_id += segs;
+ } else {
 iph->id = 0;
- } else
- __ip_select_ident(iph, dst, more);
+ }
+ } else {
+ __ip_select_ident(iph, segs);
+ }
+}
+
+static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
+{
+ ip_select_ident_segs(skb, sk, 1);
 }
 
 /*
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a4daf9eb8562..8dd8cab88b87 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -40,6 +40,7 @@ struct ip_tunnel_prl_entry {
 
 struct ip_tunnel_dst {
 struct dst_entry __rcu *dst;
+ __be32 saddr;
 };
 
 struct ip_tunnel {
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index d640925bc454..d6815688ad9e 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -660,8 +660,6 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
-
 int ip6_dst_hoplimit(struct dst_entry *dst);
 
 /*
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index f257486f17be..3f36d45b714a 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -3,8 +3,6 @@
 
 #include <linux/types.h>
 
-__u32 secure_ip_id(__be32 daddr);
-__u32 secure_ipv6_id(const __be32 daddr[4]);
 u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
       __be16 dport);
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index f14e54a05691..022d18ab27a6 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -128,6 +128,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 {
 struct batadv_frag_table_entry *chain;
 struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
+ struct batadv_frag_list_entry *frag_entry_last = NULL;
 struct batadv_frag_packet *frag_packet;
 uint8_t bucket;
 uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
@@ -180,11 +181,14 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 ret = true;
 goto out;
 }
+
+ /* store current entry because it could be the last in list */
+ frag_entry_last = frag_entry_curr;
 }
 
- /* Reached the end of the list, so insert after 'frag_entry_curr'. */
- if (likely(frag_entry_curr)) {
- hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
+ /* Reached the end of the list, so insert after 'frag_entry_last'. */
+ if (likely(frag_entry_last)) {
+ hlist_add_after(&frag_entry_last->list, &frag_entry_new->list);
 chain->size += skb->len - hdr_size;
 chain->timestamp = jiffies;
 ret = true;
diff --git a/net/compat.c b/net/compat.c
index 9a76eaf63184..bc8aeefddf3f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
 {
 int tot_len;
 
- if (kern_msg->msg_namelen) {
+ if (kern_msg->msg_name && kern_msg->msg_namelen) {
 if (mode == VERIFY_READ) {
 int err = move_addr_to_kernel(kern_msg->msg_name,
      kern_msg->msg_namelen,
@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
1008  @@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,  + j 3f
  if (err < 0)  
  return err;  
  }  
 - if (kern_msg->msg_name)  
 - kern_msg->msg_name = kern_address;  
 - } else  
 + kern_msg->msg_name = kern_address;  
 + } else {  
  kern_msg->msg_name = NULL;  
 + kern_msg->msg_namelen = 0;  
 + }  
1009    
1010   tot_len = iov_from_user_compat_to_kern(kern_iov,   2:
1011    (struct compat_iovec __user *)kern_msg->msg_iov,   /*
1012  diff --git a/net/core/iovec.c b/net/core/iovec.c   * Restore a0 as saved by _WindowOverflow12().
1013  index b61869429f4c..26dc0062652f 100644  - *
1014  --- a/net/core/iovec.c  - * FIXME:  we really need a fixup handler for this L32E,
1015  +++ b/net/core/iovec.c  - * for the extremely unlikely case where the overflow handler's
1016  @@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a  - * reference thru a0 gets a hardware TLB refill that bumps out
1017   {  - * the (distinct, aliasing) TLB entry that mapped its prior
1018   int size, ct, err;  - * references thru a13, and where our reference now thru a13
1019    - * gets a 2nd-level miss exception (not hardware TLB refill).
1020     */
1021    
1022  - if (m->msg_namelen) {  - l32e a2, a13, -16
1023  + if (m->msg_name && m->msg_namelen) {  - wsr a2, depc # replace the saved a0
1024   if (mode == VERIFY_READ) {  + l32e a0, a13, -16
1025   void __user *namep;  + wsr a0, depc # replace the saved a0
1026   namep = (void __user __force *) m->msg_name;  +3:
1027  @@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a  + xsr a3, excsave1
1028   if (err < 0)  + movi a0, 0
1029   return err;  + s32i a0, a3, EXC_TABLE_FIXUP
1030   }  + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1031  - if (m->msg_name)   1:
1032  - m->msg_name = address;   /*
1033  + m->msg_name = address;   * Restore WindowBase while leaving all address registers restored.
1034   } else {  @@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
  m->msg_name = NULL;  
 + m->msg_namelen = 0;  
  }  
1035    
1036   size = m->msg_iovlen * sizeof(struct iovec);   s32i a0, a2, PT_DEPC
 @@ -107,6 +107,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);  
  int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,  
  int offset, int len)  
  {  
 + /* No data? Done! */  
 + if (len == 0)  
 + return 0;  
 +  
  /* Skip over the finished iovecs */  
  while (offset >= iov->iov_len) {  
  offset -= iov->iov_len;  
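
The net/compat.c and net/core/iovec.c hunks above enforce one invariant in two places: a msghdr address is only used when both msg_name and msg_namelen are set, and when it is rejected, both fields are cleared together so later code can never pair a NULL pointer with a stale length. Reduced to its shape (the struct and names here are stand-ins, not the kernel's):

    #include <stddef.h>

    struct msg {            /* stand-in for struct msghdr */
            void  *name;
            size_t namelen;
    };

    static void sanitize_addr(struct msg *m, void *kern_address)
    {
            if (m->name && m->namelen) {
                    m->name = kern_address;
            } else {
                    m->name = NULL;
                    m->namelen = 0; /* length must be cleared with the pointer */
            }
    }

The memcpy_fromiovecend() hunk is the companion fix: with len == 0 there is nothing to copy, so it returns before walking iovec entries that a zero-length caller may not have populated.
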
 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c  
 index 897da56f3aff..ba71212f0251 100644  
 --- a/net/core/secure_seq.c  
 +++ b/net/core/secure_seq.c  
 @@ -85,31 +85,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);  
  #endif  
1037    
1038   #ifdef CONFIG_INET  +_DoubleExceptionVector_handle_exception:
1039  -__u32 secure_ip_id(__be32 daddr)   addx4 a0, a0, a3
1040  -{   l32i a0, a0, EXC_TABLE_FAST_USER
1041  - u32 hash[MD5_DIGEST_WORDS];   xsr a3, excsave1
1042  -  @@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow:
1043  - net_secret_init();   rotw -3
1044  - hash[0] = (__force __u32) daddr;   j 1b
 - hash[1] = net_secret[13];  
 - hash[2] = net_secret[14];  
 - hash[3] = net_secret[15];  
 -  
 - md5_transform(hash, net_secret);  
 -  
 - return hash[0];  
 -}  
 -  
 -__u32 secure_ipv6_id(const __be32 daddr[4])  
 -{  
 - __u32 hash[4];  
 -  
 - net_secret_init();  
 - memcpy(hash, daddr, 16);  
 - md5_transform(hash, net_secret);  
 -  
 - return hash[0];  
 -}  
1045    
1046   __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,  - .end literal_prefix
1047   __be16 sport, __be16 dport)  
1048  diff --git a/net/core/skbuff.c b/net/core/skbuff.c   ENDPROC(_DoubleExceptionVector)
1049  index 9433047b2453..6ab5f7721cdb 100644  
1050  --- a/net/core/skbuff.c   /*
1051  +++ b/net/core/skbuff.c  + * Fixup handler for TLB miss in double exception handler for window overflow.
1052  @@ -2968,9 +2968,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,  + * We get here with windowbase set to the window that was being spilled and
1053   tail = nskb;  + * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
1054    + * (bit set) window.
1055   __copy_skb_header(nskb, head_skb);  + *
1056  - nskb->mac_len = head_skb->mac_len;  + * We do the following here:
1057    + * - go to the original window retaining a0 value;
1058   skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);  + * - set up exception stack to return back to appropriate a0 restore code
1059  + skb_reset_mac_len(nskb);  + *   (we'll need to rotate window back and there's no place to save this
1060    + *    information, use different return address for that);
1061   skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,  + * - handle the exception;
1062   nskb->data - tnl_hlen,  + * - go to the window that was being spilled;
1063  diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c  + * - set up window_overflow_restore_a0_fixup as a fixup routine;
1064  index 9db3b877fcaf..0ffcd4d64e0a 100644  + * - reload a0;
1065  --- a/net/ipv4/igmp.c  + * - restore the original window;
1066  +++ b/net/ipv4/igmp.c  + * - reset the default fixup routine;
1067  @@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)  + * - return to user. By the time we get to this fixup handler all information
1068   pip->saddr    = fl4.saddr;  + *   about the conditions of the original double exception that happened in
1069   pip->protocol = IPPROTO_IGMP;  + *   the window overflow handler is lost, so we just return to userspace to
1070   pip->tot_len  = 0; /* filled in later */  + *   retry overflow from start.
1071  - ip_select_ident(skb, &rt->dst, NULL);  + *
1072  + ip_select_ident(skb, NULL);  + * a0: value of depc, original value in depc
1073   ((u8 *)&pip[1])[0] = IPOPT_RA;  + * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
1074   ((u8 *)&pip[1])[1] = 4;  + * a3: exctable, original value in excsave1
1075   ((u8 *)&pip[1])[2] = 0;  + */
1076  @@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,  +
1077   iph->daddr    = dst;  +ENTRY(window_overflow_restore_a0_fixup)
1078   iph->saddr    = fl4.saddr;  +
1079   iph->protocol = IPPROTO_IGMP;  + rsr a0, ps
1080  - ip_select_ident(skb, &rt->dst, NULL);  + extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
1081  + ip_select_ident(skb, NULL);  + rsr a2, windowbase
1082   ((u8 *)&iph[1])[0] = IPOPT_RA;  + sub a0, a2, a0
1083   ((u8 *)&iph[1])[1] = 4;  + extui a0, a0, 0, 3
1084   ((u8 *)&iph[1])[2] = 0;  + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1085  diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c  + xsr a3, excsave1
1086  index 48f424465112..bf2cb4a4714b 100644  +
1087  --- a/net/ipv4/inetpeer.c  + _beqi a0, 1, .Lhandle_1
1088  +++ b/net/ipv4/inetpeer.c  + _beqi a0, 3, .Lhandle_3
1089  @@ -26,20 +26,7 @@  +
1090    *  Theory of operations.  + .macro overflow_fixup_handle_exception_pane n
1091    *  We keep one entry for each peer IP address.  The nodes contains long-living  +
1092    *  information about the peer which doesn't depend on routes.  + rsr a0, depc
1093  - *  At this moment this information consists only of ID field for the next  + rotw -\n
1094  - *  outgoing IP packet.  This field is incremented with each packet as encoded  +
1095  - *  in inet_getid() function (include/net/inetpeer.h).  + xsr a3, excsave1
1096  - *  At the moment of writing this notes identifier of IP packets is generated  + wsr a2, depc
1097  - *  to be unpredictable using this code only for packets subjected  + l32i a2, a3, EXC_TABLE_KSTK
1098  - *  (actually or potentially) to defragmentation.  I.e. DF packets less than  + s32i a0, a2, PT_AREG0
1099  - *  PMTU in size when local fragmentation is disabled use a constant ID and do  +
1100  - *  not use this code (see ip_select_ident() in include/net/ip.h).  + movi a0, .Lrestore_\n
1101    + s32i a0, a2, PT_DEPC
1102    + rsr a0, exccause
1103    + j _DoubleExceptionVector_handle_exception
1104    +
1105    + .endm
1106    +
1107    + overflow_fixup_handle_exception_pane 2
1108    +.Lhandle_1:
1109    + overflow_fixup_handle_exception_pane 1
1110    +.Lhandle_3:
1111    + overflow_fixup_handle_exception_pane 3
1112    +
1113    + .macro overflow_fixup_restore_a0_pane n
1114    +
1115    + rotw \n
1116    + /* Need to preserve a0 value here to be able to handle exception
1117    + * that may occur on a0 reload from stack. It may occur because
1118    + * TLB miss handler may not be atomic and pointer to page table
1119    + * may be lost before we get here. There are no free registers,
1120    + * so we need to use EXC_TABLE_DOUBLE_SAVE area.
1121    + */
1122    + xsr a3, excsave1
1123    + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1124    + movi a2, window_overflow_restore_a0_fixup
1125    + s32i a2, a3, EXC_TABLE_FIXUP
1126    + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1127    + xsr a3, excsave1
1128    + bbsi.l a0, 7, 1f
1129    + l32e a0, a9, -16
1130    + j 2f
1131    +1:
1132    + l32e a0, a13, -16
1133    +2:
1134    + rotw -\n
1135    +
1136    + .endm
1137    +
1138    +.Lrestore_2:
1139    + overflow_fixup_restore_a0_pane 2
1140    +
1141    +.Lset_default_fixup:
1142    + xsr a3, excsave1
1143    + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1144    + movi a2, 0
1145    + s32i a2, a3, EXC_TABLE_FIXUP
1146    + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
1147    + xsr a3, excsave1
1148    + rfe
1149    +
1150    +.Lrestore_1:
1151    + overflow_fixup_restore_a0_pane 1
1152    + j .Lset_default_fixup
1153    +.Lrestore_3:
1154    + overflow_fixup_restore_a0_pane 3
1155    + j .Lset_default_fixup
1156    +
1157    +ENDPROC(window_overflow_restore_a0_fixup)
1158    +
1159    + .end literal_prefix
1160    +/*
1161      * Debug interrupt vector
1162    *    *
1163  - *  Route cache entries hold references to our nodes.    * There is not much space here, so simply jump to another handler.
1164  - *  New cache entries get references via lookup by destination IP address in  diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
1165  - *  the avl tree.  The reference is grabbed only when it's needed i.e. only  index ee32c0085dff..d16db6df86f8 100644
1166  - *  when we try to output IP packet which needs an unpredictable ID (see  --- a/arch/xtensa/kernel/vmlinux.lds.S
1167  - *  __ip_select_ident() in net/ipv4/route.c).  +++ b/arch/xtensa/kernel/vmlinux.lds.S
1168    *  Nodes are removed only when reference counter goes to 0.  @@ -269,13 +269,13 @@ SECTIONS
1169    *  When it's happened the node may be removed when a sufficient amount of    .UserExceptionVector.literal)
1170    *  time has been passed since its last use.  The less-recently-used entry can     SECTION_VECTOR (_DoubleExceptionVector_literal,
1171  @@ -62,7 +49,6 @@    .DoubleExceptionVector.literal,
1172    * refcnt: atomically against modifications on other CPU;  -  DOUBLEEXC_VECTOR_VADDR - 16,
1173    *   usually under some other lock to prevent node disappearing  +  DOUBLEEXC_VECTOR_VADDR - 40,
1174    * daddr: unchangeable    SIZEOF(.UserExceptionVector.text),
1175  - * ip_id_count: atomic value (no lock needed)    .UserExceptionVector.text)
1176    */     SECTION_VECTOR (_DoubleExceptionVector_text,
1177      .DoubleExceptionVector.text,
1178      DOUBLEEXC_VECTOR_VADDR,
1179    -  32,
1180    +  40,
1181      .DoubleExceptionVector.literal)
1182    
1183       . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
1184    diff --git a/crypto/af_alg.c b/crypto/af_alg.c
1185    index 966f893711b3..6a3ad8011585 100644
1186    --- a/crypto/af_alg.c
1187    +++ b/crypto/af_alg.c
1188    @@ -21,6 +21,7 @@
1189     #include <linux/module.h>
1190     #include <linux/net.h>
1191     #include <linux/rwsem.h>
1192    +#include <linux/security.h>
1193    
1194     struct alg_type_list {
1195     const struct af_alg_type *type;
1196    @@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
1197    
1198     sock_init_data(newsock, sk2);
1199     sock_graft(sk2, newsock);
1200    + security_sk_clone(sk, sk2);
1201    
1202   static struct kmem_cache *peer_cachep __read_mostly;   err = type->accept(ask->private, sk2);
1203  @@ -497,10 +483,6 @@ relookup:   if (err) {
1204   p->daddr = *daddr;  diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1205   atomic_set(&p->refcnt, 1);  index b91dfbe9fe8e..c83eb75c7cfc 100644
1206   atomic_set(&p->rid, 0);  --- a/drivers/gpu/drm/i915/intel_display.c
1207  - atomic_set(&p->ip_id_count,  +++ b/drivers/gpu/drm/i915/intel_display.c
1208  - (daddr->family == AF_INET) ?  @@ -11251,6 +11251,9 @@ static struct intel_quirk intel_quirks[] = {
 - secure_ip_id(daddr->addr.a4) :  
 - secure_ipv6_id(daddr->addr.a6));  
  p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;  
  p->rate_tokens = 0;  
  /* 60*HZ is arbitrary, but chosen enough high so that the first  
 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c  
 index a52f50187b54..4ecc1600f84d 100644  
 --- a/net/ipv4/ip_output.c  
 +++ b/net/ipv4/ip_output.c  
 @@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,  
  iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);  
  iph->saddr    = saddr;  
  iph->protocol = sk->sk_protocol;  
 - ip_select_ident(skb, &rt->dst, sk);  
 + ip_select_ident(skb, sk);  
   
  if (opt && opt->opt.optlen) {  
  iph->ihl += opt->opt.optlen>>2;  
 @@ -430,8 +430,7 @@ packet_routed:  
  ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);  
  }  
1209    
1210  - ip_select_ident_more(skb, &rt->dst, sk,   /* Toshiba CB35 Chromebook (Celeron 2955U) */
1211  -     (skb_shinfo(skb)->gso_segs ?: 1) - 1);   { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
1212  + ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);  +
1213    + /* HP Chromebook 14 (Celeron 2955U) */
1214   /* TODO : should we use skb->sk here instead of sk ? */  + { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
1215   skb->priority = sk->sk_priority;   };
 @@ -1379,7 +1378,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,  
  iph->ttl = ttl;  
  iph->protocol = sk->sk_protocol;  
  ip_copy_addrs(iph, fl4);  
 - ip_select_ident(skb, &rt->dst, sk);  
 + ip_select_ident(skb, sk);  
   
  if (opt) {  
  iph->ihl += opt->optlen>>2;  
 diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c  
 index b77b6a55b05e..e3e3a91f249e 100644  
 --- a/net/ipv4/ip_tunnel.c  
 +++ b/net/ipv4/ip_tunnel.c  
 @@ -69,23 +69,25 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)  
  }  
1216    
1217   static void __tunnel_dst_set(struct ip_tunnel_dst *idst,   static void intel_init_quirks(struct drm_device *dev)
1218  -     struct dst_entry *dst)  diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
1219  +     struct dst_entry *dst, __be32 saddr)  index a7e68c81f89d..a077cc86421b 100644
1220   {  --- a/drivers/iio/accel/bma180.c
1221   struct dst_entry *old_dst;  +++ b/drivers/iio/accel/bma180.c
1222    @@ -68,13 +68,13 @@
1223     /* Defaults values */
1224     #define BMA180_DEF_PMODE 0
1225     #define BMA180_DEF_BW 20
1226    -#define BMA180_DEF_SCALE 250
1227    +#define BMA180_DEF_SCALE 2452
1228    
1229     /* Available values for sysfs */
1230     #define BMA180_FLP_FREQ_AVAILABLE \
1231     "10 20 40 75 150 300"
1232     #define BMA180_SCALE_AVAILABLE \
1233    - "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
1234    + "0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
1235    
1236     struct bma180_data {
1237     struct i2c_client *client;
1238    @@ -94,7 +94,7 @@ enum bma180_axis {
1239     };
1240    
1241   dst_clone(dst);   static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
1242   old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);  -static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
1243   dst_release(old_dst);  +static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
 + idst->saddr = saddr;  
  }  
1244    
1245  -static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)   static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
 +static void tunnel_dst_set(struct ip_tunnel *t,  
 +   struct dst_entry *dst, __be32 saddr)  
1246   {   {
1247  - __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);  @@ -376,6 +376,8 @@ static int bma180_write_raw(struct iio_dev *indio_dev,
1248  + __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);   mutex_unlock(&data->mutex);
1249   }   return ret;
1250     case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
1251    + if (val2)
1252    + return -EINVAL;
1253     mutex_lock(&data->mutex);
1254     ret = bma180_set_bw(data, val);
1255     mutex_unlock(&data->mutex);
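
In IIO write_raw(), val carries the integer part of a requested value and val2 the fractional part in micro-units; the bma180 hunk rejects a fractional filter frequency up front because the supported bandwidths (bw_table) are whole Hz. The scale-table change in the same diff multiplies the old entries by g (9.80665), i.e. the scale is now reported in m/s^2 as the IIO ABI expects rather than in g. A sketch of the new check, outside the driver:

    #include <errno.h>

    /* val = integer Hz, val2 = micro-Hz fraction, as in IIO write_raw() */
    static int set_bw_checked(int val, int val2)
    {
            if (val2)       /* e.g. 20.5 Hz: no such entry in bw_table */
                    return -EINVAL;
            /* ... look up val in bw_table and program the device ... */
            return 0;
    }
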
1256    diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
1257    index e472cff6eeae..476b5a333066 100644
1258    --- a/drivers/iio/industrialio-buffer.c
1259    +++ b/drivers/iio/industrialio-buffer.c
1260    @@ -949,7 +949,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
1261    
1262     /* Now we have the two masks, work from least sig and build up sizes */
1263     for_each_set_bit(out_ind,
1264    - indio_dev->active_scan_mask,
1265    + buffer->scan_mask,
1266     indio_dev->masklength) {
1267     in_ind = find_next_bit(indio_dev->active_scan_mask,
1268           indio_dev->masklength,
1269    diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1270    index 66c5d130c8c2..0e722c103562 100644
1271    --- a/drivers/md/dm-bufio.c
1272    +++ b/drivers/md/dm-bufio.c
1273    @@ -1541,7 +1541,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
1274     BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1275           (block_size & (block_size - 1)));
1276    
1277    - c = kmalloc(sizeof(*c), GFP_KERNEL);
1278    + c = kzalloc(sizeof(*c), GFP_KERNEL);
1279     if (!c) {
1280     r = -ENOMEM;
1281     goto bad_client;
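
kmalloc() returns uninitialized memory; the dm-bufio hunk switches to kzalloc() so every field of the client structure starts at zero, keeping error and teardown paths from acting on garbage in fields that were never explicitly set. The userspace equivalent of the same one-word change (fields invented for the sketch):

    #include <stdlib.h>

    struct client {
            int   flags;    /* stand-ins for dm_bufio_client's fields */
            void *cache;    /* examined by teardown even on early failure */
    };

    static struct client *client_create(void)
    {
            /* calloc is to malloc what kzalloc is to kmalloc */
            return calloc(1, sizeof(struct client));
    }
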
1282    diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
1283    index 5f054c44b485..2c63326638b6 100644
1284    --- a/drivers/md/dm-cache-target.c
1285    +++ b/drivers/md/dm-cache-target.c
1286    @@ -231,7 +231,7 @@ struct cache {
1287     /*
1288     * cache_size entries, dirty if set
1289     */
1290    - dm_cblock_t nr_dirty;
1291    + atomic_t nr_dirty;
1292     unsigned long *dirty_bitset;
1293    
1294   static void tunnel_dst_reset(struct ip_tunnel *t)   /*
1295    @@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
1296     static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
1297   {   {
1298  - tunnel_dst_set(t, NULL);   if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
1299  + tunnel_dst_set(t, NULL, 0);  - cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
1300    + atomic_inc(&cache->nr_dirty);
1301     policy_set_dirty(cache->policy, oblock);
1302     }
1303     }
1304    @@ -501,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
1305     {
1306     if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
1307     policy_clear_dirty(cache->policy, oblock);
1308    - cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
1309    - if (!from_cblock(cache->nr_dirty))
1310    + if (atomic_dec_return(&cache->nr_dirty) == 0)
1311     dm_table_event(cache->ti->table);
1312     }
1313   }   }
1314    @@ -2269,7 +2268,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
1315     atomic_set(&cache->quiescing_ack, 0);
1316    
1317   void ip_tunnel_dst_reset_all(struct ip_tunnel *t)   r = -ENOMEM;
1318  @@ -93,20 +95,25 @@ void ip_tunnel_dst_reset_all(struct ip_tunnel *t)  - cache->nr_dirty = 0;
1319   int i;  + atomic_set(&cache->nr_dirty, 0);
1320     cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
1321   for_each_possible_cpu(i)   if (!cache->dirty_bitset) {
1322  - __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);   *error = "could not allocate dirty bitset";
1323  + __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);  @@ -2808,7 +2807,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
1324    
1325     residency = policy_residency(cache->policy);
1326    
1327    - DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
1328    + DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
1329           (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
1330           (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
1331           (unsigned long long)nr_blocks_metadata,
1332    @@ -2821,7 +2820,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
1333           (unsigned) atomic_read(&cache->stats.write_miss),
1334           (unsigned) atomic_read(&cache->stats.demotion),
1335           (unsigned) atomic_read(&cache->stats.promotion),
1336    -       (unsigned long long) from_cblock(cache->nr_dirty));
1337    +       (unsigned long) atomic_read(&cache->nr_dirty));
1338    
1339     if (writethrough_mode(&cache->features))
1340     DMEMIT("1 writethrough ");
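
nr_dirty was a plain counter updated from concurrent paths, so two CPUs could each perform a read-modify-write and lose an update. The dm-cache hunks above make it an atomic_t and fold the zero test into atomic_dec_return(), the only race-free way to detect the clean transition. C11 analogue of the pattern:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int nr_dirty;

    static void set_dirty(void)
    {
            atomic_fetch_add(&nr_dirty, 1);         /* ~ atomic_inc() */
    }

    static void clear_dirty(void)
    {
            /* ~ atomic_dec_return() == 0: decrement and test in one step */
            if (atomic_fetch_sub(&nr_dirty, 1) - 1 == 0)
                    printf("last dirty block cleaned, raise table event\n");
    }

    int main(void)
    {
            set_dirty();
            clear_dirty();          /* prints the event message */
            return 0;
    }
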
1341    diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
1342    index 66acb2cbd9df..7c28cb55610b 100644
1343    --- a/drivers/net/wireless/ath/ath9k/xmit.c
1344    +++ b/drivers/net/wireless/ath/ath9k/xmit.c
1345    @@ -887,6 +887,15 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
1346    
1347     tx_info = IEEE80211_SKB_CB(skb);
1348     tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
1349    +
1350    + /*
1351    + * No aggregation session is running, but there may be frames
1352    + * from a previous session or a failed attempt in the queue.
1353    + * Send them out as normal data frames
1354    + */
1355    + if (!tid->active)
1356    + tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1357    +
1358     if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
1359     bf->bf_state.bf_type = 0;
1360     return bf;
1361    diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
1362    index c31aa07b3ba5..da1c6cb1a41e 100644
1363    --- a/drivers/pnp/pnpacpi/core.c
1364    +++ b/drivers/pnp/pnpacpi/core.c
1365    @@ -339,8 +339,7 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
1366     struct pnp_dev *pnp = _pnp;
1367    
1368     /* true means it matched */
1369    - return !acpi->physical_node_count
1370    -    && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
1371    + return pnp->data == acpi;
1372   }   }
  EXPORT_SYMBOL(ip_tunnel_dst_reset_all);  
1373    
1374  -static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)   static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
1375  +static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,  diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
1376  + u32 cookie, __be32 *saddr)  index 9b60b1f3261c..44341dc5b148 100644
1377   {  --- a/drivers/rapidio/devices/tsi721_dma.c
1378  + struct ip_tunnel_dst *idst;  +++ b/drivers/rapidio/devices/tsi721_dma.c
1379   struct dst_entry *dst;  @@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
1380     "desc %p not ACKed\n", tx_desc);
1381     }
1382    
1383   rcu_read_lock();  + if (ret == NULL) {
1384  - dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);  + dev_dbg(bdma_chan->dchan.device->dev,
1385  + idst = this_cpu_ptr(t->dst_cache);  + "%s: unable to obtain tx descriptor\n", __func__);
1386  + dst = rcu_dereference(idst->dst);  + goto err_out;
1387   if (dst && !atomic_inc_not_zero(&dst->__refcnt))  + }
1388   dst = NULL;  +
1389   if (dst) {   i = bdma_chan->wr_count_next % bdma_chan->bd_num;
1390  - if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {   if (i == bdma_chan->bd_num - 1) {
1391  + if (!dst->obsolete || dst->ops->check(dst, cookie)) {   i = 0;
1392  + *saddr = idst->saddr;  @@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
1393  + } else {   tx_desc->txd.phys = bdma_chan->bd_phys +
1394   tunnel_dst_reset(t);   i * sizeof(struct tsi721_dma_desc);
1395   dst_release(dst);   tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
1396   dst = NULL;  -
1397  @@ -366,7 +373,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)  +err_out:
1398     spin_unlock_bh(&bdma_chan->lock);
1399   if (!IS_ERR(rt)) {  
1400   tdev = rt->dst.dev;   return ret;
1401  - tunnel_dst_set(tunnel, &rt->dst);  diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
1402  + tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);  index 9db097a28a74..d99ab3bfaaee 100644
1403   ip_rt_put(rt);  --- a/drivers/scsi/scsi_lib.c
1404   }  +++ b/drivers/scsi/scsi_lib.c
1405   if (dev->type != ARPHRD_ETHER)  @@ -806,6 +806,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1406  @@ -610,7 +617,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,   scsi_next_command(cmd);
1407   init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,   return;
  tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);  
   
 - rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;  
 + rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;  
   
  if (!rt) {  
  rt = ip_route_output_key(tunnel->net, &fl4);  
 @@ -620,7 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,  
  goto tx_error;  
1408   }   }
1409   if (connected)  + } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
1410  - tunnel_dst_set(tunnel, &rt->dst);  + /*
1411  + tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);  + * Certain non BLOCK_PC requests are commands that don't
1412    + * actually transfer anything (FLUSH), so cannot use
1413    + * good_bytes != blk_rq_bytes(req) as the signal for an error.
1414    + * This sets the error explicitly for the problem case.
1415    + */
1416    + error = __scsi_error_from_host_byte(cmd, result);
1417   }   }
1418    
1419   if (rt->dst.dev == dev) {   /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
1420  diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c  diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
1421  index bcf206c79005..847e69cbff7e 100644  index d7efd0173a9a..7d7578872a84 100644
1422  --- a/net/ipv4/ip_tunnel_core.c  --- a/drivers/staging/vt6655/bssdb.c
1423  +++ b/net/ipv4/ip_tunnel_core.c  +++ b/drivers/staging/vt6655/bssdb.c
1424  @@ -74,7 +74,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,  @@ -983,7 +983,7 @@ start:
1425   iph->daddr = dst;   pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
1426   iph->saddr = src;   }
  iph->ttl = ttl;  
 - __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);  
 + __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);  
   
  err = ip_local_out_sk(sk, skb);  
  if (unlikely(net_xmit_eval(err)))  
 diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c  
 index d84dc8d4c916..d11a50d24295 100644  
 --- a/net/ipv4/ipmr.c  
 +++ b/net/ipv4/ipmr.c  
 @@ -1663,7 +1663,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)  
  iph->protocol = IPPROTO_IPIP;  
  iph->ihl = 5;  
  iph->tot_len = htons(skb->len);  
 - ip_select_ident(skb, skb_dst(skb), NULL);  
 + ip_select_ident(skb, NULL);  
  ip_send_check(iph);  
   
  memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));  
 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c  
 index a9dbe58bdfe7..2c65160565e1 100644  
 --- a/net/ipv4/raw.c  
 +++ b/net/ipv4/raw.c  
 @@ -389,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,  
  iph->check   = 0;  
  iph->tot_len = htons(length);  
  if (!iph->id)  
 - ip_select_ident(skb, &rt->dst, NULL);  
 + ip_select_ident(skb, NULL);  
1427    
1428   iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);  - {
1429    + if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
1430     pDevice->byReAssocCount++;
1431     /* 10 sec timeout */
1432     if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
1433    diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
1434    index a952df1bf9d6..6f13f0e597f8 100644
1435    --- a/drivers/staging/vt6655/device_main.c
1436    +++ b/drivers/staging/vt6655/device_main.c
1437    @@ -2430,6 +2430,7 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
1438     int             handled = 0;
1439     unsigned char byData = 0;
1440     int             ii = 0;
1441    + unsigned long flags;
1442     //    unsigned char byRSSI;
1443    
1444     MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
1445    @@ -2455,7 +2456,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
1446    
1447     handled = 1;
1448     MACvIntDisable(pDevice->PortOffset);
1449    - spin_lock_irq(&pDevice->lock);
1450    +
1451    + spin_lock_irqsave(&pDevice->lock, flags);
1452    
1453     //Make sure current page is 0
1454     VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
1455    @@ -2696,7 +2698,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
1456     MACvSelectPage1(pDevice->PortOffset);
1457   }   }
 diff --git a/net/ipv4/route.c b/net/ipv4/route.c  
 index be9f2b1ac3ab..fd618d48f4ce 100644  
 --- a/net/ipv4/route.c  
 +++ b/net/ipv4/route.c  
 @@ -89,6 +89,7 @@  
  #include <linux/rcupdate.h>  
  #include <linux/times.h>  
  #include <linux/slab.h>  
 +#include <linux/jhash.h>  
  #include <net/dst.h>  
  #include <net/net_namespace.h>  
  #include <net/protocol.h>  
 @@ -456,39 +457,45 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,  
  return neigh_create(&arp_tbl, pkey, dev);  
  }  
1458    
1459  -/*  - spin_unlock_irq(&pDevice->lock);
1460  - * Peer allocation may fail only in serious out-of-memory conditions.  However  + spin_unlock_irqrestore(&pDevice->lock, flags);
1461  - * we still can generate some output.  +
1462  - * Random ID selection looks a bit dangerous because we have no chances to   MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
1463  - * select ID being unique in a reasonable period of time.  
1464  - * But broken packet identifier may be better than no packet at all.   return IRQ_RETVAL(handled);
1465  +#define IP_IDENTS_SZ 2048u  diff --git a/fs/open.c b/fs/open.c
1466  +struct ip_ident_bucket {  index 9d64679cec73..dd24f21d31f7 100644
1467  + atomic_t id;  --- a/fs/open.c
1468  + u32 stamp32;  +++ b/fs/open.c
1469  +};  @@ -263,11 +263,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1470  +   return -EPERM;
1471  +static struct ip_ident_bucket *ip_idents __read_mostly;  
1472  +   /*
1473  +/* In order to protect privacy, we add a perturbation to identifiers  - * We can not allow to do any fallocate operation on an active
1474  + * if one generator is seldom used. This makes it hard for an attacker  - * swapfile
1475  + * to infer how many packets were sent between two points in time.  + * We cannot allow any fallocate operation on an active swapfile
1476     */
1477     if (IS_SWAPFILE(inode))
1478    - ret = -ETXTBSY;
1479    + return -ETXTBSY;
1480    
1481     /*
1482     * Revalidate the write permissions, in case security policy has
1483    diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
1484    index 002a2855c046..3d33794e4f3e 100644
1485    --- a/include/dt-bindings/pinctrl/dra.h
1486    +++ b/include/dt-bindings/pinctrl/dra.h
1487    @@ -30,7 +30,8 @@
1488     #define MUX_MODE14 0xe
1489     #define MUX_MODE15 0xf
1490    
1491    -#define PULL_ENA (1 << 16)
1492    +#define PULL_ENA (0 << 16)
1493    +#define PULL_DIS (1 << 16)
1494     #define PULL_UP (1 << 17)
1495     #define INPUT_EN (1 << 18)
1496     #define SLEWCONTROL (1 << 19)
1497    @@ -38,10 +39,10 @@
1498     #define WAKEUP_EVENT (1 << 25)
1499    
1500     /* Active pin states */
1501    -#define PIN_OUTPUT 0
1502    +#define PIN_OUTPUT (0 | PULL_DIS)
1503     #define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP)
1504     #define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA)
1505    -#define PIN_INPUT INPUT_EN
1506    +#define PIN_INPUT (INPUT_EN | PULL_DIS)
1507     #define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL)
1508     #define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP)
1509     #define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN)
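
The dra.h hunk is a polarity fix: on these pads the pull circuitry is enabled when bit 16 is 0, so the old PULL_ENA (1 << 16) actually disabled it, and plain PIN_INPUT/PIN_OUTPUT left bit 16 clear, silently enabling a pull. With PULL_ENA redefined as (0 << 16) and an explicit PULL_DIS, the macros now spell out the intent. Self-checking excerpt (values copied from the hunk):

    #include <assert.h>

    #define PULL_ENA          (0 << 16)   /* pull active while bit 16 is clear */
    #define PULL_DIS          (1 << 16)
    #define PULL_UP           (1 << 17)
    #define INPUT_EN          (1 << 18)

    #define PIN_INPUT         (INPUT_EN | PULL_DIS)
    #define PIN_INPUT_PULLUP  (PULL_ENA | INPUT_EN | PULL_UP)

    int main(void)
    {
            assert(PIN_INPUT & PULL_DIS);            /* floating input: bit set */
            assert(!(PIN_INPUT_PULLUP & PULL_DIS));  /* pulled input: bit clear */
            return 0;
    }
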
1510    diff --git a/include/linux/printk.h b/include/linux/printk.h
1511    index 8752f7595b27..7847301e2837 100644
1512    --- a/include/linux/printk.h
1513    +++ b/include/linux/printk.h
1514    @@ -128,9 +128,9 @@ asmlinkage __printf(1, 2) __cold
1515     int printk(const char *fmt, ...);
1516    
1517     /*
1518    - * Special printk facility for scheduler use only, _DO_NOT_USE_ !
1519    + * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
1520    */    */
1521  -static void ip_select_fb_ident(struct iphdr *iph)  -__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
1522  +u32 ip_idents_reserve(u32 hash, int segs)  +__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
  {  
 - static DEFINE_SPINLOCK(ip_fb_id_lock);  
 - static u32 ip_fallback_id;  
 - u32 salt;  
 + struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;  
 + u32 old = ACCESS_ONCE(bucket->stamp32);  
 + u32 now = (u32)jiffies;  
 + u32 delta = 0;  
 +  
 + if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)  
 + delta = prandom_u32_max(now - old);  
   
 - spin_lock_bh(&ip_fb_id_lock);  
 - salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);  
 - iph->id = htons(salt & 0xFFFF);  
 - ip_fallback_id = salt;  
 - spin_unlock_bh(&ip_fb_id_lock);  
 + return atomic_add_return(segs + delta, &bucket->id) - segs;  
  }  
 +EXPORT_SYMBOL(ip_idents_reserve);  
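
ip_idents_reserve() is the core of the new IP-ID generator: IDs live in hashed buckets, and a bucket that sat idle gets a random bump bounded by its idle time, so consecutive IDs no longer reveal how many packets were sent in between. A userspace C11 analogue (rand() stands in for prandom_u32_max(), and "now" is a caller-supplied tick standing in for jiffies):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct ident_bucket {
            atomic_uint id;
            atomic_uint stamp;      /* tick of last use */
    };

    static unsigned int idents_reserve(struct ident_bucket *b,
                                       unsigned int now, int segs)
    {
            unsigned int old = atomic_load(&b->stamp);
            unsigned int delta = 0;

            /* one winner per tick applies jitter bounded by the idle time */
            if (old != now &&
                atomic_compare_exchange_strong(&b->stamp, &old, now))
                    delta = rand() % (now - old);

            /* reserve segs consecutive IDs; return the first one */
            return atomic_fetch_add(&b->id, segs + delta) + delta;
    }
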
1523    
1524  -void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)   /*
1525  +void __ip_select_ident(struct iphdr *iph, int segs)    * Please don't use printk_ratelimit(), because it shares ratelimiting state
1526    @@ -165,7 +165,7 @@ int printk(const char *s, ...)
1527     return 0;
1528     }
1529     static inline __printf(1, 2) __cold
1530    -int printk_sched(const char *s, ...)
1531    +int printk_deferred(const char *s, ...)
1532   {   {
1533  - struct net *net = dev_net(dst->dev);   return 0;
1534  - struct inet_peer *peer;   }
1535  + static u32 ip_idents_hashrnd __read_mostly;  diff --git a/init/main.c b/init/main.c
1536  + u32 hash, id;  index 48655ceb66f4..eb0ea86aefde 100644
1537    --- a/init/main.c
1538  - peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);  +++ b/init/main.c
1539  - if (peer) {  @@ -617,6 +617,10 @@ asmlinkage __visible void __init start_kernel(void)
1540  - iph->id = htons(inet_getid(peer, more));   if (efi_enabled(EFI_RUNTIME_SERVICES))
1541  - inet_putpeer(peer);   efi_enter_virtual_mode();
1542  - return;   #endif
1543  - }  +#ifdef CONFIG_X86_ESPFIX64
1544  + net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));  + /* Should be run before the first non-init thread is created */
1545    + init_espfix_bsp();
1546  - ip_select_fb_ident(iph);  +#endif
1547  + hash = jhash_3words((__force u32)iph->daddr,   thread_info_cache_init();
1548  +    (__force u32)iph->saddr,   cred_init();
1549  +    iph->protocol,   fork_init(totalram_pages);
1550  +    ip_idents_hashrnd);  diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
1551  + id = ip_idents_reserve(hash, segs);  index 221229cf0190..63594befdd58 100644
1552  + iph->id = htons(id);  --- a/kernel/printk/printk.c
1553    +++ b/kernel/printk/printk.c
1554    @@ -2474,7 +2474,7 @@ void wake_up_klogd(void)
1555     preempt_enable();
1556   }   }
  EXPORT_SYMBOL(__ip_select_ident);  
1557    
1558  @@ -2705,6 +2712,12 @@ int __init ip_rt_init(void)  -int printk_sched(const char *fmt, ...)
1559    +int printk_deferred(const char *fmt, ...)
1560     {
1561     unsigned long flags;
1562     va_list args;
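
The rename from printk_sched() to printk_deferred() tracks what the function actually guarantees: the message is only recorded, and the console is flushed later from a safe context, which is what any code holding scheduler or timekeeping locks needs, not just the scheduler. A toy userspace model of the defer-then-flush split (the kernel flushes via irq_work; everything below is invented):

    #include <stdio.h>
    #include <string.h>

    static char pending[256];

    static void log_deferred(const char *msg)   /* ~ printk_deferred() */
    {
            strncpy(pending, msg, sizeof(pending) - 1);  /* record only */
    }

    static void flush_log(void)                 /* ~ later, safe context */
    {
            if (pending[0]) {
                    fputs(pending, stdout);
                    pending[0] = '\0';
            }
    }

    int main(void)
    {
            log_deferred("sched: RT throttling activated\n");
            flush_log();
            return 0;
    }
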
1563    diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1564    index 084d17f89139..8da7e4990427 100644
1565    --- a/kernel/sched/core.c
1566    +++ b/kernel/sched/core.c
1567    @@ -1320,7 +1320,7 @@ out:
1568     * leave kernel.
1569     */
1570     if (p->mm && printk_ratelimit()) {
1571    - printk_sched("process %d (%s) no longer affine to cpu%d\n",
1572    + printk_deferred("process %d (%s) no longer affine to cpu%d\n",
1573     task_pid_nr(p), p->comm, cpu);
1574     }
1575     }
1576    diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
1577    index 14bc348ba3b4..34e054ddf50a 100644
1578    --- a/kernel/sched/deadline.c
1579    +++ b/kernel/sched/deadline.c
1580    @@ -352,7 +352,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1581    
1582     if (!lag_once) {
1583     lag_once = true;
1584    - printk_sched("sched: DL replenish lagged to much\n");
1585    + printk_deferred("sched: DL replenish lagged to much\n");
1586     }
1587     dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1588     dl_se->runtime = pi_se->dl_runtime;
1589    diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
1590    index bd2267ad404f..c19b3cc53b00 100644
1591    --- a/kernel/sched/rt.c
1592    +++ b/kernel/sched/rt.c
1593    @@ -857,7 +857,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
1594    
1595     if (!once) {
1596     once = true;
1597    - printk_sched("sched: RT throttling activated\n");
1598    + printk_deferred("sched: RT throttling activated\n");
1599     }
1600     } else {
1601     /*
1602    diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
1603    index ad362c260ef4..9c94c19f1305 100644
1604    --- a/kernel/time/clockevents.c
1605    +++ b/kernel/time/clockevents.c
1606    @@ -146,7 +146,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
1607   {   {
1608   int rc = 0;   /* Nothing to do if we already reached the limit */
1609     if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
1610    - printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
1611    + printk_deferred(KERN_WARNING
1612    + "CE: Reprogramming failure. Giving up\n");
1613     dev->next_event.tv64 = KTIME_MAX;
1614     return -ETIME;
1615     }
1616    @@ -159,9 +160,10 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
1617     if (dev->min_delta_ns > MIN_DELTA_LIMIT)
1618     dev->min_delta_ns = MIN_DELTA_LIMIT;
1619    
1620    - printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
1621    -       dev->name ? dev->name : "?",
1622    -       (unsigned long long) dev->min_delta_ns);
1623    + printk_deferred(KERN_WARNING
1624    + "CE: %s increased min_delta_ns to %llu nsec\n",
1625    + dev->name ? dev->name : "?",
1626    + (unsigned long long) dev->min_delta_ns);
1627     return 0;
1628     }
1629    
1630  + ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);  diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
1631  + if (!ip_idents)  index 4d23dc4d8139..313a662911b1 100644
1632  + panic("IP: failed to allocate ip_idents\n");  --- a/kernel/time/sched_clock.c
1633  +  +++ b/kernel/time/sched_clock.c
1634  + prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));  @@ -204,7 +204,8 @@ void __init sched_clock_postinit(void)
 +  
  #ifdef CONFIG_IP_ROUTE_CLASSID  
  ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));  
  if (!ip_rt_acct)  
 diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c  
 index 48539fff6357..08c8ab490fe5 100644  
 --- a/net/ipv4/tcp_vegas.c  
 +++ b/net/ipv4/tcp_vegas.c  
 @@ -219,7 +219,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,  
  * This is:  
  *     (actual rate in segments) * baseRTT  
  */  
 - target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;  
 + target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;  
 + do_div(target_cwnd, rtt);  
   
  /* Calculate the difference between the window we had,  
  * and the window we would like to have. This quantity  
 diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c  
 index 1b8e28fcd7e1..4cd4e1be3a71 100644  
 --- a/net/ipv4/tcp_veno.c  
 +++ b/net/ipv4/tcp_veno.c  
 @@ -145,7 +145,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,  
   
  rtt = veno->minrtt;  
   
 - target_cwnd = (tp->snd_cwnd * veno->basertt);  
 + target_cwnd = (u64)tp->snd_cwnd * veno->basertt;  
  target_cwnd <<= V_PARAM_SHIFT;  
  do_div(target_cwnd, rtt);  
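
The Vegas and Veno hunks fix the same arithmetic bug: snd_cwnd and the base RTT are both u32, so their product is computed in 32 bits and wraps before the division. Casting one operand to u64 widens the multiply, and do_div() then divides the 64-bit value. The difference, with invented numbers:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t cwnd = 70000, base_rtt = 80000, rtt = 100000;

            /* product 5.6e9 wraps modulo 2^32 before the division */
            uint32_t wrong = cwnd * base_rtt / rtt;            /* 13050 */
            /* widened multiply, as in the (u64) cast + do_div() fix */
            uint64_t right = (uint64_t)cwnd * base_rtt / rtt;  /* 56000 */

            printf("wrong=%" PRIu32 " right=%" PRIu64 "\n", wrong, right);
            return 0;
    }
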
   
 diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c  
 index 05f2b484954f..91771a7c802f 100644  
 --- a/net/ipv4/xfrm4_mode_tunnel.c  
 +++ b/net/ipv4/xfrm4_mode_tunnel.c  
 @@ -58,12 +58,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)  
   
  top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?  
  0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));  
 - ip_select_ident(skb, dst->child, NULL);  
   
  top_iph->ttl = ip4_dst_hoplimit(dst->child);  
   
  top_iph->saddr = x->props.saddr.a4;  
  top_iph->daddr = x->id.daddr.a4;  
 + ip_select_ident(skb, NULL);  
1635    
1636     static int sched_clock_suspend(void)
1637     {
1638    - sched_clock_poll(&sched_clock_timer);
1639    + update_sched_clock();
1640    + hrtimer_cancel(&sched_clock_timer);
1641     cd.suspended = true;
1642   return 0;   return 0;
1643   }   }
1644  diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c  @@ -212,6 +213,7 @@ static int sched_clock_suspend(void)
1645  index fbf11562b54c..1362d3a7b26f 100644   static void sched_clock_resume(void)
1646  --- a/net/ipv6/ip6_output.c   {
1647  +++ b/net/ipv6/ip6_output.c   cd.epoch_cyc = read_sched_clock();
1648  @@ -537,6 +537,20 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)  + hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
1649   skb_copy_secmark(to, from);   cd.suspended = false;
1650     }
1651    
1652    diff --git a/lib/btree.c b/lib/btree.c
1653    index f9a484676cb6..4264871ea1a0 100644
1654    --- a/lib/btree.c
1655    +++ b/lib/btree.c
1656    @@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
1657    
1658     void btree_destroy(struct btree_head *head)
1659     {
1660    + mempool_free(head->node, head->mempool);
1661     mempool_destroy(head->mempool);
1662     head->mempool = NULL;
1663   }   }
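
The lib/btree.c change plugs a leak rather than a crash: btree_init() draws the root node from the mempool, and mempool_destroy() tears down only the pool itself, so a node still held at destroy time was lost. Returning it first restores the alloc/free pairing. Userspace shape of the same discipline (free() stands in for both mempool calls):

    #include <stdlib.h>

    struct head {
            void *pool;     /* stand-in for mempool_t */
            void *node;     /* element drawn from the pool at init time */
    };

    static void destroy(struct head *h)
    {
            free(h->node);  /* ~ mempool_free(head->node, ...): the added line */
            h->node = NULL;
            free(h->pool);  /* ~ mempool_destroy() */
            h->pool = NULL;
    }
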
1664    diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1665    index 67c927a10add..fe99d963b987 100644
1666    --- a/mm/memcontrol.c
1667    +++ b/mm/memcontrol.c
1668    @@ -5544,8 +5544,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
1669     {
1670     struct mem_cgroup_eventfd_list *ev;
1671    
1672  +static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)  + spin_lock(&memcg_oom_lock);
 +{  
 + static u32 ip6_idents_hashrnd __read_mostly;  
 + u32 hash, id;  
 +  
 + net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));  
1673  +  +
1674  + hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);   list_for_each_entry(ev, &memcg->oom_notify, list)
1675  + hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);   eventfd_signal(ev->eventfd, 1);
1676  +  +
1677  + id = ip_idents_reserve(hash, 1);  + spin_unlock(&memcg_oom_lock);
1678  + fhdr->identification = htonl(id);   return 0;
1679  +}   }
 +  
  int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))  
  {  
  struct sk_buff *frag;  
 diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c  
 index 56596ce390a1..6179ac186ab9 100644  
 --- a/net/ipv6/output_core.c  
 +++ b/net/ipv6/output_core.c  
 @@ -8,31 +8,6 @@  
  #include <net/addrconf.h>  
  #include <net/secure_seq.h>  
1680    
1681  -void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)  diff --git a/mm/page-writeback.c b/mm/page-writeback.c
1682  -{  index 154af210178b..f972182d4218 100644
1683  - static atomic_t ipv6_fragmentation_id;  --- a/mm/page-writeback.c
1684  - struct in6_addr addr;  +++ b/mm/page-writeback.c
1685  - int ident;  @@ -1324,9 +1324,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
1686  -   *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
1687  -#if IS_ENABLED(CONFIG_IPV6)  
1688  - struct inet_peer *peer;   if (bdi_bg_thresh)
1689  - struct net *net;  - *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
1690  -  - background_thresh,
1691  - net = dev_net(rt->dst.dev);  - dirty_thresh);
1692  - peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);  + *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
1693  - if (peer) {  + background_thresh,
1694  - fhdr->identification = htonl(inet_getid(peer, 0));  + dirty_thresh) : 0;
 - inet_putpeer(peer);  
 - return;  
 - }  
 -#endif  
 - ident = atomic_inc_return(&ipv6_fragmentation_id);  
 -  
 - addr = rt->rt6i_dst.addr;  
 - addr.s6_addr32[0] ^= (__force __be32)ident;  
 - fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32));  
 -}  
 -EXPORT_SYMBOL(ipv6_select_ident);  
1695    
1696   int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)   /*
1697     * In order to avoid the stacked BDI deadlock we need
1698    diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1699    index d64f5f90000c..e98306fc4234 100644
1700    --- a/mm/page_alloc.c
1701    +++ b/mm/page_alloc.c
1702    @@ -2435,7 +2435,7 @@ static inline int
1703     gfp_to_alloc_flags(gfp_t gfp_mask)
1704   {   {
1705  diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c   int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1706  index c47444e4cf8c..7f0e1cf2d7e8 100644  - const gfp_t wait = gfp_mask & __GFP_WAIT;
1707  --- a/net/netfilter/ipvs/ip_vs_xmit.c  + const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
1708  +++ b/net/netfilter/ipvs/ip_vs_xmit.c  
1709  @@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,   /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1710   iph->daddr = cp->daddr.ip;   BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
1711   iph->saddr = saddr;  @@ -2444,20 +2444,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
1712   iph->ttl = old_iph->ttl;   * The caller may dip into page reserves a bit more if the caller
1713  - ip_select_ident(skb, &rt->dst, NULL);   * cannot run direct reclaim, or if the caller has realtime scheduling
1714  + ip_select_ident(skb, NULL);   * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1715    - * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1716   /* Another hack: avoid icmp_send in ip_fragment */  + * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
1717   skb->local_df = 1;   */
1718  diff --git a/net/sctp/associola.c b/net/sctp/associola.c   alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 index 0b999987b658..a6953b0436a5 100644  
 --- a/net/sctp/associola.c  
 +++ b/net/sctp/associola.c  
 @@ -1151,6 +1151,7 @@ void sctp_assoc_update(struct sctp_association *asoc,  
  asoc->c = new->c;  
  asoc->peer.rwnd = new->peer.rwnd;  
  asoc->peer.sack_needed = new->peer.sack_needed;  
 + asoc->peer.auth_capable = new->peer.auth_capable;  
  asoc->peer.i = new->peer.i;  
  sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,  
  asoc->peer.i.initial_tsn, GFP_ATOMIC);  
 diff --git a/net/sctp/output.c b/net/sctp/output.c  
 index 0f4d15fc2627..8267b06c3646 100644  
 --- a/net/sctp/output.c  
 +++ b/net/sctp/output.c  
 @@ -599,7 +599,7 @@ out:  
  return err;  
  no_route:  
  kfree_skb(nskb);  
 - IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);  
 + IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);  
   
  /* FIXME: Returning the 'err' will effect all the associations  
  * associated with a socket, although only one of the paths of the  
 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c  
 index c08fbd11ceff..ed608432e4f9 100644  
 --- a/net/xfrm/xfrm_policy.c  
 +++ b/net/xfrm/xfrm_policy.c  
 @@ -2107,6 +2107,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,  
  goto no_transform;  
  }  
1719    
1720  + dst_hold(&xdst->u.dst);  - if (!wait) {
1721  + xdst->u.dst.flags |= DST_NOCACHE;  + if (atomic) {
1722   route = xdst->route;   /*
1723   }  - * Not worth trying to allocate harder for
1724   }  - * __GFP_NOMEMALLOC even if it can't schedule.
1725  diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c  + * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
1726  index 51398ae6cda8..d4c0fbe568ff 100644  + * if it can't schedule.
1727  --- a/net/xfrm/xfrm_user.c   */
1728  +++ b/net/xfrm/xfrm_user.c  - if  (!(gfp_mask & __GFP_NOMEMALLOC))
1729  @@ -177,9 +177,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,  + if (!(gfp_mask & __GFP_NOMEMALLOC))
1730      attrs[XFRMA_ALG_AEAD] ||   alloc_flags |= ALLOC_HARDER;
1731      attrs[XFRMA_ALG_CRYPT] ||   /*
1732      attrs[XFRMA_ALG_COMP] ||  - * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1733  -    attrs[XFRMA_TFCPAD] ||  - * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1734  -    (ntohl(p->id.spi) >= 0x10000))  + * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
1735    + * comment for __cpuset_node_allowed_softwall().
1736     */
1737     alloc_flags &= ~ALLOC_CPUSET;
1738     } else if (unlikely(rt_task(current)) && !in_interrupt())
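
The gfp_to_alloc_flags() hunk renames the predicate to match its meaning: a request is treated as atomic (allowed to dip into reserves and ignore cpuset limits) only if it can neither sleep (__GFP_WAIT) nor wake kswapd (__GFP_NO_KSWAPD). That apparently keeps transparent-hugepage attempts, which carry __GFP_NO_KSWAPD but may drop __GFP_WAIT, from being mistaken for GFP_ATOMIC. Just the predicate, with flag values invented for the sketch:

    #include <stdbool.h>

    #define __GFP_WAIT        (1u << 0)   /* invented bit positions */
    #define __GFP_NO_KSWAPD   (1u << 1)

    static bool is_atomic(unsigned gfp_mask)
    {
            /* was: !(gfp_mask & __GFP_WAIT) */
            return !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
    }
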
1739    diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
1740    index 950909f04ee6..13752d96275e 100644
1741    --- a/net/l2tp/l2tp_ppp.c
1742    +++ b/net/l2tp/l2tp_ppp.c
1743    @@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1744     int err;
1745    
1746     if (level != SOL_PPPOL2TP)
1747    - return udp_prot.setsockopt(sk, level, optname, optval, optlen);
1748    + return -EINVAL;
1749    
1750     if (optlen < sizeof(int))
1751     return -EINVAL;
1752    @@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
1753     struct pppol2tp_session *ps;
1754    
1755     if (level != SOL_PPPOL2TP)
1756    - return udp_prot.getsockopt(sk, level, optname, optval, optlen);
1757    + return -EINVAL;
1758    
1759     if (get_user(len, optlen))
1760     return -EFAULT;
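
Both l2tp hunks close the same hole: PPPoL2TP sockets forwarded foreign-level socket options to udp_prot even though the underlying tunnel socket is not guaranteed to be UDP, letting UDP handlers run on the wrong socket type. Rejecting other levels outright is the safe default. The shape of the fix (SOL_PPPOL2TP taken as 273, its value in linux/socket.h):

    #include <errno.h>

    #define SOL_PPPOL2TP 273        /* per include/linux/socket.h */

    static int pppol2tp_opt_sketch(int level)
    {
            if (level != SOL_PPPOL2TP)
                    return -EINVAL; /* was: udp_prot.setsockopt(...) */
            /* ... handle only this protocol's own option names ... */
            return 0;
    }
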
1761    diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
1762    index 19d36d4117e0..1fbb057b2db1 100644
1763    --- a/net/mac80211/tx.c
1764    +++ b/net/mac80211/tx.c
1765    @@ -414,6 +414,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
1766     if (ieee80211_has_order(hdr->frame_control))
1767     return TX_CONTINUE;
1768    
1769    + if (ieee80211_is_probe_req(hdr->frame_control))
1770    + return TX_CONTINUE;
1771    +
1772     if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
1773     info->hw_queue = tx->sdata->vif.cab_queue;
1774    
1775    @@ -463,6 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
1776     {
1777     struct sta_info *sta = tx->sta;
1778     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
1779    + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
1780     struct ieee80211_local *local = tx->local;
1781    
1782     if (unlikely(!sta))
1783    @@ -473,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
1784         !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
1785     int ac = skb_get_queue_mapping(tx->skb);
1786    
1787    + if (ieee80211_is_mgmt(hdr->frame_control) &&
1788    +    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
1789    + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1790    + return TX_CONTINUE;
1791    + }
1792    +
1793     ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
1794           sta->sta.addr, sta->sta.aid, ac);
1795     if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
1796    @@ -531,19 +541,9 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
1797     static ieee80211_tx_result debug_noinline
1798     ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
1799     {
1800    - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
1801    - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
1802  -  -
1803  +    attrs[XFRMA_TFCPAD])   if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
1804   goto out;   return TX_CONTINUE;
  break;  
   
 @@ -207,7 +205,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,  
     attrs[XFRMA_ALG_AUTH] ||  
     attrs[XFRMA_ALG_AUTH_TRUNC] ||  
     attrs[XFRMA_ALG_CRYPT] ||  
 -    attrs[XFRMA_TFCPAD])  
 +    attrs[XFRMA_TFCPAD] ||  
 +    (ntohl(p->id.spi) >= 0x10000))  
  goto out;  
  break;  
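
The two xfrm_user hunks relocate the SPI range test between the protocol cases of verify_newsa_info(); judging by the attributes each case forbids, it moves into the IPComp case, where the SA is identified by a 16-bit CPI, so an "SPI" of 0x10000 or more can never be valid. The moved check in isolation (ntohl() because the SPI arrives in network byte order):

    #include <arpa/inet.h>
    #include <errno.h>
    #include <stdint.h>

    static int verify_ipcomp_spi(uint32_t spi_be)
    {
            if (ntohl(spi_be) >= 0x10000)   /* CPI is only 16 bits wide */
                    return -EINVAL;
            return 0;
    }
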
1805    
1806    - if (ieee80211_is_mgmt(hdr->frame_control) &&
1807    -    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
1808    - if (tx->flags & IEEE80211_TX_UNICAST)
1809    - info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1810    - return TX_CONTINUE;
1811    - }
1812    -
1813     if (tx->flags & IEEE80211_TX_UNICAST)
1814     return ieee80211_tx_h_unicast_ps_buf(tx);
1815     else
1816    diff --git a/net/wireless/trace.h b/net/wireless/trace.h
1817    index aabccf13e07b..0a46eba04a4a 100644
1818    --- a/net/wireless/trace.h
1819    +++ b/net/wireless/trace.h
1820    @@ -2072,7 +2072,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
1821     MAC_ASSIGN(addr, addr);
1822     __entry->key_type = key_type;
1823     __entry->key_id = key_id;
1824    - memcpy(__entry->tsc, tsc, 6);
1825    + if (tsc)
1826    + memcpy(__entry->tsc, tsc, 6);
1827     ),
1828     TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
1829      NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
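
The trace.h hunk guards a fixed-size memcpy() whose source may legitimately be NULL: not every caller of cfg80211_michael_mic_failure() has a TSC to report, and copying from NULL is undefined behavior even in a tracepoint. Generic form of the fix:

    #include <string.h>

    static void record_tsc(unsigned char dst[6], const unsigned char *tsc)
    {
            if (tsc)                /* NULL means "no TSC available" */
                    memcpy(dst, tsc, 6);
    }
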
