Magellan Linux

Annotation of /trunk/kernel26-alx/patches-3.10/0152-3.10.53-all-fixes.patch



Revision 2672
Tue Jul 21 16:46:35 2015 UTC by niro
File size: 41028 byte(s)
-3.10.84-alx-r1
1 niro 2672 diff --git a/Makefile b/Makefile
2     index b94f00938acc..2ac415a7e937 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 10
8     -SUBLEVEL = 52
9     +SUBLEVEL = 53
10     EXTRAVERSION =
11     NAME = TOSSUG Baby Fish
12    
13     diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
14     index dfb0019bf05b..6663604a902a 100644
15     --- a/arch/sparc/include/asm/pgtable_64.h
16     +++ b/arch/sparc/include/asm/pgtable_64.h
17     @@ -24,7 +24,8 @@
18    
19     /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
20     * The page copy blockops can use 0x6000000 to 0x8000000.
21     - * The TSB is mapped in the 0x8000000 to 0xa000000 range.
22     + * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
23     + * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
24     * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
25     * The vmalloc area spans 0x100000000 to 0x200000000.
26     * Since modules need to be in the lowest 32-bits of the address space,
27     @@ -33,7 +34,8 @@
28     * 0x400000000.
29     */
30     #define TLBTEMP_BASE _AC(0x0000000006000000,UL)
31     -#define TSBMAP_BASE _AC(0x0000000008000000,UL)
32     +#define TSBMAP_8K_BASE _AC(0x0000000008000000,UL)
33     +#define TSBMAP_4M_BASE _AC(0x0000000008400000,UL)
34     #define MODULES_VADDR _AC(0x0000000010000000,UL)
35     #define MODULES_LEN _AC(0x00000000e0000000,UL)
36     #define MODULES_END _AC(0x00000000f0000000,UL)
37     diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
38     index f0d6a9700f4c..1a4bb971e06d 100644
39     --- a/arch/sparc/include/asm/tlbflush_64.h
40     +++ b/arch/sparc/include/asm/tlbflush_64.h
41     @@ -35,6 +35,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
42     {
43     }
44    
45     +void flush_tlb_kernel_range(unsigned long start, unsigned long end);
46     +
47     #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
48    
49     extern void flush_tlb_pending(void);
50     @@ -49,11 +51,6 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
51    
52     #ifndef CONFIG_SMP
53    
54     -#define flush_tlb_kernel_range(start,end) \
55     -do { flush_tsb_kernel_range(start,end); \
56     - __flush_tlb_kernel_range(start,end); \
57     -} while (0)
58     -
59     static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
60     {
61     __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
62     @@ -64,11 +61,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
63     extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
64     extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
65    
66     -#define flush_tlb_kernel_range(start, end) \
67     -do { flush_tsb_kernel_range(start,end); \
68     - smp_flush_tlb_kernel_range(start, end); \
69     -} while (0)
70     -
71     #define global_flush_tlb_page(mm, vaddr) \
72     smp_flush_tlb_page(mm, vaddr)
73    
74     diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
75     index 54df554b82d9..fa4c900a0d1f 100644
76     --- a/arch/sparc/kernel/ldc.c
77     +++ b/arch/sparc/kernel/ldc.c
78     @@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)
79     if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
80     !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
81     lp->hs_state != LDC_HS_OPEN)
82     - err = -EINVAL;
83     + err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
84     else
85     err = start_handshake(lp);
86    
87     diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
88     index 77539eda928c..8565ecd7d48a 100644
89     --- a/arch/sparc/kernel/smp_64.c
90     +++ b/arch/sparc/kernel/smp_64.c
91     @@ -150,7 +150,7 @@ void cpu_panic(void)
92     #define NUM_ROUNDS 64 /* magic value */
93     #define NUM_ITERS 5 /* likewise */
94    
95     -static DEFINE_SPINLOCK(itc_sync_lock);
96     +static DEFINE_RAW_SPINLOCK(itc_sync_lock);
97     static unsigned long go[SLAVE + 1];
98    
99     #define DEBUG_TICK_SYNC 0
100     @@ -258,7 +258,7 @@ static void smp_synchronize_one_tick(int cpu)
101     go[MASTER] = 0;
102     membar_safe("#StoreLoad");
103    
104     - spin_lock_irqsave(&itc_sync_lock, flags);
105     + raw_spin_lock_irqsave(&itc_sync_lock, flags);
106     {
107     for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
108     while (!go[MASTER])
109     @@ -269,7 +269,7 @@ static void smp_synchronize_one_tick(int cpu)
110     membar_safe("#StoreLoad");
111     }
112     }
113     - spin_unlock_irqrestore(&itc_sync_lock, flags);
114     + raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
115     }
116    
117     #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
118     diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
119     index f7c72b6efc27..d066eb18650c 100644
120     --- a/arch/sparc/kernel/sys32.S
121     +++ b/arch/sparc/kernel/sys32.S
122     @@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
123     SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
124     SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
125     SIGN1(sys32_select, compat_sys_select, %o0)
126     -SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
127     +SIGN1(sys32_futex, compat_sys_futex, %o1)
128     SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
129     SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
130     SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
131     diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
132     index 8201c25e7669..4db8898199f7 100644
133     --- a/arch/sparc/kernel/unaligned_64.c
134     +++ b/arch/sparc/kernel/unaligned_64.c
135     @@ -163,17 +163,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
136     unsigned long compute_effective_address(struct pt_regs *regs,
137     unsigned int insn, unsigned int rd)
138     {
139     + int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
140     unsigned int rs1 = (insn >> 14) & 0x1f;
141     unsigned int rs2 = insn & 0x1f;
142     - int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
143     + unsigned long addr;
144    
145     if (insn & 0x2000) {
146     maybe_flush_windows(rs1, 0, rd, from_kernel);
147     - return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
148     + addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
149     } else {
150     maybe_flush_windows(rs1, rs2, rd, from_kernel);
151     - return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
152     + addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
153     }
154     +
155     + if (!from_kernel && test_thread_flag(TIF_32BIT))
156     + addr &= 0xffffffff;
157     +
158     + return addr;
159     }
160    
161     /* This is just to make gcc think die_if_kernel does return... */
162     diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
163     index 2c20ad63ddbf..30eee6e8a81b 100644
164     --- a/arch/sparc/lib/NG2memcpy.S
165     +++ b/arch/sparc/lib/NG2memcpy.S
166     @@ -236,6 +236,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
167     */
168     VISEntryHalf
169    
170     + membar #Sync
171     alignaddr %o1, %g0, %g0
172    
173     add %o1, (64 - 1), %o4
174     diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
175     index aa4d55b0bdf0..5ce8f2f64604 100644
176     --- a/arch/sparc/math-emu/math_32.c
177     +++ b/arch/sparc/math-emu/math_32.c
178     @@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
179     case 0: fsr = *pfsr;
180     if (IR == -1) IR = 2;
181     /* fcc is always fcc0 */
182     - fsr &= ~0xc00; fsr |= (IR << 10); break;
183     + fsr &= ~0xc00; fsr |= (IR << 10);
184     *pfsr = fsr;
185     break;
186     case 1: rd->s = IR; break;
187     diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
188     index 5062ff389e83..ea83f82464da 100644
189     --- a/arch/sparc/mm/fault_64.c
190     +++ b/arch/sparc/mm/fault_64.c
191     @@ -95,38 +95,51 @@ static unsigned int get_user_insn(unsigned long tpc)
192     pte_t *ptep, pte;
193     unsigned long pa;
194     u32 insn = 0;
195     - unsigned long pstate;
196    
197     - if (pgd_none(*pgdp))
198     - goto outret;
199     + if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
200     + goto out;
201     pudp = pud_offset(pgdp, tpc);
202     - if (pud_none(*pudp))
203     - goto outret;
204     - pmdp = pmd_offset(pudp, tpc);
205     - if (pmd_none(*pmdp))
206     - goto outret;
207     -
208     - /* This disables preemption for us as well. */
209     - __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
210     - __asm__ __volatile__("wrpr %0, %1, %%pstate"
211     - : : "r" (pstate), "i" (PSTATE_IE));
212     - ptep = pte_offset_map(pmdp, tpc);
213     - pte = *ptep;
214     - if (!pte_present(pte))
215     + if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
216     goto out;
217    
218     - pa = (pte_pfn(pte) << PAGE_SHIFT);
219     - pa += (tpc & ~PAGE_MASK);
220     -
221     - /* Use phys bypass so we don't pollute dtlb/dcache. */
222     - __asm__ __volatile__("lduwa [%1] %2, %0"
223     - : "=r" (insn)
224     - : "r" (pa), "i" (ASI_PHYS_USE_EC));
225     + /* This disables preemption for us as well. */
226     + local_irq_disable();
227    
228     + pmdp = pmd_offset(pudp, tpc);
229     + if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
230     + goto out_irq_enable;
231     +
232     +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
233     + if (pmd_trans_huge(*pmdp)) {
234     + if (pmd_trans_splitting(*pmdp))
235     + goto out_irq_enable;
236     +
237     + pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
238     + pa += tpc & ~HPAGE_MASK;
239     +
240     + /* Use phys bypass so we don't pollute dtlb/dcache. */
241     + __asm__ __volatile__("lduwa [%1] %2, %0"
242     + : "=r" (insn)
243     + : "r" (pa), "i" (ASI_PHYS_USE_EC));
244     + } else
245     +#endif
246     + {
247     + ptep = pte_offset_map(pmdp, tpc);
248     + pte = *ptep;
249     + if (pte_present(pte)) {
250     + pa = (pte_pfn(pte) << PAGE_SHIFT);
251     + pa += (tpc & ~PAGE_MASK);
252     +
253     + /* Use phys bypass so we don't pollute dtlb/dcache. */
254     + __asm__ __volatile__("lduwa [%1] %2, %0"
255     + : "=r" (insn)
256     + : "r" (pa), "i" (ASI_PHYS_USE_EC));
257     + }
258     + pte_unmap(ptep);
259     + }
260     +out_irq_enable:
261     + local_irq_enable();
262     out:
263     - pte_unmap(ptep);
264     - __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
265     -outret:
266     return insn;
267     }
268    
269     @@ -152,7 +165,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
270     }
271    
272     static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
273     - unsigned int insn, int fault_code)
274     + unsigned long fault_addr, unsigned int insn,
275     + int fault_code)
276     {
277     unsigned long addr;
278     siginfo_t info;
279     @@ -160,10 +174,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
280     info.si_code = code;
281     info.si_signo = sig;
282     info.si_errno = 0;
283     - if (fault_code & FAULT_CODE_ITLB)
284     + if (fault_code & FAULT_CODE_ITLB) {
285     addr = regs->tpc;
286     - else
287     - addr = compute_effective_address(regs, insn, 0);
288     + } else {
289     + /* If we were able to probe the faulting instruction, use it
290     + * to compute a precise fault address. Otherwise use the fault
291     + * time provided address which may only have page granularity.
292     + */
293     + if (insn)
294     + addr = compute_effective_address(regs, insn, 0);
295     + else
296     + addr = fault_addr;
297     + }
298     info.si_addr = (void __user *) addr;
299     info.si_trapno = 0;
300    
301     @@ -238,7 +260,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
302     /* The si_code was set to make clear whether
303     * this was a SEGV_MAPERR or SEGV_ACCERR fault.
304     */
305     - do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
306     + do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
307     return;
308     }
309    
310     @@ -258,18 +280,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
311     show_regs(regs);
312     }
313    
314     -static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
315     - unsigned long addr)
316     -{
317     - static int times;
318     -
319     - if (times++ < 10)
320     - printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
321     - "reports 64-bit fault address [%lx]\n",
322     - current->comm, current->pid, addr);
323     - show_regs(regs);
324     -}
325     -
326     asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
327     {
328     struct mm_struct *mm = current->mm;
329     @@ -298,10 +308,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
330     goto intr_or_no_mm;
331     }
332     }
333     - if (unlikely((address >> 32) != 0)) {
334     - bogus_32bit_fault_address(regs, address);
335     + if (unlikely((address >> 32) != 0))
336     goto intr_or_no_mm;
337     - }
338     }
339    
340     if (regs->tstate & TSTATE_PRIV) {
341     @@ -519,7 +527,7 @@ do_sigbus:
342     * Send a sigbus, regardless of whether we were in kernel
343     * or user mode.
344     */
345     - do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
346     + do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
347    
348     /* Kernel mode? Handle exceptions or die */
349     if (regs->tstate & TSTATE_PRIV)
350     diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
351     index 04fd55a6e461..a751023dbdcd 100644
352     --- a/arch/sparc/mm/init_64.c
353     +++ b/arch/sparc/mm/init_64.c
354     @@ -350,6 +350,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
355    
356     mm = vma->vm_mm;
357    
358     + /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
359     + if (!pte_accessible(mm, pte))
360     + return;
361     +
362     spin_lock_irqsave(&mm->context.lock, flags);
363    
364     #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
365     @@ -2764,3 +2768,26 @@ void hugetlb_setup(struct pt_regs *regs)
366     }
367     }
368     #endif
369     +
370     +#ifdef CONFIG_SMP
371     +#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
372     +#else
373     +#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
374     +#endif
375     +
376     +void flush_tlb_kernel_range(unsigned long start, unsigned long end)
377     +{
378     + if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
379     + if (start < LOW_OBP_ADDRESS) {
380     + flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
381     + do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
382     + }
383     + if (end > HI_OBP_ADDRESS) {
384     + flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
385     + do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
386     + }
387     + } else {
388     + flush_tsb_kernel_range(start, end);
389     + do_flush_tlb_kernel_range(start, end);
390     + }
391     +}
392     diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
393     index 2cc3bce5ee91..71d99a6c75a7 100644
394     --- a/arch/sparc/mm/tsb.c
395     +++ b/arch/sparc/mm/tsb.c
396     @@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
397     mm->context.tsb_block[tsb_idx].tsb_nentries =
398     tsb_bytes / sizeof(struct tsb);
399    
400     - base = TSBMAP_BASE;
401     + switch (tsb_idx) {
402     + case MM_TSB_BASE:
403     + base = TSBMAP_8K_BASE;
404     + break;
405     +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
406     + case MM_TSB_HUGE:
407     + base = TSBMAP_4M_BASE;
408     + break;
409     +#endif
410     + default:
411     + BUG();
412     + }
413     +
414     tte = pgprot_val(PAGE_KERNEL_LOCKED);
415     tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
416     BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
417     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
418     index 3dba2a70a00e..ec86177be1df 100644
419     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
420     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
421     @@ -312,6 +312,7 @@ struct sw_tx_bd {
422     u8 flags;
423     /* Set on the first BD descriptor when there is a split BD */
424     #define BNX2X_TSO_SPLIT_BD (1<<0)
425     +#define BNX2X_HAS_SECOND_PBD (1<<1)
426     };
427    
428     struct sw_rx_page {
429     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
430     index b04f7f128f49..372a7557e1fa 100644
431     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
432     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
433     @@ -180,6 +180,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
434     --nbd;
435     bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
436    
437     + if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
438     + /* Skip second parse bd... */
439     + --nbd;
440     + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
441     + }
442     +
443     /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
444     if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
445     tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
446     @@ -3755,6 +3761,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
447     /* set encapsulation flag in start BD */
448     SET_FLAG(tx_start_bd->general_data,
449     ETH_TX_START_BD_TUNNEL_EXIST, 1);
450     +
451     + tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
452     +
453     nbd++;
454     } else if (xmit_type & XMIT_CSUM) {
455     /* Set PBD in checksum offload case w/o encapsulation */
456     diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
457     index 155ef4bbde91..9be91cb4f4a3 100644
458     --- a/drivers/net/macvlan.c
459     +++ b/drivers/net/macvlan.c
460     @@ -500,6 +500,7 @@ static int macvlan_init(struct net_device *dev)
461     (lowerdev->state & MACVLAN_STATE_MASK);
462     dev->features = lowerdev->features & MACVLAN_FEATURES;
463     dev->features |= NETIF_F_LLTX;
464     + dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
465     dev->gso_max_size = lowerdev->gso_max_size;
466     dev->iflink = lowerdev->ifindex;
467     dev->hard_header_len = lowerdev->hard_header_len;
468     diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
469     index 7f10588fe668..8161c3f066a3 100644
470     --- a/drivers/net/ppp/pptp.c
471     +++ b/drivers/net/ppp/pptp.c
472     @@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
473     nf_reset(skb);
474    
475     skb->ip_summed = CHECKSUM_NONE;
476     - ip_select_ident(skb, &rt->dst, NULL);
477     + ip_select_ident(skb, NULL);
478     ip_send_check(iph);
479    
480     ip_local_out(skb);
481     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
482     index fcbd4eee52cc..a1dc186c6f66 100644
483     --- a/drivers/net/vxlan.c
484     +++ b/drivers/net/vxlan.c
485     @@ -1093,7 +1093,7 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
486     iph->daddr = dst;
487     iph->saddr = fl4.saddr;
488     iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
489     - __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
490     + __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
491    
492     nf_reset(skb);
493    
494     diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
495     index 160e7510aca6..0787b9756165 100644
496     --- a/drivers/sbus/char/bbc_envctrl.c
497     +++ b/drivers/sbus/char/bbc_envctrl.c
498     @@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op,
499     if (!tp)
500     return;
501    
502     + INIT_LIST_HEAD(&tp->bp_list);
503     + INIT_LIST_HEAD(&tp->glob_list);
504     +
505     tp->client = bbc_i2c_attach(bp, op);
506     if (!tp->client) {
507     kfree(tp);
508     @@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op,
509     if (!fp)
510     return;
511    
512     + INIT_LIST_HEAD(&fp->bp_list);
513     + INIT_LIST_HEAD(&fp->glob_list);
514     +
515     fp->client = bbc_i2c_attach(bp, op);
516     if (!fp->client) {
517     kfree(fp);
518     diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
519     index c1441ed282eb..e0e6cd605cca 100644
520     --- a/drivers/sbus/char/bbc_i2c.c
521     +++ b/drivers/sbus/char/bbc_i2c.c
522     @@ -301,13 +301,18 @@ static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index
523     if (!bp)
524     return NULL;
525    
526     + INIT_LIST_HEAD(&bp->temps);
527     + INIT_LIST_HEAD(&bp->fans);
528     +
529     bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs");
530     if (!bp->i2c_control_regs)
531     goto fail;
532    
533     - bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
534     - if (!bp->i2c_bussel_reg)
535     - goto fail;
536     + if (op->num_resources == 2) {
537     + bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
538     + if (!bp->i2c_bussel_reg)
539     + goto fail;
540     + }
541    
542     bp->waiting = 0;
543     init_waitqueue_head(&bp->wq);
544     diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
545     index a422c8b55a47..aa53fee1df63 100644
546     --- a/drivers/tty/serial/sunsab.c
547     +++ b/drivers/tty/serial/sunsab.c
548     @@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *up,
549     (up->port.line == up->port.cons->index))
550     saw_console_brk = 1;
551    
552     + if (count == 0) {
553     + if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
554     + stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
555     + SAB82532_ISR0_FERR);
556     + up->port.icount.brk++;
557     + uart_handle_break(&up->port);
558     + }
559     + }
560     +
561     for (i = 0; i < count; i++) {
562     unsigned char ch = buf[i], flag;
563    
564     diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
565     index 6ca347a0717e..bb06fd26a7bd 100644
566     --- a/include/net/inetpeer.h
567     +++ b/include/net/inetpeer.h
568     @@ -41,14 +41,13 @@ struct inet_peer {
569     struct rcu_head gc_rcu;
570     };
571     /*
572     - * Once inet_peer is queued for deletion (refcnt == -1), following fields
573     - * are not available: rid, ip_id_count
574     + * Once inet_peer is queued for deletion (refcnt == -1), following field
575     + * is not available: rid
576     * We can share memory with rcu_head to help keep inet_peer small.
577     */
578     union {
579     struct {
580     atomic_t rid; /* Frag reception counter */
581     - atomic_t ip_id_count; /* IP ID for the next packet */
582     };
583     struct rcu_head rcu;
584     struct inet_peer *gc_next;
585     @@ -166,7 +165,7 @@ extern void inetpeer_invalidate_tree(struct inet_peer_base *);
586     extern void inetpeer_invalidate_family(int family);
587    
588     /*
589     - * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
590     + * temporary check to make sure we dont access rid, tcp_ts,
591     * tcp_ts_stamp if no refcount is taken on inet_peer
592     */
593     static inline void inet_peer_refcheck(const struct inet_peer *p)
594     @@ -174,13 +173,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
595     WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
596     }
597    
598     -
599     -/* can be called with or without local BH being disabled */
600     -static inline int inet_getid(struct inet_peer *p, int more)
601     -{
602     - more++;
603     - inet_peer_refcheck(p);
604     - return atomic_add_return(more, &p->ip_id_count) - more;
605     -}
606     -
607     #endif /* _NET_INETPEER_H */
608     diff --git a/include/net/ip.h b/include/net/ip.h
609     index 788f1d8a796f..8695359982d1 100644
610     --- a/include/net/ip.h
611     +++ b/include/net/ip.h
612     @@ -252,9 +252,10 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
613     !(dst_metric_locked(dst, RTAX_MTU)));
614     }
615    
616     -extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
617     +u32 ip_idents_reserve(u32 hash, int segs);
618     +void __ip_select_ident(struct iphdr *iph, int segs);
619    
620     -static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
621     +static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
622     {
623     struct iphdr *iph = ip_hdr(skb);
624    
625     @@ -264,24 +265,20 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s
626     * does not change, they drop every other packet in
627     * a TCP stream using header compression.
628     */
629     - iph->id = (sk && inet_sk(sk)->inet_daddr) ?
630     - htons(inet_sk(sk)->inet_id++) : 0;
631     - } else
632     - __ip_select_ident(iph, dst, 0);
633     -}
634     -
635     -static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
636     -{
637     - struct iphdr *iph = ip_hdr(skb);
638     -
639     - if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
640     if (sk && inet_sk(sk)->inet_daddr) {
641     iph->id = htons(inet_sk(sk)->inet_id);
642     - inet_sk(sk)->inet_id += 1 + more;
643     - } else
644     + inet_sk(sk)->inet_id += segs;
645     + } else {
646     iph->id = 0;
647     - } else
648     - __ip_select_ident(iph, dst, more);
649     + }
650     + } else {
651     + __ip_select_ident(iph, segs);
652     + }
653     +}
654     +
655     +static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
656     +{
657     + ip_select_ident_segs(skb, sk, 1);
658     }
659    
660     /*
661     diff --git a/include/net/ipv6.h b/include/net/ipv6.h
662     index 9e093fc33dab..087370ff05f1 100644
663     --- a/include/net/ipv6.h
664     +++ b/include/net/ipv6.h
665     @@ -530,14 +530,19 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
666     }
667    
668     /* more secured version of ipv6_addr_hash() */
669     -static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
670     +static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
671     {
672     u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
673    
674     return jhash_3words(v,
675     (__force u32)a->s6_addr32[2],
676     (__force u32)a->s6_addr32[3],
677     - ipv6_hash_secret);
678     + initval);
679     +}
680     +
681     +static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
682     +{
683     + return __ipv6_addr_jhash(a, ipv6_hash_secret);
684     }
685    
686     static inline bool ipv6_addr_loopback(const struct in6_addr *a)
687     @@ -649,8 +654,6 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
688     return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
689     }
690    
691     -extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
692     -
693     /*
694     * Header manipulation
695     */
696     diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
697     index c2e542b27a5a..b1c3d1c63c4e 100644
698     --- a/include/net/secure_seq.h
699     +++ b/include/net/secure_seq.h
700     @@ -3,8 +3,6 @@
701    
702     #include <linux/types.h>
703    
704     -extern __u32 secure_ip_id(__be32 daddr);
705     -extern __u32 secure_ipv6_id(const __be32 daddr[4]);
706     extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
707     extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
708     __be16 dport);
709     diff --git a/net/compat.c b/net/compat.c
710     index f50161fb812e..cbc1a2a26587 100644
711     --- a/net/compat.c
712     +++ b/net/compat.c
713     @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
714     {
715     int tot_len;
716    
717     - if (kern_msg->msg_namelen) {
718     + if (kern_msg->msg_name && kern_msg->msg_namelen) {
719     if (mode == VERIFY_READ) {
720     int err = move_addr_to_kernel(kern_msg->msg_name,
721     kern_msg->msg_namelen,
722     @@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
723     if (err < 0)
724     return err;
725     }
726     - if (kern_msg->msg_name)
727     - kern_msg->msg_name = kern_address;
728     - } else
729     + kern_msg->msg_name = kern_address;
730     + } else {
731     kern_msg->msg_name = NULL;
732     + kern_msg->msg_namelen = 0;
733     + }
734    
735     tot_len = iov_from_user_compat_to_kern(kern_iov,
736     (struct compat_iovec __user *)kern_msg->msg_iov,
737     diff --git a/net/core/iovec.c b/net/core/iovec.c
738     index 9a31515fb8e3..1117a26a8548 100644
739     --- a/net/core/iovec.c
740     +++ b/net/core/iovec.c
741     @@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
742     {
743     int size, ct, err;
744    
745     - if (m->msg_namelen) {
746     + if (m->msg_name && m->msg_namelen) {
747     if (mode == VERIFY_READ) {
748     void __user *namep;
749     namep = (void __user __force *) m->msg_name;
750     @@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
751     if (err < 0)
752     return err;
753     }
754     - if (m->msg_name)
755     - m->msg_name = address;
756     + m->msg_name = address;
757     } else {
758     m->msg_name = NULL;
759     + m->msg_namelen = 0;
760     }
761    
762     size = m->msg_iovlen * sizeof(struct iovec);
763     @@ -107,6 +107,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
764     int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
765     int offset, int len)
766     {
767     + /* No data? Done! */
768     + if (len == 0)
769     + return 0;
770     +
771     /* Skip over the finished iovecs */
772     while (offset >= iov->iov_len) {
773     offset -= iov->iov_len;
774     diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
775     index 8d9d05edd2eb..d0afc322b961 100644
776     --- a/net/core/secure_seq.c
777     +++ b/net/core/secure_seq.c
778     @@ -95,31 +95,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
779     #endif
780    
781     #ifdef CONFIG_INET
782     -__u32 secure_ip_id(__be32 daddr)
783     -{
784     - u32 hash[MD5_DIGEST_WORDS];
785     -
786     - net_secret_init();
787     - hash[0] = (__force __u32) daddr;
788     - hash[1] = net_secret[13];
789     - hash[2] = net_secret[14];
790     - hash[3] = net_secret[15];
791     -
792     - md5_transform(hash, net_secret);
793     -
794     - return hash[0];
795     -}
796     -
797     -__u32 secure_ipv6_id(const __be32 daddr[4])
798     -{
799     - __u32 hash[4];
800     -
801     - net_secret_init();
802     - memcpy(hash, daddr, 16);
803     - md5_transform(hash, net_secret);
804     -
805     - return hash[0];
806     -}
807    
808     __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
809     __be16 sport, __be16 dport)
810     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
811     index 9f84a5f7404d..6148716884ae 100644
812     --- a/net/core/skbuff.c
813     +++ b/net/core/skbuff.c
814     @@ -2810,7 +2810,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
815     tail = nskb;
816    
817     __copy_skb_header(nskb, skb);
818     - nskb->mac_len = skb->mac_len;
819    
820     /* nskb and skb might have different headroom */
821     if (nskb->ip_summed == CHECKSUM_PARTIAL)
822     @@ -2820,6 +2819,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
823     skb_set_network_header(nskb, skb->mac_len);
824     nskb->transport_header = (nskb->network_header +
825     skb_network_header_len(skb));
826     + skb_reset_mac_len(nskb);
827    
828     skb_copy_from_linear_data_offset(skb, -tnl_hlen,
829     nskb->data - tnl_hlen,
830     diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
831     index 38d63ca8a6b5..155adf8729c2 100644
832     --- a/net/ipv4/igmp.c
833     +++ b/net/ipv4/igmp.c
834     @@ -343,7 +343,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
835     pip->saddr = fl4.saddr;
836     pip->protocol = IPPROTO_IGMP;
837     pip->tot_len = 0; /* filled in later */
838     - ip_select_ident(skb, &rt->dst, NULL);
839     + ip_select_ident(skb, NULL);
840     ((u8 *)&pip[1])[0] = IPOPT_RA;
841     ((u8 *)&pip[1])[1] = 4;
842     ((u8 *)&pip[1])[2] = 0;
843     @@ -687,7 +687,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
844     iph->daddr = dst;
845     iph->saddr = fl4.saddr;
846     iph->protocol = IPPROTO_IGMP;
847     - ip_select_ident(skb, &rt->dst, NULL);
848     + ip_select_ident(skb, NULL);
849     ((u8 *)&iph[1])[0] = IPOPT_RA;
850     ((u8 *)&iph[1])[1] = 4;
851     ((u8 *)&iph[1])[2] = 0;
852     diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
853     index 33d5537881ed..67140efc15fd 100644
854     --- a/net/ipv4/inetpeer.c
855     +++ b/net/ipv4/inetpeer.c
856     @@ -26,20 +26,7 @@
857     * Theory of operations.
858     * We keep one entry for each peer IP address. The nodes contains long-living
859     * information about the peer which doesn't depend on routes.
860     - * At this moment this information consists only of ID field for the next
861     - * outgoing IP packet. This field is incremented with each packet as encoded
862     - * in inet_getid() function (include/net/inetpeer.h).
863     - * At the moment of writing this notes identifier of IP packets is generated
864     - * to be unpredictable using this code only for packets subjected
865     - * (actually or potentially) to defragmentation. I.e. DF packets less than
866     - * PMTU in size when local fragmentation is disabled use a constant ID and do
867     - * not use this code (see ip_select_ident() in include/net/ip.h).
868     *
869     - * Route cache entries hold references to our nodes.
870     - * New cache entries get references via lookup by destination IP address in
871     - * the avl tree. The reference is grabbed only when it's needed i.e. only
872     - * when we try to output IP packet which needs an unpredictable ID (see
873     - * __ip_select_ident() in net/ipv4/route.c).
874     * Nodes are removed only when reference counter goes to 0.
875     * When it's happened the node may be removed when a sufficient amount of
876     * time has been passed since its last use. The less-recently-used entry can
877     @@ -62,7 +49,6 @@
878     * refcnt: atomically against modifications on other CPU;
879     * usually under some other lock to prevent node disappearing
880     * daddr: unchangeable
881     - * ip_id_count: atomic value (no lock needed)
882     */
883    
884     static struct kmem_cache *peer_cachep __read_mostly;
885     @@ -504,10 +490,6 @@ relookup:
886     p->daddr = *daddr;
887     atomic_set(&p->refcnt, 1);
888     atomic_set(&p->rid, 0);
889     - atomic_set(&p->ip_id_count,
890     - (daddr->family == AF_INET) ?
891     - secure_ip_id(daddr->addr.a4) :
892     - secure_ipv6_id(daddr->addr.a6));
893     p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
894     p->rate_tokens = 0;
895     /* 60*HZ is arbitrary, but chosen enough high so that the first
896     diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
897     index 6ca5873d6175..5afbbbe03b0e 100644
898     --- a/net/ipv4/ip_output.c
899     +++ b/net/ipv4/ip_output.c
900     @@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
901     iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
902     iph->saddr = saddr;
903     iph->protocol = sk->sk_protocol;
904     - ip_select_ident(skb, &rt->dst, sk);
905     + ip_select_ident(skb, sk);
906    
907     if (opt && opt->opt.optlen) {
908     iph->ihl += opt->opt.optlen>>2;
909     @@ -394,8 +394,7 @@ packet_routed:
910     ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
911     }
912    
913     - ip_select_ident_more(skb, &rt->dst, sk,
914     - (skb_shinfo(skb)->gso_segs ?: 1) - 1);
915     + ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
916    
917     skb->priority = sk->sk_priority;
918     skb->mark = sk->sk_mark;
919     @@ -1332,7 +1331,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
920     iph->ttl = ttl;
921     iph->protocol = sk->sk_protocol;
922     ip_copy_addrs(iph, fl4);
923     - ip_select_ident(skb, &rt->dst, sk);
924     + ip_select_ident(skb, sk);
925    
926     if (opt) {
927     iph->ihl += opt->optlen>>2;
928     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
929     index 5642374cb751..84aa69caee59 100644
930     --- a/net/ipv4/ip_tunnel.c
931     +++ b/net/ipv4/ip_tunnel.c
932     @@ -691,7 +691,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
933     iph->daddr = fl4.daddr;
934     iph->saddr = fl4.saddr;
935     iph->ttl = ttl;
936     - __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
937     + __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
938    
939     iptunnel_xmit(skb, dev);
940     return;
941     diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
942     index 49797ed0917c..56d079b63ad3 100644
943     --- a/net/ipv4/ipmr.c
944     +++ b/net/ipv4/ipmr.c
945     @@ -1661,7 +1661,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
946     iph->protocol = IPPROTO_IPIP;
947     iph->ihl = 5;
948     iph->tot_len = htons(skb->len);
949     - ip_select_ident(skb, skb_dst(skb), NULL);
950     + ip_select_ident(skb, NULL);
951     ip_send_check(iph);
952    
953     memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
954     diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
955     index 402870fdfa0e..b4a1c42a627f 100644
956     --- a/net/ipv4/raw.c
957     +++ b/net/ipv4/raw.c
958     @@ -387,7 +387,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
959     iph->check = 0;
960     iph->tot_len = htons(length);
961     if (!iph->id)
962     - ip_select_ident(skb, &rt->dst, NULL);
963     + ip_select_ident(skb, NULL);
964    
965     iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
966     }
967     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
968     index 2b9887becb5c..d4d162eac4df 100644
969     --- a/net/ipv4/route.c
970     +++ b/net/ipv4/route.c
971     @@ -89,6 +89,7 @@
972     #include <linux/rcupdate.h>
973     #include <linux/times.h>
974     #include <linux/slab.h>
975     +#include <linux/jhash.h>
976     #include <net/dst.h>
977     #include <net/net_namespace.h>
978     #include <net/protocol.h>
979     @@ -464,39 +465,53 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
980     return neigh_create(&arp_tbl, pkey, dev);
981     }
982    
983     -/*
984     - * Peer allocation may fail only in serious out-of-memory conditions. However
985     - * we still can generate some output.
986     - * Random ID selection looks a bit dangerous because we have no chances to
987     - * select ID being unique in a reasonable period of time.
988     - * But broken packet identifier may be better than no packet at all.
989     +#define IP_IDENTS_SZ 2048u
990     +struct ip_ident_bucket {
991     + atomic_t id;
992     + u32 stamp32;
993     +};
994     +
995     +static struct ip_ident_bucket *ip_idents __read_mostly;
996     +
997     +/* In order to protect privacy, we add a perturbation to identifiers
998     + * if one generator is seldom used. This makes hard for an attacker
999     + * to infer how many packets were sent between two points in time.
1000     */
1001     -static void ip_select_fb_ident(struct iphdr *iph)
1002     +u32 ip_idents_reserve(u32 hash, int segs)
1003     {
1004     - static DEFINE_SPINLOCK(ip_fb_id_lock);
1005     - static u32 ip_fallback_id;
1006     - u32 salt;
1007     + struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
1008     + u32 old = ACCESS_ONCE(bucket->stamp32);
1009     + u32 now = (u32)jiffies;
1010     + u32 delta = 0;
1011     +
1012     + if (old != now && cmpxchg(&bucket->stamp32, old, now) == old) {
1013     + u64 x = prandom_u32();
1014     +
1015     + x *= (now - old);
1016     + delta = (u32)(x >> 32);
1017     + }
1018    
1019     - spin_lock_bh(&ip_fb_id_lock);
1020     - salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1021     - iph->id = htons(salt & 0xFFFF);
1022     - ip_fallback_id = salt;
1023     - spin_unlock_bh(&ip_fb_id_lock);
1024     + return atomic_add_return(segs + delta, &bucket->id) - segs;
1025     }
1026     +EXPORT_SYMBOL(ip_idents_reserve);
1027    
1028     -void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1029     +void __ip_select_ident(struct iphdr *iph, int segs)
1030     {
1031     - struct net *net = dev_net(dst->dev);
1032     - struct inet_peer *peer;
1033     + static u32 ip_idents_hashrnd __read_mostly;
1034     + static bool hashrnd_initialized = false;
1035     + u32 hash, id;
1036    
1037     - peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
1038     - if (peer) {
1039     - iph->id = htons(inet_getid(peer, more));
1040     - inet_putpeer(peer);
1041     - return;
1042     + if (unlikely(!hashrnd_initialized)) {
1043     + hashrnd_initialized = true;
1044     + get_random_bytes(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
1045     }
1046    
1047     - ip_select_fb_ident(iph);
1048     + hash = jhash_3words((__force u32)iph->daddr,
1049     + (__force u32)iph->saddr,
1050     + iph->protocol,
1051     + ip_idents_hashrnd);
1052     + id = ip_idents_reserve(hash, segs);
1053     + iph->id = htons(id);
1054     }
1055     EXPORT_SYMBOL(__ip_select_ident);
1056    
1057     @@ -2656,6 +2671,12 @@ int __init ip_rt_init(void)
1058     {
1059     int rc = 0;
1060    
1061     + ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
1062     + if (!ip_idents)
1063     + panic("IP: failed to allocate ip_idents\n");
1064     +
1065     + prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
1066     +
1067     #ifdef CONFIG_IP_ROUTE_CLASSID
1068     ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
1069     if (!ip_rt_acct)
1070     diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
1071     index 80fa2bfd7ede..c042e529a11e 100644
1072     --- a/net/ipv4/tcp_vegas.c
1073     +++ b/net/ipv4/tcp_vegas.c
1074     @@ -218,7 +218,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
1075     * This is:
1076     * (actual rate in segments) * baseRTT
1077     */
1078     - target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
1079     + target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
1080     + do_div(target_cwnd, rtt);
1081    
1082     /* Calculate the difference between the window we had,
1083     * and the window we would like to have. This quantity
1084     diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
1085     index ac43cd747bce..b4d1858be550 100644
1086     --- a/net/ipv4/tcp_veno.c
1087     +++ b/net/ipv4/tcp_veno.c
1088     @@ -144,7 +144,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
1089    
1090     rtt = veno->minrtt;
1091    
1092     - target_cwnd = (tp->snd_cwnd * veno->basertt);
1093     + target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
1094     target_cwnd <<= V_PARAM_SHIFT;
1095     do_div(target_cwnd, rtt);
1096    
1097     diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
1098     index b5663c37f089..e3f64831bc36 100644
1099     --- a/net/ipv4/xfrm4_mode_tunnel.c
1100     +++ b/net/ipv4/xfrm4_mode_tunnel.c
1101     @@ -117,12 +117,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
1102    
1103     top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
1104     0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
1105     - ip_select_ident(skb, dst->child, NULL);
1106    
1107     top_iph->ttl = ip4_dst_hoplimit(dst->child);
1108    
1109     top_iph->saddr = x->props.saddr.a4;
1110     top_iph->daddr = x->id.daddr.a4;
1111     + ip_select_ident(skb, NULL);
1112    
1113     return 0;
1114     }
1115     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1116     index ffa8d295c56c..071edcba4158 100644
1117     --- a/net/ipv6/ip6_output.c
1118     +++ b/net/ipv6/ip6_output.c
1119     @@ -540,6 +540,23 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
1120     skb_copy_secmark(to, from);
1121     }
1122    
1123     +static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
1124     +{
1125     + static u32 ip6_idents_hashrnd __read_mostly;
1126     + static bool hashrnd_initialized = false;
1127     + u32 hash, id;
1128     +
1129     + if (unlikely(!hashrnd_initialized)) {
1130     + hashrnd_initialized = true;
1131     + get_random_bytes(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
1132     + }
1133     + hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
1134     + hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
1135     +
1136     + id = ip_idents_reserve(hash, 1);
1137     + fhdr->identification = htonl(id);
1138     +}
1139     +
1140     int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1141     {
1142     struct sk_buff *frag;
1143     diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
1144     index 3d2c81a66d6a..a5d465105b69 100644
1145     --- a/net/ipv6/output_core.c
1146     +++ b/net/ipv6/output_core.c
1147     @@ -6,29 +6,6 @@
1148     #include <net/ipv6.h>
1149     #include <net/ip6_fib.h>
1150    
1151     -void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
1152     -{
1153     - static atomic_t ipv6_fragmentation_id;
1154     - int ident;
1155     -
1156     -#if IS_ENABLED(CONFIG_IPV6)
1157     - if (rt && !(rt->dst.flags & DST_NOPEER)) {
1158     - struct inet_peer *peer;
1159     - struct net *net;
1160     -
1161     - net = dev_net(rt->dst.dev);
1162     - peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
1163     - if (peer) {
1164     - fhdr->identification = htonl(inet_getid(peer, 0));
1165     - inet_putpeer(peer);
1166     - return;
1167     - }
1168     - }
1169     -#endif
1170     - ident = atomic_inc_return(&ipv6_fragmentation_id);
1171     - fhdr->identification = htonl(ident);
1172     -}
1173     -EXPORT_SYMBOL(ipv6_select_ident);
1174    
1175     int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
1176     {
1177     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1178     index 540d58921007..8d22460a811b 100644
1179     --- a/net/ipv6/sit.c
1180     +++ b/net/ipv6/sit.c
1181     @@ -919,7 +919,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
1182     iph->ttl = iph6->hop_limit;
1183    
1184     skb->ip_summed = CHECKSUM_NONE;
1185     - ip_select_ident(skb, skb_dst(skb), NULL);
1186     + ip_select_ident(skb, NULL);
1187     iptunnel_xmit(skb, dev);
1188     return NETDEV_TX_OK;
1189    
1190     diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
1191     index c47444e4cf8c..7f0e1cf2d7e8 100644
1192     --- a/net/netfilter/ipvs/ip_vs_xmit.c
1193     +++ b/net/netfilter/ipvs/ip_vs_xmit.c
1194     @@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1195     iph->daddr = cp->daddr.ip;
1196     iph->saddr = saddr;
1197     iph->ttl = old_iph->ttl;
1198     - ip_select_ident(skb, &rt->dst, NULL);
1199     + ip_select_ident(skb, NULL);
1200    
1201     /* Another hack: avoid icmp_send in ip_fragment */
1202     skb->local_df = 1;
1203     diff --git a/net/sctp/associola.c b/net/sctp/associola.c
1204     index 229b3c3fb6c9..62e86d98bc36 100644
1205     --- a/net/sctp/associola.c
1206     +++ b/net/sctp/associola.c
1207     @@ -1213,6 +1213,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
1208     asoc->c = new->c;
1209     asoc->peer.rwnd = new->peer.rwnd;
1210     asoc->peer.sack_needed = new->peer.sack_needed;
1211     + asoc->peer.auth_capable = new->peer.auth_capable;
1212     asoc->peer.i = new->peer.i;
1213     sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1214     asoc->peer.i.initial_tsn, GFP_ATOMIC);
1215     diff --git a/net/sctp/output.c b/net/sctp/output.c
1216     index 0beb2f9c8a7c..b6f5fc3127b9 100644
1217     --- a/net/sctp/output.c
1218     +++ b/net/sctp/output.c
1219     @@ -618,7 +618,7 @@ out:
1220     return err;
1221     no_route:
1222     kfree_skb(nskb);
1223     - IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
1224     + IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
1225    
1226     /* FIXME: Returning the 'err' will effect all the associations
1227     * associated with a socket, although only one of the paths of the