Magellan Linux

Annotation of /trunk/kernel-lts/patches-3.4/0142-3.4.43-all-fixes.patch

Revision 2202
Thu Jun 13 10:35:13 2013 UTC by niro
File size: 40514 bytes
-linux-3.4.43
1 niro 2202 diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
2     index 76e4a52..656de8b 100644
3     --- a/arch/sparc/include/asm/pgtable_64.h
4     +++ b/arch/sparc/include/asm/pgtable_64.h
5     @@ -780,6 +780,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
6     return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
7     }
8    
9     +#include <asm/tlbflush.h>
10     #include <asm-generic/pgtable.h>
11    
12     /* We provide our own get_unmapped_area to cope with VA holes and
13     diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
14     index 7923c4a..9c2a92d 100644
15     --- a/arch/sparc/include/asm/switch_to_64.h
16     +++ b/arch/sparc/include/asm/switch_to_64.h
17     @@ -18,8 +18,7 @@ do { \
18     * and 2 stores in this critical code path. -DaveM
19     */
20     #define switch_to(prev, next, last) \
21     -do { flush_tlb_pending(); \
22     - save_and_clear_fpu(); \
23     +do { save_and_clear_fpu(); \
24     /* If you are tempted to conditionalize the following */ \
25     /* so that ASI is only written if it changes, think again. */ \
26     __asm__ __volatile__("wr %%g0, %0, %%asi" \
27     diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
28     index 2ef4634..f0d6a97 100644
29     --- a/arch/sparc/include/asm/tlbflush_64.h
30     +++ b/arch/sparc/include/asm/tlbflush_64.h
31     @@ -11,24 +11,40 @@
32     struct tlb_batch {
33     struct mm_struct *mm;
34     unsigned long tlb_nr;
35     + unsigned long active;
36     unsigned long vaddrs[TLB_BATCH_NR];
37     };
38    
39     extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
40     extern void flush_tsb_user(struct tlb_batch *tb);
41     +extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
42    
43     /* TLB flush operations. */
44    
45     -extern void flush_tlb_pending(void);
46     +static inline void flush_tlb_mm(struct mm_struct *mm)
47     +{
48     +}
49     +
50     +static inline void flush_tlb_page(struct vm_area_struct *vma,
51     + unsigned long vmaddr)
52     +{
53     +}
54     +
55     +static inline void flush_tlb_range(struct vm_area_struct *vma,
56     + unsigned long start, unsigned long end)
57     +{
58     +}
59     +
60     +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
61    
62     -#define flush_tlb_range(vma,start,end) \
63     - do { (void)(start); flush_tlb_pending(); } while (0)
64     -#define flush_tlb_page(vma,addr) flush_tlb_pending()
65     -#define flush_tlb_mm(mm) flush_tlb_pending()
66     +extern void flush_tlb_pending(void);
67     +extern void arch_enter_lazy_mmu_mode(void);
68     +extern void arch_leave_lazy_mmu_mode(void);
69     +#define arch_flush_lazy_mmu_mode() do {} while (0)
70    
71     /* Local cpu only. */
72     extern void __flush_tlb_all(void);
73     -
74     +extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
75     extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
76    
77     #ifndef CONFIG_SMP
78     @@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
79     __flush_tlb_kernel_range(start,end); \
80     } while (0)
81    
82     +static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
83     +{
84     + __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
85     +}
86     +
87     #else /* CONFIG_SMP */
88    
89     extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
90     +extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
91    
92     #define flush_tlb_kernel_range(start, end) \
93     do { flush_tsb_kernel_range(start,end); \
94     smp_flush_tlb_kernel_range(start, end); \
95     } while (0)
96    
97     +#define global_flush_tlb_page(mm, vaddr) \
98     + smp_flush_tlb_page(mm, vaddr)
99     +
100     #endif /* ! CONFIG_SMP */
101    
102     #endif /* _SPARC64_TLBFLUSH_H */
103     diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
104     index 3b1bd7c..bb2886a 100644
105     --- a/arch/sparc/kernel/smp_64.c
106     +++ b/arch/sparc/kernel/smp_64.c
107     @@ -856,7 +856,7 @@ void smp_tsb_sync(struct mm_struct *mm)
108     }
109    
110     extern unsigned long xcall_flush_tlb_mm;
111     -extern unsigned long xcall_flush_tlb_pending;
112     +extern unsigned long xcall_flush_tlb_page;
113     extern unsigned long xcall_flush_tlb_kernel_range;
114     extern unsigned long xcall_fetch_glob_regs;
115     extern unsigned long xcall_receive_signal;
116     @@ -1070,23 +1070,56 @@ local_flush_and_out:
117     put_cpu();
118     }
119    
120     +struct tlb_pending_info {
121     + unsigned long ctx;
122     + unsigned long nr;
123     + unsigned long *vaddrs;
124     +};
125     +
126     +static void tlb_pending_func(void *info)
127     +{
128     + struct tlb_pending_info *t = info;
129     +
130     + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
131     +}
132     +
133     void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
134     {
135     u32 ctx = CTX_HWBITS(mm->context);
136     + struct tlb_pending_info info;
137     int cpu = get_cpu();
138    
139     + info.ctx = ctx;
140     + info.nr = nr;
141     + info.vaddrs = vaddrs;
142     +
143     if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
144     cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
145     else
146     - smp_cross_call_masked(&xcall_flush_tlb_pending,
147     - ctx, nr, (unsigned long) vaddrs,
148     - mm_cpumask(mm));
149     + smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
150     + &info, 1);
151    
152     __flush_tlb_pending(ctx, nr, vaddrs);
153    
154     put_cpu();
155     }
156    
157     +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
158     +{
159     + unsigned long context = CTX_HWBITS(mm->context);
160     + int cpu = get_cpu();
161     +
162     + if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
163     + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
164     + else
165     + smp_cross_call_masked(&xcall_flush_tlb_page,
166     + context, vaddr, 0,
167     + mm_cpumask(mm));
168     + __flush_tlb_page(context, vaddr);
169     +
170     + put_cpu();
171     +}
172     +
173     void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
174     {
175     start &= PAGE_MASK;
176     diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
177     index b1f279c..afd021e 100644
178     --- a/arch/sparc/mm/tlb.c
179     +++ b/arch/sparc/mm/tlb.c
180     @@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
181     void flush_tlb_pending(void)
182     {
183     struct tlb_batch *tb = &get_cpu_var(tlb_batch);
184     + struct mm_struct *mm = tb->mm;
185    
186     - if (tb->tlb_nr) {
187     - flush_tsb_user(tb);
188     + if (!tb->tlb_nr)
189     + goto out;
190    
191     - if (CTX_VALID(tb->mm->context)) {
192     + flush_tsb_user(tb);
193     +
194     + if (CTX_VALID(mm->context)) {
195     + if (tb->tlb_nr == 1) {
196     + global_flush_tlb_page(mm, tb->vaddrs[0]);
197     + } else {
198     #ifdef CONFIG_SMP
199     smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
200     &tb->vaddrs[0]);
201     @@ -37,12 +43,30 @@ void flush_tlb_pending(void)
202     tb->tlb_nr, &tb->vaddrs[0]);
203     #endif
204     }
205     - tb->tlb_nr = 0;
206     }
207    
208     + tb->tlb_nr = 0;
209     +
210     +out:
211     put_cpu_var(tlb_batch);
212     }
213    
214     +void arch_enter_lazy_mmu_mode(void)
215     +{
216     + struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
217     +
218     + tb->active = 1;
219     +}
220     +
221     +void arch_leave_lazy_mmu_mode(void)
222     +{
223     + struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
224     +
225     + if (tb->tlb_nr)
226     + flush_tlb_pending();
227     + tb->active = 0;
228     +}
229     +
230     void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
231     pte_t *ptep, pte_t orig, int fullmm)
232     {
233     @@ -90,6 +114,12 @@ no_cache_flush:
234     nr = 0;
235     }
236    
237     + if (!tb->active) {
238     + global_flush_tlb_page(mm, vaddr);
239     + flush_tsb_user_page(mm, vaddr);
240     + goto out;
241     + }
242     +
243     if (nr == 0)
244     tb->mm = mm;
245    
246     @@ -98,5 +128,6 @@ no_cache_flush:
247     if (nr >= TLB_BATCH_NR)
248     flush_tlb_pending();
249    
250     +out:
251     put_cpu_var(tlb_batch);
252     }
253     diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
254     index c52add7..f4e84f3 100644
255     --- a/arch/sparc/mm/tsb.c
256     +++ b/arch/sparc/mm/tsb.c
257     @@ -7,11 +7,10 @@
258     #include <linux/preempt.h>
259     #include <linux/slab.h>
260     #include <asm/page.h>
261     -#include <asm/tlbflush.h>
262     -#include <asm/tlb.h>
263     -#include <asm/mmu_context.h>
264     #include <asm/pgtable.h>
265     +#include <asm/mmu_context.h>
266     #include <asm/tsb.h>
267     +#include <asm/tlb.h>
268     #include <asm/oplib.h>
269    
270     extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
271     @@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
272     }
273     }
274    
275     -static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
276     - unsigned long tsb, unsigned long nentries)
277     +static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
278     + unsigned long hash_shift,
279     + unsigned long nentries)
280     {
281     - unsigned long i;
282     + unsigned long tag, ent, hash;
283    
284     - for (i = 0; i < tb->tlb_nr; i++) {
285     - unsigned long v = tb->vaddrs[i];
286     - unsigned long tag, ent, hash;
287     + v &= ~0x1UL;
288     + hash = tsb_hash(v, hash_shift, nentries);
289     + ent = tsb + (hash * sizeof(struct tsb));
290     + tag = (v >> 22UL);
291    
292     - v &= ~0x1UL;
293     + tsb_flush(ent, tag);
294     +}
295    
296     - hash = tsb_hash(v, hash_shift, nentries);
297     - ent = tsb + (hash * sizeof(struct tsb));
298     - tag = (v >> 22UL);
299     +static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
300     + unsigned long tsb, unsigned long nentries)
301     +{
302     + unsigned long i;
303    
304     - tsb_flush(ent, tag);
305     - }
306     + for (i = 0; i < tb->tlb_nr; i++)
307     + __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
308     }
309    
310     void flush_tsb_user(struct tlb_batch *tb)
311     @@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
312     spin_unlock_irqrestore(&mm->context.lock, flags);
313     }
314    
315     +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
316     +{
317     + unsigned long nentries, base, flags;
318     +
319     + spin_lock_irqsave(&mm->context.lock, flags);
320     +
321     + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
322     + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
323     + if (tlb_type == cheetah_plus || tlb_type == hypervisor)
324     + base = __pa(base);
325     + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
326     +
327     +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
328     + if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
329     + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
330     + nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
331     + if (tlb_type == cheetah_plus || tlb_type == hypervisor)
332     + base = __pa(base);
333     + __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
334     + }
335     +#endif
336     + spin_unlock_irqrestore(&mm->context.lock, flags);
337     +}
338     +
339     #if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
340     #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
341     #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
342     diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
343     index 874162a..dd10caa 100644
344     --- a/arch/sparc/mm/ultra.S
345     +++ b/arch/sparc/mm/ultra.S
346     @@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */
347     nop
348    
349     .align 32
350     + .globl __flush_tlb_page
351     +__flush_tlb_page: /* 22 insns */
352     + /* %o0 = context, %o1 = vaddr */
353     + rdpr %pstate, %g7
354     + andn %g7, PSTATE_IE, %g2
355     + wrpr %g2, %pstate
356     + mov SECONDARY_CONTEXT, %o4
357     + ldxa [%o4] ASI_DMMU, %g2
358     + stxa %o0, [%o4] ASI_DMMU
359     + andcc %o1, 1, %g0
360     + andn %o1, 1, %o3
361     + be,pn %icc, 1f
362     + or %o3, 0x10, %o3
363     + stxa %g0, [%o3] ASI_IMMU_DEMAP
364     +1: stxa %g0, [%o3] ASI_DMMU_DEMAP
365     + membar #Sync
366     + stxa %g2, [%o4] ASI_DMMU
367     + sethi %hi(KERNBASE), %o4
368     + flush %o4
369     + retl
370     + wrpr %g7, 0x0, %pstate
371     + nop
372     + nop
373     + nop
374     + nop
375     +
376     + .align 32
377     .globl __flush_tlb_pending
378     __flush_tlb_pending: /* 26 insns */
379     /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
380     @@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
381     retl
382     wrpr %g7, 0x0, %pstate
383    
384     +__cheetah_flush_tlb_page: /* 22 insns */
385     + /* %o0 = context, %o1 = vaddr */
386     + rdpr %pstate, %g7
387     + andn %g7, PSTATE_IE, %g2
388     + wrpr %g2, 0x0, %pstate
389     + wrpr %g0, 1, %tl
390     + mov PRIMARY_CONTEXT, %o4
391     + ldxa [%o4] ASI_DMMU, %g2
392     + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
393     + sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
394     + or %o0, %o3, %o0 /* Preserve nucleus page size fields */
395     + stxa %o0, [%o4] ASI_DMMU
396     + andcc %o1, 1, %g0
397     + be,pn %icc, 1f
398     + andn %o1, 1, %o3
399     + stxa %g0, [%o3] ASI_IMMU_DEMAP
400     +1: stxa %g0, [%o3] ASI_DMMU_DEMAP
401     + membar #Sync
402     + stxa %g2, [%o4] ASI_DMMU
403     + sethi %hi(KERNBASE), %o4
404     + flush %o4
405     + wrpr %g0, 0, %tl
406     + retl
407     + wrpr %g7, 0x0, %pstate
408     +
409     __cheetah_flush_tlb_pending: /* 27 insns */
410     /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
411     rdpr %pstate, %g7
412     @@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
413     retl
414     nop
415    
416     +__hypervisor_flush_tlb_page: /* 11 insns */
417     + /* %o0 = context, %o1 = vaddr */
418     + mov %o0, %g2
419     + mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
420     + mov %g2, %o1 /* ARG1: mmu context */
421     + mov HV_MMU_ALL, %o2 /* ARG2: flags */
422     + srlx %o0, PAGE_SHIFT, %o0
423     + sllx %o0, PAGE_SHIFT, %o0
424     + ta HV_MMU_UNMAP_ADDR_TRAP
425     + brnz,pn %o0, __hypervisor_tlb_tl0_error
426     + mov HV_MMU_UNMAP_ADDR_TRAP, %o1
427     + retl
428     + nop
429     +
430     __hypervisor_flush_tlb_pending: /* 16 insns */
431     /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
432     sllx %o1, 3, %g1
433     @@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
434     call tlb_patch_one
435     mov 19, %o2
436    
437     + sethi %hi(__flush_tlb_page), %o0
438     + or %o0, %lo(__flush_tlb_page), %o0
439     + sethi %hi(__cheetah_flush_tlb_page), %o1
440     + or %o1, %lo(__cheetah_flush_tlb_page), %o1
441     + call tlb_patch_one
442     + mov 22, %o2
443     +
444     sethi %hi(__flush_tlb_pending), %o0
445     or %o0, %lo(__flush_tlb_pending), %o0
446     sethi %hi(__cheetah_flush_tlb_pending), %o1
447     @@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
448     nop
449     nop
450    
451     - .globl xcall_flush_tlb_pending
452     -xcall_flush_tlb_pending: /* 21 insns */
453     - /* %g5=context, %g1=nr, %g7=vaddrs[] */
454     - sllx %g1, 3, %g1
455     + .globl xcall_flush_tlb_page
456     +xcall_flush_tlb_page: /* 17 insns */
457     + /* %g5=context, %g1=vaddr */
458     mov PRIMARY_CONTEXT, %g4
459     ldxa [%g4] ASI_DMMU, %g2
460     srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
461     @@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
462     or %g5, %g4, %g5
463     mov PRIMARY_CONTEXT, %g4
464     stxa %g5, [%g4] ASI_DMMU
465     -1: sub %g1, (1 << 3), %g1
466     - ldx [%g7 + %g1], %g5
467     - andcc %g5, 0x1, %g0
468     + andcc %g1, 0x1, %g0
469     be,pn %icc, 2f
470     -
471     - andn %g5, 0x1, %g5
472     + andn %g1, 0x1, %g5
473     stxa %g0, [%g5] ASI_IMMU_DEMAP
474     2: stxa %g0, [%g5] ASI_DMMU_DEMAP
475     membar #Sync
476     - brnz,pt %g1, 1b
477     - nop
478     stxa %g2, [%g4] ASI_DMMU
479     retry
480     nop
481     + nop
482    
483     .globl xcall_flush_tlb_kernel_range
484     xcall_flush_tlb_kernel_range: /* 25 insns */
485     @@ -596,15 +664,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
486     membar #Sync
487     retry
488    
489     - .globl __hypervisor_xcall_flush_tlb_pending
490     -__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
491     - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
492     - sllx %g1, 3, %g1
493     + .globl __hypervisor_xcall_flush_tlb_page
494     +__hypervisor_xcall_flush_tlb_page: /* 17 insns */
495     + /* %g5=ctx, %g1=vaddr */
496     mov %o0, %g2
497     mov %o1, %g3
498     mov %o2, %g4
499     -1: sub %g1, (1 << 3), %g1
500     - ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
501     + mov %g1, %o0 /* ARG0: virtual address */
502     mov %g5, %o1 /* ARG1: mmu context */
503     mov HV_MMU_ALL, %o2 /* ARG2: flags */
504     srlx %o0, PAGE_SHIFT, %o0
505     @@ -613,8 +679,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
506     mov HV_MMU_UNMAP_ADDR_TRAP, %g6
507     brnz,a,pn %o0, __hypervisor_tlb_xcall_error
508     mov %o0, %g5
509     - brnz,pt %g1, 1b
510     - nop
511     mov %g2, %o0
512     mov %g3, %o1
513     mov %g4, %o2
514     @@ -697,6 +761,13 @@ hypervisor_patch_cachetlbops:
515     call tlb_patch_one
516     mov 10, %o2
517    
518     + sethi %hi(__flush_tlb_page), %o0
519     + or %o0, %lo(__flush_tlb_page), %o0
520     + sethi %hi(__hypervisor_flush_tlb_page), %o1
521     + or %o1, %lo(__hypervisor_flush_tlb_page), %o1
522     + call tlb_patch_one
523     + mov 11, %o2
524     +
525     sethi %hi(__flush_tlb_pending), %o0
526     or %o0, %lo(__flush_tlb_pending), %o0
527     sethi %hi(__hypervisor_flush_tlb_pending), %o1
528     @@ -728,12 +799,12 @@ hypervisor_patch_cachetlbops:
529     call tlb_patch_one
530     mov 21, %o2
531    
532     - sethi %hi(xcall_flush_tlb_pending), %o0
533     - or %o0, %lo(xcall_flush_tlb_pending), %o0
534     - sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
535     - or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
536     + sethi %hi(xcall_flush_tlb_page), %o0
537     + or %o0, %lo(xcall_flush_tlb_page), %o0
538     + sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
539     + or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
540     call tlb_patch_one
541     - mov 21, %o2
542     + mov 17, %o2
543    
544     sethi %hi(xcall_flush_tlb_kernel_range), %o0
545     or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
546     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
547     index d9f646f..d9f8358 100644
548     --- a/drivers/net/bonding/bond_main.c
549     +++ b/drivers/net/bonding/bond_main.c
550     @@ -1888,6 +1888,7 @@ err_detach:
551     write_unlock_bh(&bond->lock);
552    
553     err_close:
554     + slave_dev->priv_flags &= ~IFF_BONDING;
555     dev_close(slave_dev);
556    
557     err_unset_master:
558     @@ -4864,9 +4865,18 @@ static int __net_init bond_net_init(struct net *net)
559     static void __net_exit bond_net_exit(struct net *net)
560     {
561     struct bond_net *bn = net_generic(net, bond_net_id);
562     + struct bonding *bond, *tmp_bond;
563     + LIST_HEAD(list);
564    
565     bond_destroy_sysfs(bn);
566     bond_destroy_proc_dir(bn);
567     +
568     + /* Kill off any bonds created after unregistering bond rtnl ops */
569     + rtnl_lock();
570     + list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
571     + unregister_netdevice_queue(bond->dev, &list);
572     + unregister_netdevice_many(&list);
573     + rtnl_unlock();
574     }
575    
576     static struct pernet_operations bond_net_ops = {
577     diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
578     index edfdf6b..b5fd934 100644
579     --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
580     +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
581     @@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
582     /* how about 0x2000 */
583     #define MAX_TX_BUF_LEN 0x2000
584     #define MAX_TX_BUF_SHIFT 13
585     -/*#define MAX_TX_BUF_LEN 0x3000 */
586     +#define MAX_TSO_SEG_SIZE 0x3c00
587    
588     /* rrs word 1 bit 0:31 */
589     #define RRS_RX_CSUM_MASK 0xFFFF
590     diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
591     index f964151..d53509e 100644
592     --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
593     +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
594     @@ -2354,6 +2354,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
595    
596     INIT_WORK(&adapter->reset_task, atl1e_reset_task);
597     INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
598     + netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
599     err = register_netdev(netdev);
600     if (err) {
601     netdev_err(netdev, "register netdevice failed\n");
602     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
603     index d939bd7..bf9f987 100644
604     --- a/drivers/tty/tty_io.c
605     +++ b/drivers/tty/tty_io.c
606     @@ -938,6 +938,14 @@ void start_tty(struct tty_struct *tty)
607    
608     EXPORT_SYMBOL(start_tty);
609    
610     +static void tty_update_time(struct timespec *time)
611     +{
612     + unsigned long sec = get_seconds();
613     + sec -= sec % 60;
614     + if ((long)(sec - time->tv_sec) > 0)
615     + time->tv_sec = sec;
616     +}
617     +
618     /**
619     * tty_read - read method for tty device files
620     * @file: pointer to tty file
621     @@ -974,8 +982,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
622     else
623     i = -EIO;
624     tty_ldisc_deref(ld);
625     +
626     if (i > 0)
627     - inode->i_atime = current_fs_time(inode->i_sb);
628     + tty_update_time(&inode->i_atime);
629     +
630     return i;
631     }
632    
633     @@ -1078,7 +1088,7 @@ static inline ssize_t do_tty_write(
634     }
635     if (written) {
636     struct inode *inode = file->f_path.dentry->d_inode;
637     - inode->i_mtime = current_fs_time(inode->i_sb);
638     + tty_update_time(&inode->i_mtime);
639     ret = written;
640     }
641     out:
642     diff --git a/fs/aio.c b/fs/aio.c
643     index e7f2fad..cdc8dc4 100644
644     --- a/fs/aio.c
645     +++ b/fs/aio.c
646     @@ -1094,9 +1094,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
647     spin_unlock(&info->ring_lock);
648    
649     out:
650     - kunmap_atomic(ring);
651     dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
652     (unsigned long)ring->head, (unsigned long)ring->tail);
653     + kunmap_atomic(ring);
654     return ret;
655     }
656    
657     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
658     index e517695..dc6c687 100644
659     --- a/include/linux/netdevice.h
660     +++ b/include/linux/netdevice.h
661     @@ -232,9 +232,9 @@ struct netdev_hw_addr {
662     #define NETDEV_HW_ADDR_T_SLAVE 3
663     #define NETDEV_HW_ADDR_T_UNICAST 4
664     #define NETDEV_HW_ADDR_T_MULTICAST 5
665     - bool synced;
666     bool global_use;
667     int refcount;
668     + int synced;
669     struct rcu_head rcu_head;
670     };
671    
672     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
673     index 4aea870..dc4d49a 100644
674     --- a/include/linux/skbuff.h
675     +++ b/include/linux/skbuff.h
676     @@ -2392,6 +2392,13 @@ static inline void nf_reset(struct sk_buff *skb)
677     #endif
678     }
679    
680     +static inline void nf_reset_trace(struct sk_buff *skb)
681     +{
682     +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
683     + skb->nf_trace = 0;
684     +#endif
685     +}
686     +
687     /* Note: This doesn't put any conntrack and bridge info in dst. */
688     static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
689     {
690     diff --git a/include/linux/socket.h b/include/linux/socket.h
691     index b84bbd4..8f15b1d 100644
692     --- a/include/linux/socket.h
693     +++ b/include/linux/socket.h
694     @@ -316,7 +316,8 @@ struct ucred {
695     /* IPX options */
696     #define IPX_TYPE 1
697    
698     -extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
699     +extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred,
700     + bool use_effective);
701    
702     extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
703     extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
704     diff --git a/include/net/scm.h b/include/net/scm.h
705     index 0c0017c..9f211cf 100644
706     --- a/include/net/scm.h
707     +++ b/include/net/scm.h
708     @@ -50,7 +50,7 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
709     {
710     scm->pid = get_pid(pid);
711     scm->cred = cred ? get_cred(cred) : NULL;
712     - cred_to_ucred(pid, cred, &scm->creds);
713     + cred_to_ucred(pid, cred, &scm->creds, false);
714     }
715    
716     static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
717     diff --git a/net/atm/common.c b/net/atm/common.c
718     index 0c0ad93..f0a9b7e 100644
719     --- a/net/atm/common.c
720     +++ b/net/atm/common.c
721     @@ -520,6 +520,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
722     struct sk_buff *skb;
723     int copied, error = -EINVAL;
724    
725     + msg->msg_namelen = 0;
726     +
727     if (sock->state != SS_CONNECTED)
728     return -ENOTCONN;
729    
730     diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
731     index 9d9a6a3..68b3992 100644
732     --- a/net/ax25/af_ax25.c
733     +++ b/net/ax25/af_ax25.c
734     @@ -1646,6 +1646,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
735     ax25_address src;
736     const unsigned char *mac = skb_mac_header(skb);
737    
738     + memset(sax, 0, sizeof(struct full_sockaddr_ax25));
739     ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
740     &digi, NULL, NULL);
741     sax->sax25_family = AF_AX25;
742     diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
743     index 6fb68a9..c294348 100644
744     --- a/net/bluetooth/af_bluetooth.c
745     +++ b/net/bluetooth/af_bluetooth.c
746     @@ -240,6 +240,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
747     if (flags & (MSG_OOB))
748     return -EOPNOTSUPP;
749    
750     + msg->msg_namelen = 0;
751     +
752     skb = skb_recv_datagram(sk, flags, noblock, &err);
753     if (!skb) {
754     if (sk->sk_shutdown & RCV_SHUTDOWN)
755     @@ -247,8 +249,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
756     return err;
757     }
758    
759     - msg->msg_namelen = 0;
760     -
761     copied = skb->len;
762     if (len < copied) {
763     msg->msg_flags |= MSG_TRUNC;
764     diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
765     index 8d1edd7..c79db7f 100644
766     --- a/net/bluetooth/rfcomm/sock.c
767     +++ b/net/bluetooth/rfcomm/sock.c
768     @@ -628,6 +628,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
769    
770     if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
771     rfcomm_dlc_accept(d);
772     + msg->msg_namelen = 0;
773     return 0;
774     }
775    
776     diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
777     index 5016fa5..24a6886 100644
778     --- a/net/caif/caif_socket.c
779     +++ b/net/caif/caif_socket.c
780     @@ -287,6 +287,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
781     if (m->msg_flags&MSG_OOB)
782     goto read_error;
783    
784     + m->msg_namelen = 0;
785     +
786     skb = skb_recv_datagram(sk, flags, 0 , &ret);
787     if (!skb)
788     goto read_error;
789     diff --git a/net/core/dev.c b/net/core/dev.c
790     index 9e2e29b..dd12421 100644
791     --- a/net/core/dev.c
792     +++ b/net/core/dev.c
793     @@ -1628,6 +1628,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
794     skb->mark = 0;
795     secpath_reset(skb);
796     nf_reset(skb);
797     + nf_reset_trace(skb);
798     return netif_rx(skb);
799     }
800     EXPORT_SYMBOL_GPL(dev_forward_skb);
801     @@ -1894,6 +1895,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
802     struct net_device *dev = skb->dev;
803     const char *driver = "";
804    
805     + if (!net_ratelimit())
806     + return;
807     +
808     if (dev && dev->dev.parent)
809     driver = dev_driver_string(dev->dev.parent);
810    
811     diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
812     index 76f6d0b..0346e59 100644
813     --- a/net/core/dev_addr_lists.c
814     +++ b/net/core/dev_addr_lists.c
815     @@ -57,7 +57,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
816     ha->type = addr_type;
817     ha->refcount = 1;
818     ha->global_use = global;
819     - ha->synced = false;
820     + ha->synced = 0;
821     list_add_tail_rcu(&ha->list, &list->list);
822     list->count++;
823     return 0;
824     @@ -155,7 +155,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
825     addr_len, ha->type);
826     if (err)
827     break;
828     - ha->synced = true;
829     + ha->synced++;
830     ha->refcount++;
831     } else if (ha->refcount == 1) {
832     __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
833     @@ -176,7 +176,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
834     if (ha->synced) {
835     __hw_addr_del(to_list, ha->addr,
836     addr_len, ha->type);
837     - ha->synced = false;
838     + ha->synced--;
839     __hw_addr_del(from_list, ha->addr,
840     addr_len, ha->type);
841     }
842     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
843     index 3db960c..a133427 100644
844     --- a/net/core/rtnetlink.c
845     +++ b/net/core/rtnetlink.c
846     @@ -1066,7 +1066,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
847     rcu_read_lock();
848     cb->seq = net->dev_base_seq;
849    
850     - if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
851     + if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
852     ifla_policy) >= 0) {
853    
854     if (tb[IFLA_EXT_MASK])
855     @@ -1910,7 +1910,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
856     u32 ext_filter_mask = 0;
857     u16 min_ifinfo_dump_size = 0;
858    
859     - if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
860     + if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
861     ifla_policy) >= 0) {
862     if (tb[IFLA_EXT_MASK])
863     ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
864     diff --git a/net/core/sock.c b/net/core/sock.c
865     index 4b469e3..f8b5030 100644
866     --- a/net/core/sock.c
867     +++ b/net/core/sock.c
868     @@ -815,15 +815,20 @@ EXPORT_SYMBOL(sock_setsockopt);
869    
870    
871     void cred_to_ucred(struct pid *pid, const struct cred *cred,
872     - struct ucred *ucred)
873     + struct ucred *ucred, bool use_effective)
874     {
875     ucred->pid = pid_vnr(pid);
876     ucred->uid = ucred->gid = -1;
877     if (cred) {
878     struct user_namespace *current_ns = current_user_ns();
879    
880     - ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
881     - ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
882     + if (use_effective) {
883     + ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
884     + ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
885     + } else {
886     + ucred->uid = user_ns_map_uid(current_ns, cred, cred->uid);
887     + ucred->gid = user_ns_map_gid(current_ns, cred, cred->gid);
888     + }
889     }
890     }
891     EXPORT_SYMBOL_GPL(cred_to_ucred);
892     @@ -984,7 +989,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
893     struct ucred peercred;
894     if (len > sizeof(peercred))
895     len = sizeof(peercred);
896     - cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
897     + cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred,
898     + &peercred, true);
899     if (copy_to_user(optval, &peercred, len))
900     return -EFAULT;
901     goto lenout;
902     diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
903     index cb982a6..e814e2a 100644
904     --- a/net/ipv4/esp4.c
905     +++ b/net/ipv4/esp4.c
906     @@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
907    
908     /* skb is pure payload to encrypt */
909    
910     - err = -ENOMEM;
911     -
912     esp = x->data;
913     aead = esp->aead;
914     alen = crypto_aead_authsize(aead);
915     @@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
916     }
917    
918     tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
919     - if (!tmp)
920     + if (!tmp) {
921     + err = -ENOMEM;
922     goto error;
923     + }
924    
925     seqhi = esp_tmp_seqhi(tmp);
926     iv = esp_tmp_iv(aead, tmp, seqhilen);
927     diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
928     index 8e28871..4a40457 100644
929     --- a/net/ipv4/ip_fragment.c
930     +++ b/net/ipv4/ip_fragment.c
931     @@ -251,8 +251,7 @@ static void ip_expire(unsigned long arg)
932     if (!head->dev)
933     goto out_rcu_unlock;
934    
935     - /* skb dst is stale, drop it, and perform route lookup again */
936     - skb_dst_drop(head);
937     + /* skb has no dst, perform route lookup again */
938     iph = ip_hdr(head);
939     err = ip_route_input_noref(head, iph->daddr, iph->saddr,
940     iph->tos, head->dev);
941     @@ -517,8 +516,16 @@ found:
942     qp->q.last_in |= INET_FRAG_FIRST_IN;
943    
944     if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
945     - qp->q.meat == qp->q.len)
946     - return ip_frag_reasm(qp, prev, dev);
947     + qp->q.meat == qp->q.len) {
948     + unsigned long orefdst = skb->_skb_refdst;
949     +
950     + skb->_skb_refdst = 0UL;
951     + err = ip_frag_reasm(qp, prev, dev);
952     + skb->_skb_refdst = orefdst;
953     + return err;
954     + }
955     +
956     + skb_dst_drop(skb);
957    
958     write_lock(&ip4_frags.lock);
959     list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
960     diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
961     index eab2a7f..550aa2a 100644
962     --- a/net/ipv4/syncookies.c
963     +++ b/net/ipv4/syncookies.c
964     @@ -347,8 +347,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
965     * hasn't changed since we received the original syn, but I see
966     * no easy way to do this.
967     */
968     - flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
969     - RT_SCOPE_UNIVERSE, IPPROTO_TCP,
970     + flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
971     + RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
972     inet_sk_flowi_flags(sk),
973     (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
974     ireq->loc_addr, th->source, th->dest);
975     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
976     index 0a1f159..762c78f 100644
977     --- a/net/ipv4/tcp_input.c
978     +++ b/net/ipv4/tcp_input.c
979     @@ -116,6 +116,7 @@ int sysctl_tcp_abc __read_mostly;
980     #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
981     #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */
982     #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
983     +#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
984    
985     #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
986     #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
987     @@ -3707,6 +3708,27 @@ static void tcp_send_challenge_ack(struct sock *sk)
988     }
989     }
990    
991     +static void tcp_store_ts_recent(struct tcp_sock *tp)
992     +{
993     + tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
994     + tp->rx_opt.ts_recent_stamp = get_seconds();
995     +}
996     +
997     +static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
998     +{
999     + if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
1000     + /* PAWS bug workaround wrt. ACK frames, the PAWS discard
1001     + * extra check below makes sure this can only happen
1002     + * for pure ACK frames. -DaveM
1003     + *
1004     + * Not only, also it occurs for expired timestamps.
1005     + */
1006     +
1007     + if (tcp_paws_check(&tp->rx_opt, 0))
1008     + tcp_store_ts_recent(tp);
1009     + }
1010     +}
1011     +
1012     /* This routine deals with incoming acks, but not outgoing ones. */
1013     static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
1014     {
1015     @@ -3756,6 +3778,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
1016     prior_fackets = tp->fackets_out;
1017     prior_in_flight = tcp_packets_in_flight(tp);
1018    
1019     + /* ts_recent update must be made after we are sure that the packet
1020     + * is in window.
1021     + */
1022     + if (flag & FLAG_UPDATE_TS_RECENT)
1023     + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
1024     +
1025     if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
1026     /* Window is constant, pure forward advance.
1027     * No more checks are required.
1028     @@ -4053,27 +4081,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
1029     EXPORT_SYMBOL(tcp_parse_md5sig_option);
1030     #endif
1031    
1032     -static inline void tcp_store_ts_recent(struct tcp_sock *tp)
1033     -{
1034     - tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
1035     - tp->rx_opt.ts_recent_stamp = get_seconds();
1036     -}
1037     -
1038     -static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
1039     -{
1040     - if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
1041     - /* PAWS bug workaround wrt. ACK frames, the PAWS discard
1042     - * extra check below makes sure this can only happen
1043     - * for pure ACK frames. -DaveM
1044     - *
1045     - * Not only, also it occurs for expired timestamps.
1046     - */
1047     -
1048     - if (tcp_paws_check(&tp->rx_opt, 0))
1049     - tcp_store_ts_recent(tp);
1050     - }
1051     -}
1052     -
1053     /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
1054     *
1055     * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
1056     @@ -5577,14 +5584,10 @@ slow_path:
1057     return 0;
1058    
1059     step5:
1060     - if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
1061     + if (th->ack &&
1062     + tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
1063     goto discard;
1064    
1065     - /* ts_recent update must be made after we are sure that the packet
1066     - * is in window.
1067     - */
1068     - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
1069     -
1070     tcp_rcv_rtt_measure_ts(sk, skb);
1071    
1072     /* Process urgent data. */
1073     @@ -5948,7 +5951,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
1074    
1075     /* step 5: check the ACK field */
1076     if (th->ack) {
1077     - int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
1078     + int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
1079     + FLAG_UPDATE_TS_RECENT) > 0;
1080    
1081     switch (sk->sk_state) {
1082     case TCP_SYN_RECV:
1083     @@ -6055,11 +6059,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
1084     } else
1085     goto discard;
1086    
1087     - /* ts_recent update must be made after we are sure that the packet
1088     - * is in window.
1089     - */
1090     - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
1091     -
1092     /* step 6: check the URG bit */
1093     tcp_urg(sk, skb, th);
1094    
1095     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1096     index 4dca494..9db21e3 100644
1097     --- a/net/ipv4/tcp_output.c
1098     +++ b/net/ipv4/tcp_output.c
1099     @@ -2154,8 +2154,12 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1100     */
1101     TCP_SKB_CB(skb)->when = tcp_time_stamp;
1102    
1103     - /* make sure skb->data is aligned on arches that require it */
1104     - if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
1105     + /* make sure skb->data is aligned on arches that require it
1106     + * and check if ack-trimming & collapsing extended the headroom
1107     + * beyond what csum_start can cover.
1108     + */
1109     + if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
1110     + skb_headroom(skb) >= 0xFFFF)) {
1111     struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
1112     GFP_ATOMIC);
1113     err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
1114     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1115     index 541a719..2c496d6 100644
1116     --- a/net/ipv6/addrconf.c
1117     +++ b/net/ipv6/addrconf.c
1118     @@ -2399,6 +2399,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
1119     static void init_loopback(struct net_device *dev)
1120     {
1121     struct inet6_dev *idev;
1122     + struct net_device *sp_dev;
1123     + struct inet6_ifaddr *sp_ifa;
1124     + struct rt6_info *sp_rt;
1125    
1126     /* ::1 */
1127    
1128     @@ -2410,6 +2413,30 @@ static void init_loopback(struct net_device *dev)
1129     }
1130    
1131     add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
1132     +
1133     + /* Add routes to other interface's IPv6 addresses */
1134     + for_each_netdev(dev_net(dev), sp_dev) {
1135     + if (!strcmp(sp_dev->name, dev->name))
1136     + continue;
1137     +
1138     + idev = __in6_dev_get(sp_dev);
1139     + if (!idev)
1140     + continue;
1141     +
1142     + read_lock_bh(&idev->lock);
1143     + list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
1144     +
1145     + if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
1146     + continue;
1147     +
1148     + sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
1149     +
1150     + /* Failure cases are ignored */
1151     + if (!IS_ERR(sp_rt))
1152     + ip6_ins_rt(sp_rt);
1153     + }
1154     + read_unlock_bh(&idev->lock);
1155     + }
1156     }
1157    
1158     static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
1159     diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
1160     index 5ff412f..6bda7aa 100644
1161     --- a/net/ipv6/reassembly.c
1162     +++ b/net/ipv6/reassembly.c
1163     @@ -385,8 +385,17 @@ found:
1164     }
1165    
1166     if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
1167     - fq->q.meat == fq->q.len)
1168     - return ip6_frag_reasm(fq, prev, dev);
1169     + fq->q.meat == fq->q.len) {
1170     + int res;
1171     + unsigned long orefdst = skb->_skb_refdst;
1172     +
1173     + skb->_skb_refdst = 0UL;
1174     + res = ip6_frag_reasm(fq, prev, dev);
1175     + skb->_skb_refdst = orefdst;
1176     + return res;
1177     + }
1178     +
1179     + skb_dst_drop(skb);
1180    
1181     write_lock(&ip6_frags.lock);
1182     list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
1183     diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
1184     index d6c291c..bd25678 100644
1185     --- a/net/irda/af_irda.c
1186     +++ b/net/irda/af_irda.c
1187     @@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
1188    
1189     IRDA_DEBUG(4, "%s()\n", __func__);
1190    
1191     + msg->msg_namelen = 0;
1192     +
1193     skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1194     flags & MSG_DONTWAIT, &err);
1195     if (!skb)
1196     diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
1197     index cd6f7a9..625bc50 100644
1198     --- a/net/iucv/af_iucv.c
1199     +++ b/net/iucv/af_iucv.c
1200     @@ -1331,6 +1331,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1201     struct sk_buff *skb, *rskb, *cskb;
1202     int err = 0;
1203    
1204     + msg->msg_namelen = 0;
1205     +
1206     if ((sk->sk_state == IUCV_DISCONN) &&
1207     skb_queue_empty(&iucv->backlog_skb_q) &&
1208     skb_queue_empty(&sk->sk_receive_queue) &&
1209     diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
1210     index df08d77..e4d2fbb 100644
1211     --- a/net/llc/af_llc.c
1212     +++ b/net/llc/af_llc.c
1213     @@ -721,6 +721,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
1214     int target; /* Read at least this many bytes */
1215     long timeo;
1216    
1217     + msg->msg_namelen = 0;
1218     +
1219     lock_sock(sk);
1220     copied = -ENOTCONN;
1221     if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
1222     diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
1223     index 1b9024e..7ed9b1d 100644
1224     --- a/net/netrom/af_netrom.c
1225     +++ b/net/netrom/af_netrom.c
1226     @@ -1177,6 +1177,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
1227     }
1228    
1229     if (sax != NULL) {
1230     + memset(sax, 0, sizeof(*sax));
1231     sax->sax25_family = AF_NETROM;
1232     skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
1233     AX25_ADDR_LEN);
1234     diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
1235     index c13e02e..0c2115f 100644
1236     --- a/net/nfc/llcp/sock.c
1237     +++ b/net/nfc/llcp/sock.c
1238     @@ -514,6 +514,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1239    
1240     pr_debug("%p %zu\n", sk, len);
1241    
1242     + msg->msg_namelen = 0;
1243     +
1244     lock_sock(sk);
1245    
1246     if (sk->sk_state == LLCP_CLOSED &&
1247     diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
1248     index c4719ce..7f645d1 100644
1249     --- a/net/rose/af_rose.c
1250     +++ b/net/rose/af_rose.c
1251     @@ -1257,6 +1257,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1252     skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1253    
1254     if (srose != NULL) {
1255     + memset(srose, 0, msg->msg_namelen);
1256     srose->srose_family = AF_ROSE;
1257     srose->srose_addr = rose->dest_addr;
1258     srose->srose_call = rose->dest_call;
1259     diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
1260     index 599f67a..b7cddb9 100644
1261     --- a/net/sched/sch_cbq.c
1262     +++ b/net/sched/sch_cbq.c
1263     @@ -963,8 +963,11 @@ cbq_dequeue(struct Qdisc *sch)
1264     cbq_update(q);
1265     if ((incr -= incr2) < 0)
1266     incr = 0;
1267     + q->now += incr;
1268     + } else {
1269     + if (now > q->now)
1270     + q->now = now;
1271     }
1272     - q->now += incr;
1273     q->now_rt = now;
1274    
1275     for (;;) {
1276     diff --git a/net/sctp/auth.c b/net/sctp/auth.c
1277     index bf81204..333926d 100644
1278     --- a/net/sctp/auth.c
1279     +++ b/net/sctp/auth.c
1280     @@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
1281     return;
1282    
1283     if (atomic_dec_and_test(&key->refcnt)) {
1284     - kfree(key);
1285     + kzfree(key);
1286     SCTP_DBG_OBJCNT_DEC(keys);
1287     }
1288     }
1289     diff --git a/net/tipc/socket.c b/net/tipc/socket.c
1290     index 29e957f..1441ab7 100644
1291     --- a/net/tipc/socket.c
1292     +++ b/net/tipc/socket.c
1293     @@ -829,6 +829,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
1294     if (addr) {
1295     addr->family = AF_TIPC;
1296     addr->addrtype = TIPC_ADDR_ID;
1297     + memset(&addr->addr, 0, sizeof(addr->addr));
1298     addr->addr.id.ref = msg_origport(msg);
1299     addr->addr.id.node = msg_orignode(msg);
1300     addr->addr.name.domain = 0; /* could leave uninitialized */
1301     @@ -948,6 +949,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
1302     goto exit;
1303     }
1304    
1305     + /* will be updated in set_orig_addr() if needed */
1306     + m->msg_namelen = 0;
1307     +
1308     timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1309     restart:
1310    
1311     @@ -1074,6 +1078,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1312     goto exit;
1313     }
1314    
1315     + /* will be updated in set_orig_addr() if needed */
1316     + m->msg_namelen = 0;
1317     +
1318     target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1319     timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1320     restart:
1321     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
1322     index fa5289a..c4821fd 100644
1323     --- a/net/unix/af_unix.c
1324     +++ b/net/unix/af_unix.c
1325     @@ -1986,7 +1986,7 @@ again:
1326     if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1327     (UNIXCB(skb).cred != siocb->scm->cred))
1328     break;
1329     - } else {
1330     + } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
1331     /* Copy credentials */
1332     scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1333     check_creds = 1;