Contents of /trunk/kernel-alx/patches-3.8/0110-3.8.11-all-fixes.patch
Parent Directory | Revision Log
Revision 2168 -
(show annotations)
(download)
Mon May 6 11:06:15 2013 UTC (11 years, 4 months ago) by niro
File size: 48036 byte(s)
-linux-3.8.11
1 | diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h |
2 | index 02fe2fb..ed94b1a 100644 |
3 | --- a/arch/arm/include/asm/hardware/iop3xx.h |
4 | +++ b/arch/arm/include/asm/hardware/iop3xx.h |
5 | @@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void); |
6 | * IOP3XX processor registers |
7 | */ |
8 | #define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000 |
9 | -#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 |
10 | +#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000 |
11 | #define IOP3XX_PERIPHERAL_SIZE 0x00002000 |
12 | #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\ |
13 | IOP3XX_PERIPHERAL_SIZE - 1) |
14 | diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c |
15 | index bd6f56b..59d2adb 100644 |
16 | --- a/arch/arm/kernel/sched_clock.c |
17 | +++ b/arch/arm/kernel/sched_clock.c |
18 | @@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void) |
19 | |
20 | static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; |
21 | |
22 | -static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) |
23 | +static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) |
24 | { |
25 | return (cyc * mult) >> shift; |
26 | } |
27 | |
28 | -static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) |
29 | +static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) |
30 | { |
31 | u64 epoch_ns; |
32 | u32 epoch_cyc; |
33 | diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h |
34 | index 08fcce9..7619f2f 100644 |
35 | --- a/arch/sparc/include/asm/pgtable_64.h |
36 | +++ b/arch/sparc/include/asm/pgtable_64.h |
37 | @@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, |
38 | return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); |
39 | } |
40 | |
41 | +#include <asm/tlbflush.h> |
42 | #include <asm-generic/pgtable.h> |
43 | |
44 | /* We provide our own get_unmapped_area to cope with VA holes and |
45 | diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h |
46 | index cad36f5..c7de332 100644 |
47 | --- a/arch/sparc/include/asm/switch_to_64.h |
48 | +++ b/arch/sparc/include/asm/switch_to_64.h |
49 | @@ -18,8 +18,7 @@ do { \ |
50 | * and 2 stores in this critical code path. -DaveM |
51 | */ |
52 | #define switch_to(prev, next, last) \ |
53 | -do { flush_tlb_pending(); \ |
54 | - save_and_clear_fpu(); \ |
55 | +do { save_and_clear_fpu(); \ |
56 | /* If you are tempted to conditionalize the following */ \ |
57 | /* so that ASI is only written if it changes, think again. */ \ |
58 | __asm__ __volatile__("wr %%g0, %0, %%asi" \ |
59 | diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h |
60 | index 2ef4634..f0d6a97 100644 |
61 | --- a/arch/sparc/include/asm/tlbflush_64.h |
62 | +++ b/arch/sparc/include/asm/tlbflush_64.h |
63 | @@ -11,24 +11,40 @@ |
64 | struct tlb_batch { |
65 | struct mm_struct *mm; |
66 | unsigned long tlb_nr; |
67 | + unsigned long active; |
68 | unsigned long vaddrs[TLB_BATCH_NR]; |
69 | }; |
70 | |
71 | extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); |
72 | extern void flush_tsb_user(struct tlb_batch *tb); |
73 | +extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); |
74 | |
75 | /* TLB flush operations. */ |
76 | |
77 | -extern void flush_tlb_pending(void); |
78 | +static inline void flush_tlb_mm(struct mm_struct *mm) |
79 | +{ |
80 | +} |
81 | + |
82 | +static inline void flush_tlb_page(struct vm_area_struct *vma, |
83 | + unsigned long vmaddr) |
84 | +{ |
85 | +} |
86 | + |
87 | +static inline void flush_tlb_range(struct vm_area_struct *vma, |
88 | + unsigned long start, unsigned long end) |
89 | +{ |
90 | +} |
91 | + |
92 | +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE |
93 | |
94 | -#define flush_tlb_range(vma,start,end) \ |
95 | - do { (void)(start); flush_tlb_pending(); } while (0) |
96 | -#define flush_tlb_page(vma,addr) flush_tlb_pending() |
97 | -#define flush_tlb_mm(mm) flush_tlb_pending() |
98 | +extern void flush_tlb_pending(void); |
99 | +extern void arch_enter_lazy_mmu_mode(void); |
100 | +extern void arch_leave_lazy_mmu_mode(void); |
101 | +#define arch_flush_lazy_mmu_mode() do {} while (0) |
102 | |
103 | /* Local cpu only. */ |
104 | extern void __flush_tlb_all(void); |
105 | - |
106 | +extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); |
107 | extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); |
108 | |
109 | #ifndef CONFIG_SMP |
110 | @@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \ |
111 | __flush_tlb_kernel_range(start,end); \ |
112 | } while (0) |
113 | |
114 | +static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) |
115 | +{ |
116 | + __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); |
117 | +} |
118 | + |
119 | #else /* CONFIG_SMP */ |
120 | |
121 | extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); |
122 | +extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); |
123 | |
124 | #define flush_tlb_kernel_range(start, end) \ |
125 | do { flush_tsb_kernel_range(start,end); \ |
126 | smp_flush_tlb_kernel_range(start, end); \ |
127 | } while (0) |
128 | |
129 | +#define global_flush_tlb_page(mm, vaddr) \ |
130 | + smp_flush_tlb_page(mm, vaddr) |
131 | + |
132 | #endif /* ! CONFIG_SMP */ |
133 | |
134 | #endif /* _SPARC64_TLBFLUSH_H */ |
135 | diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c |
136 | index 537eb66..ca64d2a 100644 |
137 | --- a/arch/sparc/kernel/smp_64.c |
138 | +++ b/arch/sparc/kernel/smp_64.c |
139 | @@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm) |
140 | } |
141 | |
142 | extern unsigned long xcall_flush_tlb_mm; |
143 | -extern unsigned long xcall_flush_tlb_pending; |
144 | +extern unsigned long xcall_flush_tlb_page; |
145 | extern unsigned long xcall_flush_tlb_kernel_range; |
146 | extern unsigned long xcall_fetch_glob_regs; |
147 | extern unsigned long xcall_fetch_glob_pmu; |
148 | @@ -1074,23 +1074,56 @@ local_flush_and_out: |
149 | put_cpu(); |
150 | } |
151 | |
152 | +struct tlb_pending_info { |
153 | + unsigned long ctx; |
154 | + unsigned long nr; |
155 | + unsigned long *vaddrs; |
156 | +}; |
157 | + |
158 | +static void tlb_pending_func(void *info) |
159 | +{ |
160 | + struct tlb_pending_info *t = info; |
161 | + |
162 | + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); |
163 | +} |
164 | + |
165 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) |
166 | { |
167 | u32 ctx = CTX_HWBITS(mm->context); |
168 | + struct tlb_pending_info info; |
169 | int cpu = get_cpu(); |
170 | |
171 | + info.ctx = ctx; |
172 | + info.nr = nr; |
173 | + info.vaddrs = vaddrs; |
174 | + |
175 | if (mm == current->mm && atomic_read(&mm->mm_users) == 1) |
176 | cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); |
177 | else |
178 | - smp_cross_call_masked(&xcall_flush_tlb_pending, |
179 | - ctx, nr, (unsigned long) vaddrs, |
180 | - mm_cpumask(mm)); |
181 | + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, |
182 | + &info, 1); |
183 | |
184 | __flush_tlb_pending(ctx, nr, vaddrs); |
185 | |
186 | put_cpu(); |
187 | } |
188 | |
189 | +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) |
190 | +{ |
191 | + unsigned long context = CTX_HWBITS(mm->context); |
192 | + int cpu = get_cpu(); |
193 | + |
194 | + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) |
195 | + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); |
196 | + else |
197 | + smp_cross_call_masked(&xcall_flush_tlb_page, |
198 | + context, vaddr, 0, |
199 | + mm_cpumask(mm)); |
200 | + __flush_tlb_page(context, vaddr); |
201 | + |
202 | + put_cpu(); |
203 | +} |
204 | + |
205 | void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) |
206 | { |
207 | start &= PAGE_MASK; |
208 | diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c |
209 | index ba6ae7f..83d89bc 100644 |
210 | --- a/arch/sparc/mm/tlb.c |
211 | +++ b/arch/sparc/mm/tlb.c |
212 | @@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); |
213 | void flush_tlb_pending(void) |
214 | { |
215 | struct tlb_batch *tb = &get_cpu_var(tlb_batch); |
216 | + struct mm_struct *mm = tb->mm; |
217 | |
218 | - if (tb->tlb_nr) { |
219 | - flush_tsb_user(tb); |
220 | + if (!tb->tlb_nr) |
221 | + goto out; |
222 | |
223 | - if (CTX_VALID(tb->mm->context)) { |
224 | + flush_tsb_user(tb); |
225 | + |
226 | + if (CTX_VALID(mm->context)) { |
227 | + if (tb->tlb_nr == 1) { |
228 | + global_flush_tlb_page(mm, tb->vaddrs[0]); |
229 | + } else { |
230 | #ifdef CONFIG_SMP |
231 | smp_flush_tlb_pending(tb->mm, tb->tlb_nr, |
232 | &tb->vaddrs[0]); |
233 | @@ -37,12 +43,30 @@ void flush_tlb_pending(void) |
234 | tb->tlb_nr, &tb->vaddrs[0]); |
235 | #endif |
236 | } |
237 | - tb->tlb_nr = 0; |
238 | } |
239 | |
240 | + tb->tlb_nr = 0; |
241 | + |
242 | +out: |
243 | put_cpu_var(tlb_batch); |
244 | } |
245 | |
246 | +void arch_enter_lazy_mmu_mode(void) |
247 | +{ |
248 | + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); |
249 | + |
250 | + tb->active = 1; |
251 | +} |
252 | + |
253 | +void arch_leave_lazy_mmu_mode(void) |
254 | +{ |
255 | + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); |
256 | + |
257 | + if (tb->tlb_nr) |
258 | + flush_tlb_pending(); |
259 | + tb->active = 0; |
260 | +} |
261 | + |
262 | static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, |
263 | bool exec) |
264 | { |
265 | @@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, |
266 | nr = 0; |
267 | } |
268 | |
269 | + if (!tb->active) { |
270 | + global_flush_tlb_page(mm, vaddr); |
271 | + flush_tsb_user_page(mm, vaddr); |
272 | + goto out; |
273 | + } |
274 | + |
275 | if (nr == 0) |
276 | tb->mm = mm; |
277 | |
278 | @@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, |
279 | if (nr >= TLB_BATCH_NR) |
280 | flush_tlb_pending(); |
281 | |
282 | +out: |
283 | put_cpu_var(tlb_batch); |
284 | } |
285 | |
286 | diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c |
287 | index 428982b..2cc3bce 100644 |
288 | --- a/arch/sparc/mm/tsb.c |
289 | +++ b/arch/sparc/mm/tsb.c |
290 | @@ -7,11 +7,10 @@ |
291 | #include <linux/preempt.h> |
292 | #include <linux/slab.h> |
293 | #include <asm/page.h> |
294 | -#include <asm/tlbflush.h> |
295 | -#include <asm/tlb.h> |
296 | -#include <asm/mmu_context.h> |
297 | #include <asm/pgtable.h> |
298 | +#include <asm/mmu_context.h> |
299 | #include <asm/tsb.h> |
300 | +#include <asm/tlb.h> |
301 | #include <asm/oplib.h> |
302 | |
303 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; |
304 | @@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) |
305 | } |
306 | } |
307 | |
308 | -static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, |
309 | - unsigned long tsb, unsigned long nentries) |
310 | +static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, |
311 | + unsigned long hash_shift, |
312 | + unsigned long nentries) |
313 | { |
314 | - unsigned long i; |
315 | + unsigned long tag, ent, hash; |
316 | |
317 | - for (i = 0; i < tb->tlb_nr; i++) { |
318 | - unsigned long v = tb->vaddrs[i]; |
319 | - unsigned long tag, ent, hash; |
320 | + v &= ~0x1UL; |
321 | + hash = tsb_hash(v, hash_shift, nentries); |
322 | + ent = tsb + (hash * sizeof(struct tsb)); |
323 | + tag = (v >> 22UL); |
324 | |
325 | - v &= ~0x1UL; |
326 | + tsb_flush(ent, tag); |
327 | +} |
328 | |
329 | - hash = tsb_hash(v, hash_shift, nentries); |
330 | - ent = tsb + (hash * sizeof(struct tsb)); |
331 | - tag = (v >> 22UL); |
332 | +static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, |
333 | + unsigned long tsb, unsigned long nentries) |
334 | +{ |
335 | + unsigned long i; |
336 | |
337 | - tsb_flush(ent, tag); |
338 | - } |
339 | + for (i = 0; i < tb->tlb_nr; i++) |
340 | + __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); |
341 | } |
342 | |
343 | void flush_tsb_user(struct tlb_batch *tb) |
344 | @@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb) |
345 | spin_unlock_irqrestore(&mm->context.lock, flags); |
346 | } |
347 | |
348 | +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) |
349 | +{ |
350 | + unsigned long nentries, base, flags; |
351 | + |
352 | + spin_lock_irqsave(&mm->context.lock, flags); |
353 | + |
354 | + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; |
355 | + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; |
356 | + if (tlb_type == cheetah_plus || tlb_type == hypervisor) |
357 | + base = __pa(base); |
358 | + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); |
359 | + |
360 | +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
361 | + if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { |
362 | + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; |
363 | + nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; |
364 | + if (tlb_type == cheetah_plus || tlb_type == hypervisor) |
365 | + base = __pa(base); |
366 | + __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); |
367 | + } |
368 | +#endif |
369 | + spin_unlock_irqrestore(&mm->context.lock, flags); |
370 | +} |
371 | + |
372 | #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K |
373 | #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K |
374 | |
375 | diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S |
376 | index f8e13d4..29b9608 100644 |
377 | --- a/arch/sparc/mm/ultra.S |
378 | +++ b/arch/sparc/mm/ultra.S |
379 | @@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */ |
380 | nop |
381 | |
382 | .align 32 |
383 | + .globl __flush_tlb_page |
384 | +__flush_tlb_page: /* 22 insns */ |
385 | + /* %o0 = context, %o1 = vaddr */ |
386 | + rdpr %pstate, %g7 |
387 | + andn %g7, PSTATE_IE, %g2 |
388 | + wrpr %g2, %pstate |
389 | + mov SECONDARY_CONTEXT, %o4 |
390 | + ldxa [%o4] ASI_DMMU, %g2 |
391 | + stxa %o0, [%o4] ASI_DMMU |
392 | + andcc %o1, 1, %g0 |
393 | + andn %o1, 1, %o3 |
394 | + be,pn %icc, 1f |
395 | + or %o3, 0x10, %o3 |
396 | + stxa %g0, [%o3] ASI_IMMU_DEMAP |
397 | +1: stxa %g0, [%o3] ASI_DMMU_DEMAP |
398 | + membar #Sync |
399 | + stxa %g2, [%o4] ASI_DMMU |
400 | + sethi %hi(KERNBASE), %o4 |
401 | + flush %o4 |
402 | + retl |
403 | + wrpr %g7, 0x0, %pstate |
404 | + nop |
405 | + nop |
406 | + nop |
407 | + nop |
408 | + |
409 | + .align 32 |
410 | .globl __flush_tlb_pending |
411 | __flush_tlb_pending: /* 26 insns */ |
412 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
413 | @@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */ |
414 | retl |
415 | wrpr %g7, 0x0, %pstate |
416 | |
417 | +__cheetah_flush_tlb_page: /* 22 insns */ |
418 | + /* %o0 = context, %o1 = vaddr */ |
419 | + rdpr %pstate, %g7 |
420 | + andn %g7, PSTATE_IE, %g2 |
421 | + wrpr %g2, 0x0, %pstate |
422 | + wrpr %g0, 1, %tl |
423 | + mov PRIMARY_CONTEXT, %o4 |
424 | + ldxa [%o4] ASI_DMMU, %g2 |
425 | + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 |
426 | + sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 |
427 | + or %o0, %o3, %o0 /* Preserve nucleus page size fields */ |
428 | + stxa %o0, [%o4] ASI_DMMU |
429 | + andcc %o1, 1, %g0 |
430 | + be,pn %icc, 1f |
431 | + andn %o1, 1, %o3 |
432 | + stxa %g0, [%o3] ASI_IMMU_DEMAP |
433 | +1: stxa %g0, [%o3] ASI_DMMU_DEMAP |
434 | + membar #Sync |
435 | + stxa %g2, [%o4] ASI_DMMU |
436 | + sethi %hi(KERNBASE), %o4 |
437 | + flush %o4 |
438 | + wrpr %g0, 0, %tl |
439 | + retl |
440 | + wrpr %g7, 0x0, %pstate |
441 | + |
442 | __cheetah_flush_tlb_pending: /* 27 insns */ |
443 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
444 | rdpr %pstate, %g7 |
445 | @@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */ |
446 | retl |
447 | nop |
448 | |
449 | +__hypervisor_flush_tlb_page: /* 11 insns */ |
450 | + /* %o0 = context, %o1 = vaddr */ |
451 | + mov %o0, %g2 |
452 | + mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ |
453 | + mov %g2, %o1 /* ARG1: mmu context */ |
454 | + mov HV_MMU_ALL, %o2 /* ARG2: flags */ |
455 | + srlx %o0, PAGE_SHIFT, %o0 |
456 | + sllx %o0, PAGE_SHIFT, %o0 |
457 | + ta HV_MMU_UNMAP_ADDR_TRAP |
458 | + brnz,pn %o0, __hypervisor_tlb_tl0_error |
459 | + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 |
460 | + retl |
461 | + nop |
462 | + |
463 | __hypervisor_flush_tlb_pending: /* 16 insns */ |
464 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
465 | sllx %o1, 3, %g1 |
466 | @@ -339,6 +405,13 @@ cheetah_patch_cachetlbops: |
467 | call tlb_patch_one |
468 | mov 19, %o2 |
469 | |
470 | + sethi %hi(__flush_tlb_page), %o0 |
471 | + or %o0, %lo(__flush_tlb_page), %o0 |
472 | + sethi %hi(__cheetah_flush_tlb_page), %o1 |
473 | + or %o1, %lo(__cheetah_flush_tlb_page), %o1 |
474 | + call tlb_patch_one |
475 | + mov 22, %o2 |
476 | + |
477 | sethi %hi(__flush_tlb_pending), %o0 |
478 | or %o0, %lo(__flush_tlb_pending), %o0 |
479 | sethi %hi(__cheetah_flush_tlb_pending), %o1 |
480 | @@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */ |
481 | nop |
482 | nop |
483 | |
484 | - .globl xcall_flush_tlb_pending |
485 | -xcall_flush_tlb_pending: /* 21 insns */ |
486 | - /* %g5=context, %g1=nr, %g7=vaddrs[] */ |
487 | - sllx %g1, 3, %g1 |
488 | + .globl xcall_flush_tlb_page |
489 | +xcall_flush_tlb_page: /* 17 insns */ |
490 | + /* %g5=context, %g1=vaddr */ |
491 | mov PRIMARY_CONTEXT, %g4 |
492 | ldxa [%g4] ASI_DMMU, %g2 |
493 | srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 |
494 | @@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */ |
495 | or %g5, %g4, %g5 |
496 | mov PRIMARY_CONTEXT, %g4 |
497 | stxa %g5, [%g4] ASI_DMMU |
498 | -1: sub %g1, (1 << 3), %g1 |
499 | - ldx [%g7 + %g1], %g5 |
500 | - andcc %g5, 0x1, %g0 |
501 | + andcc %g1, 0x1, %g0 |
502 | be,pn %icc, 2f |
503 | - |
504 | - andn %g5, 0x1, %g5 |
505 | + andn %g1, 0x1, %g5 |
506 | stxa %g0, [%g5] ASI_IMMU_DEMAP |
507 | 2: stxa %g0, [%g5] ASI_DMMU_DEMAP |
508 | membar #Sync |
509 | - brnz,pt %g1, 1b |
510 | - nop |
511 | stxa %g2, [%g4] ASI_DMMU |
512 | retry |
513 | nop |
514 | + nop |
515 | |
516 | .globl xcall_flush_tlb_kernel_range |
517 | xcall_flush_tlb_kernel_range: /* 25 insns */ |
518 | @@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ |
519 | membar #Sync |
520 | retry |
521 | |
522 | - .globl __hypervisor_xcall_flush_tlb_pending |
523 | -__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ |
524 | - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ |
525 | - sllx %g1, 3, %g1 |
526 | + .globl __hypervisor_xcall_flush_tlb_page |
527 | +__hypervisor_xcall_flush_tlb_page: /* 17 insns */ |
528 | + /* %g5=ctx, %g1=vaddr */ |
529 | mov %o0, %g2 |
530 | mov %o1, %g3 |
531 | mov %o2, %g4 |
532 | -1: sub %g1, (1 << 3), %g1 |
533 | - ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ |
534 | + mov %g1, %o0 /* ARG0: virtual address */ |
535 | mov %g5, %o1 /* ARG1: mmu context */ |
536 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ |
537 | srlx %o0, PAGE_SHIFT, %o0 |
538 | @@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ |
539 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 |
540 | brnz,a,pn %o0, __hypervisor_tlb_xcall_error |
541 | mov %o0, %g5 |
542 | - brnz,pt %g1, 1b |
543 | - nop |
544 | mov %g2, %o0 |
545 | mov %g3, %o1 |
546 | mov %g4, %o2 |
547 | @@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops: |
548 | call tlb_patch_one |
549 | mov 10, %o2 |
550 | |
551 | + sethi %hi(__flush_tlb_page), %o0 |
552 | + or %o0, %lo(__flush_tlb_page), %o0 |
553 | + sethi %hi(__hypervisor_flush_tlb_page), %o1 |
554 | + or %o1, %lo(__hypervisor_flush_tlb_page), %o1 |
555 | + call tlb_patch_one |
556 | + mov 11, %o2 |
557 | + |
558 | sethi %hi(__flush_tlb_pending), %o0 |
559 | or %o0, %lo(__flush_tlb_pending), %o0 |
560 | sethi %hi(__hypervisor_flush_tlb_pending), %o1 |
561 | @@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops: |
562 | call tlb_patch_one |
563 | mov 21, %o2 |
564 | |
565 | - sethi %hi(xcall_flush_tlb_pending), %o0 |
566 | - or %o0, %lo(xcall_flush_tlb_pending), %o0 |
567 | - sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 |
568 | - or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 |
569 | + sethi %hi(xcall_flush_tlb_page), %o0 |
570 | + or %o0, %lo(xcall_flush_tlb_page), %o0 |
571 | + sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 |
572 | + or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 |
573 | call tlb_patch_one |
574 | - mov 21, %o2 |
575 | + mov 17, %o2 |
576 | |
577 | sethi %hi(xcall_flush_tlb_kernel_range), %o0 |
578 | or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 |
579 | diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
580 | index 27cdf1f..045dc53 100644 |
581 | --- a/drivers/net/bonding/bond_main.c |
582 | +++ b/drivers/net/bonding/bond_main.c |
583 | @@ -1888,6 +1888,7 @@ err_detach: |
584 | write_unlock_bh(&bond->lock); |
585 | |
586 | err_close: |
587 | + slave_dev->priv_flags &= ~IFF_BONDING; |
588 | dev_close(slave_dev); |
589 | |
590 | err_unset_master: |
591 | @@ -3379,20 +3380,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) |
592 | */ |
593 | static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) |
594 | { |
595 | - struct ethhdr *data = (struct ethhdr *)skb->data; |
596 | - struct iphdr *iph; |
597 | - struct ipv6hdr *ipv6h; |
598 | + const struct ethhdr *data; |
599 | + const struct iphdr *iph; |
600 | + const struct ipv6hdr *ipv6h; |
601 | u32 v6hash; |
602 | - __be32 *s, *d; |
603 | + const __be32 *s, *d; |
604 | |
605 | if (skb->protocol == htons(ETH_P_IP) && |
606 | - skb_network_header_len(skb) >= sizeof(*iph)) { |
607 | + pskb_network_may_pull(skb, sizeof(*iph))) { |
608 | iph = ip_hdr(skb); |
609 | + data = (struct ethhdr *)skb->data; |
610 | return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ |
611 | (data->h_dest[5] ^ data->h_source[5])) % count; |
612 | } else if (skb->protocol == htons(ETH_P_IPV6) && |
613 | - skb_network_header_len(skb) >= sizeof(*ipv6h)) { |
614 | + pskb_network_may_pull(skb, sizeof(*ipv6h))) { |
615 | ipv6h = ipv6_hdr(skb); |
616 | + data = (struct ethhdr *)skb->data; |
617 | s = &ipv6h->saddr.s6_addr32[0]; |
618 | d = &ipv6h->daddr.s6_addr32[0]; |
619 | v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); |
620 | @@ -3411,33 +3414,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) |
621 | static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) |
622 | { |
623 | u32 layer4_xor = 0; |
624 | - struct iphdr *iph; |
625 | - struct ipv6hdr *ipv6h; |
626 | - __be32 *s, *d; |
627 | - __be16 *layer4hdr; |
628 | + const struct iphdr *iph; |
629 | + const struct ipv6hdr *ipv6h; |
630 | + const __be32 *s, *d; |
631 | + const __be16 *l4 = NULL; |
632 | + __be16 _l4[2]; |
633 | + int noff = skb_network_offset(skb); |
634 | + int poff; |
635 | |
636 | if (skb->protocol == htons(ETH_P_IP) && |
637 | - skb_network_header_len(skb) >= sizeof(*iph)) { |
638 | + pskb_may_pull(skb, noff + sizeof(*iph))) { |
639 | iph = ip_hdr(skb); |
640 | - if (!ip_is_fragment(iph) && |
641 | - (iph->protocol == IPPROTO_TCP || |
642 | - iph->protocol == IPPROTO_UDP) && |
643 | - (skb_headlen(skb) - skb_network_offset(skb) >= |
644 | - iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { |
645 | - layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); |
646 | - layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); |
647 | + poff = proto_ports_offset(iph->protocol); |
648 | + |
649 | + if (!ip_is_fragment(iph) && poff >= 0) { |
650 | + l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff, |
651 | + sizeof(_l4), &_l4); |
652 | + if (l4) |
653 | + layer4_xor = ntohs(l4[0] ^ l4[1]); |
654 | } |
655 | return (layer4_xor ^ |
656 | ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; |
657 | } else if (skb->protocol == htons(ETH_P_IPV6) && |
658 | - skb_network_header_len(skb) >= sizeof(*ipv6h)) { |
659 | + pskb_may_pull(skb, noff + sizeof(*ipv6h))) { |
660 | ipv6h = ipv6_hdr(skb); |
661 | - if ((ipv6h->nexthdr == IPPROTO_TCP || |
662 | - ipv6h->nexthdr == IPPROTO_UDP) && |
663 | - (skb_headlen(skb) - skb_network_offset(skb) >= |
664 | - sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { |
665 | - layer4hdr = (__be16 *)(ipv6h + 1); |
666 | - layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); |
667 | + poff = proto_ports_offset(ipv6h->nexthdr); |
668 | + if (poff >= 0) { |
669 | + l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff, |
670 | + sizeof(_l4), &_l4); |
671 | + if (l4) |
672 | + layer4_xor = ntohs(l4[0] ^ l4[1]); |
673 | } |
674 | s = &ipv6h->saddr.s6_addr32[0]; |
675 | d = &ipv6h->daddr.s6_addr32[0]; |
676 | @@ -4919,9 +4925,18 @@ static int __net_init bond_net_init(struct net *net) |
677 | static void __net_exit bond_net_exit(struct net *net) |
678 | { |
679 | struct bond_net *bn = net_generic(net, bond_net_id); |
680 | + struct bonding *bond, *tmp_bond; |
681 | + LIST_HEAD(list); |
682 | |
683 | bond_destroy_sysfs(bn); |
684 | bond_destroy_proc_dir(bn); |
685 | + |
686 | + /* Kill off any bonds created after unregistering bond rtnl ops */ |
687 | + rtnl_lock(); |
688 | + list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) |
689 | + unregister_netdevice_queue(bond->dev, &list); |
690 | + unregister_netdevice_many(&list); |
691 | + rtnl_unlock(); |
692 | } |
693 | |
694 | static struct pernet_operations bond_net_ops = { |
695 | diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h |
696 | index edfdf6b..b5fd934 100644 |
697 | --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h |
698 | +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h |
699 | @@ -186,7 +186,7 @@ struct atl1e_tpd_desc { |
700 | /* how about 0x2000 */ |
701 | #define MAX_TX_BUF_LEN 0x2000 |
702 | #define MAX_TX_BUF_SHIFT 13 |
703 | -/*#define MAX_TX_BUF_LEN 0x3000 */ |
704 | +#define MAX_TSO_SEG_SIZE 0x3c00 |
705 | |
706 | /* rrs word 1 bit 0:31 */ |
707 | #define RRS_RX_CSUM_MASK 0xFFFF |
708 | diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c |
709 | index 35faab7..ca33b28 100644 |
710 | --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c |
711 | +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c |
712 | @@ -2332,6 +2332,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
713 | |
714 | INIT_WORK(&adapter->reset_task, atl1e_reset_task); |
715 | INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); |
716 | + netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); |
717 | err = register_netdev(netdev); |
718 | if (err) { |
719 | netdev_err(netdev, "register netdevice failed\n"); |
720 | diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig |
721 | index edfba93..434e33c 100644 |
722 | --- a/drivers/net/ethernet/marvell/Kconfig |
723 | +++ b/drivers/net/ethernet/marvell/Kconfig |
724 | @@ -33,6 +33,7 @@ config MV643XX_ETH |
725 | |
726 | config MVMDIO |
727 | tristate "Marvell MDIO interface support" |
728 | + select PHYLIB |
729 | ---help--- |
730 | This driver supports the MDIO interface found in the network |
731 | interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, |
732 | @@ -45,7 +46,6 @@ config MVMDIO |
733 | config MVNETA |
734 | tristate "Marvell Armada 370/XP network interface support" |
735 | depends on MACH_ARMADA_370_XP |
736 | - select PHYLIB |
737 | select MVMDIO |
738 | ---help--- |
739 | This driver supports the network interface units in the |
740 | diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c |
741 | index b6025c3..84b312ea 100644 |
742 | --- a/drivers/net/ethernet/marvell/mvneta.c |
743 | +++ b/drivers/net/ethernet/marvell/mvneta.c |
744 | @@ -375,7 +375,6 @@ static int rxq_number = 8; |
745 | static int txq_number = 8; |
746 | |
747 | static int rxq_def; |
748 | -static int txq_def; |
749 | |
750 | #define MVNETA_DRIVER_NAME "mvneta" |
751 | #define MVNETA_DRIVER_VERSION "1.0" |
752 | @@ -1476,7 +1475,8 @@ error: |
753 | static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) |
754 | { |
755 | struct mvneta_port *pp = netdev_priv(dev); |
756 | - struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; |
757 | + u16 txq_id = skb_get_queue_mapping(skb); |
758 | + struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; |
759 | struct mvneta_tx_desc *tx_desc; |
760 | struct netdev_queue *nq; |
761 | int frags = 0; |
762 | @@ -1486,7 +1486,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) |
763 | goto out; |
764 | |
765 | frags = skb_shinfo(skb)->nr_frags + 1; |
766 | - nq = netdev_get_tx_queue(dev, txq_def); |
767 | + nq = netdev_get_tx_queue(dev, txq_id); |
768 | |
769 | /* Get a descriptor for the first part of the packet */ |
770 | tx_desc = mvneta_txq_next_desc_get(txq); |
771 | @@ -2690,7 +2690,7 @@ static int mvneta_probe(struct platform_device *pdev) |
772 | return -EINVAL; |
773 | } |
774 | |
775 | - dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); |
776 | + dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); |
777 | if (!dev) |
778 | return -ENOMEM; |
779 | |
780 | @@ -2844,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO); |
781 | module_param(txq_number, int, S_IRUGO); |
782 | |
783 | module_param(rxq_def, int, S_IRUGO); |
784 | -module_param(txq_def, int, S_IRUGO); |
785 | diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c |
786 | index 16c8429..6bd9167 100644 |
787 | --- a/drivers/net/usb/cdc_mbim.c |
788 | +++ b/drivers/net/usb/cdc_mbim.c |
789 | @@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb |
790 | goto error; |
791 | |
792 | if (skb) { |
793 | - if (skb->len <= sizeof(ETH_HLEN)) |
794 | + if (skb->len <= ETH_HLEN) |
795 | goto error; |
796 | |
797 | /* mapping VLANs to MBIM sessions: |
798 | diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c |
799 | index da9fde8..892ecda 100644 |
800 | --- a/drivers/tty/tty_io.c |
801 | +++ b/drivers/tty/tty_io.c |
802 | @@ -941,6 +941,14 @@ void start_tty(struct tty_struct *tty) |
803 | |
804 | EXPORT_SYMBOL(start_tty); |
805 | |
806 | +static void tty_update_time(struct timespec *time) |
807 | +{ |
808 | + unsigned long sec = get_seconds(); |
809 | + sec -= sec % 60; |
810 | + if ((long)(sec - time->tv_sec) > 0) |
811 | + time->tv_sec = sec; |
812 | +} |
813 | + |
814 | /** |
815 | * tty_read - read method for tty device files |
816 | * @file: pointer to tty file |
817 | @@ -977,8 +985,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, |
818 | else |
819 | i = -EIO; |
820 | tty_ldisc_deref(ld); |
821 | + |
822 | if (i > 0) |
823 | - inode->i_atime = current_fs_time(inode->i_sb); |
824 | + tty_update_time(&inode->i_atime); |
825 | + |
826 | return i; |
827 | } |
828 | |
829 | @@ -1081,7 +1091,7 @@ static inline ssize_t do_tty_write( |
830 | } |
831 | if (written) { |
832 | struct inode *inode = file->f_path.dentry->d_inode; |
833 | - inode->i_mtime = current_fs_time(inode->i_sb); |
834 | + tty_update_time(&inode->i_mtime); |
835 | ret = written; |
836 | } |
837 | out: |
838 | diff --git a/fs/aio.c b/fs/aio.c |
839 | index 71f613c..ed762ae 100644 |
840 | --- a/fs/aio.c |
841 | +++ b/fs/aio.c |
842 | @@ -1027,9 +1027,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent) |
843 | spin_unlock(&info->ring_lock); |
844 | |
845 | out: |
846 | - kunmap_atomic(ring); |
847 | dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret, |
848 | (unsigned long)ring->head, (unsigned long)ring->tail); |
849 | + kunmap_atomic(ring); |
850 | return ret; |
851 | } |
852 | |
853 | diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h |
854 | index 9ef07d0..0e182f9 100644 |
855 | --- a/include/linux/netdevice.h |
856 | +++ b/include/linux/netdevice.h |
857 | @@ -208,9 +208,9 @@ struct netdev_hw_addr { |
858 | #define NETDEV_HW_ADDR_T_SLAVE 3 |
859 | #define NETDEV_HW_ADDR_T_UNICAST 4 |
860 | #define NETDEV_HW_ADDR_T_MULTICAST 5 |
861 | - bool synced; |
862 | bool global_use; |
863 | int refcount; |
864 | + int synced; |
865 | struct rcu_head rcu_head; |
866 | }; |
867 | |
868 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
869 | index 98399e2..9fe54b6 100644 |
870 | --- a/include/linux/skbuff.h |
871 | +++ b/include/linux/skbuff.h |
872 | @@ -2597,6 +2597,13 @@ static inline void nf_reset(struct sk_buff *skb) |
873 | #endif |
874 | } |
875 | |
876 | +static inline void nf_reset_trace(struct sk_buff *skb) |
877 | +{ |
878 | +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) |
879 | + skb->nf_trace = 0; |
880 | +#endif |
881 | +} |
882 | + |
883 | /* Note: This doesn't put any conntrack and bridge info in dst. */ |
884 | static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) |
885 | { |
886 | diff --git a/include/net/scm.h b/include/net/scm.h |
887 | index 975cca0..b117081 100644 |
888 | --- a/include/net/scm.h |
889 | +++ b/include/net/scm.h |
890 | @@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm, |
891 | scm->pid = get_pid(pid); |
892 | scm->cred = cred ? get_cred(cred) : NULL; |
893 | scm->creds.pid = pid_vnr(pid); |
894 | - scm->creds.uid = cred ? cred->euid : INVALID_UID; |
895 | - scm->creds.gid = cred ? cred->egid : INVALID_GID; |
896 | + scm->creds.uid = cred ? cred->uid : INVALID_UID; |
897 | + scm->creds.gid = cred ? cred->gid : INVALID_GID; |
898 | } |
899 | |
900 | static __inline__ void scm_destroy_cred(struct scm_cookie *scm) |
901 | diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c |
902 | index 4762316..5fc7aa5 100644 |
903 | --- a/kernel/trace/trace_selftest.c |
904 | +++ b/kernel/trace/trace_selftest.c |
905 | @@ -452,7 +452,6 @@ trace_selftest_function_recursion(void) |
906 | char *func_name; |
907 | int len; |
908 | int ret; |
909 | - int cnt; |
910 | |
911 | /* The previous test PASSED */ |
912 | pr_cont("PASSED\n"); |
913 | @@ -510,19 +509,10 @@ trace_selftest_function_recursion(void) |
914 | |
915 | unregister_ftrace_function(&test_recsafe_probe); |
916 | |
917 | - /* |
918 | - * If arch supports all ftrace features, and no other task |
919 | - * was on the list, we should be fine. |
920 | - */ |
921 | - if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC) |
922 | - cnt = 2; /* Should have recursed */ |
923 | - else |
924 | - cnt = 1; |
925 | - |
926 | ret = -1; |
927 | - if (trace_selftest_recursion_cnt != cnt) { |
928 | - pr_cont("*callback not called expected %d times (%d)* ", |
929 | - cnt, trace_selftest_recursion_cnt); |
930 | + if (trace_selftest_recursion_cnt != 2) { |
931 | + pr_cont("*callback not called expected 2 times (%d)* ", |
932 | + trace_selftest_recursion_cnt); |
933 | goto out; |
934 | } |
935 | |
936 | diff --git a/net/atm/common.c b/net/atm/common.c |
937 | index 806fc0a..cf4b7e6 100644 |
938 | --- a/net/atm/common.c |
939 | +++ b/net/atm/common.c |
940 | @@ -532,6 +532,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, |
941 | struct sk_buff *skb; |
942 | int copied, error = -EINVAL; |
943 | |
944 | + msg->msg_namelen = 0; |
945 | + |
946 | if (sock->state != SS_CONNECTED) |
947 | return -ENOTCONN; |
948 | |
949 | diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c |
950 | index 779095d..d53a123 100644 |
951 | --- a/net/ax25/af_ax25.c |
952 | +++ b/net/ax25/af_ax25.c |
953 | @@ -1647,6 +1647,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, |
954 | ax25_address src; |
955 | const unsigned char *mac = skb_mac_header(skb); |
956 | |
957 | + memset(sax, 0, sizeof(struct full_sockaddr_ax25)); |
958 | ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, |
959 | &digi, NULL, NULL); |
960 | sax->sax25_family = AF_AX25; |
961 | diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c |
962 | index 5355df6..b04795e 100644 |
963 | --- a/net/bluetooth/af_bluetooth.c |
964 | +++ b/net/bluetooth/af_bluetooth.c |
965 | @@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, |
966 | if (flags & (MSG_OOB)) |
967 | return -EOPNOTSUPP; |
968 | |
969 | + msg->msg_namelen = 0; |
970 | + |
971 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
972 | if (!skb) { |
973 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
974 | @@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, |
975 | return err; |
976 | } |
977 | |
978 | - msg->msg_namelen = 0; |
979 | - |
980 | copied = skb->len; |
981 | if (len < copied) { |
982 | msg->msg_flags |= MSG_TRUNC; |
983 | diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c |
984 | index ce3f665..970fc13 100644 |
985 | --- a/net/bluetooth/rfcomm/sock.c |
986 | +++ b/net/bluetooth/rfcomm/sock.c |
987 | @@ -610,6 +610,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, |
988 | |
989 | if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { |
990 | rfcomm_dlc_accept(d); |
991 | + msg->msg_namelen = 0; |
992 | return 0; |
993 | } |
994 | |
995 | diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c |
996 | index aaf1957..cc16d1b 100644 |
997 | --- a/net/bluetooth/sco.c |
998 | +++ b/net/bluetooth/sco.c |
999 | @@ -667,6 +667,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, |
1000 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { |
1001 | hci_conn_accept(pi->conn->hcon, 0); |
1002 | sk->sk_state = BT_CONFIG; |
1003 | + msg->msg_namelen = 0; |
1004 | |
1005 | release_sock(sk); |
1006 | return 0; |
1007 | diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c |
1008 | index 095259f..ff2ff3c 100644 |
1009 | --- a/net/caif/caif_socket.c |
1010 | +++ b/net/caif/caif_socket.c |
1011 | @@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, |
1012 | if (m->msg_flags&MSG_OOB) |
1013 | goto read_error; |
1014 | |
1015 | + m->msg_namelen = 0; |
1016 | + |
1017 | skb = skb_recv_datagram(sk, flags, 0 , &ret); |
1018 | if (!skb) |
1019 | goto read_error; |
1020 | diff --git a/net/core/dev.c b/net/core/dev.c |
1021 | index 5d9c43d..d592214 100644 |
1022 | --- a/net/core/dev.c |
1023 | +++ b/net/core/dev.c |
1024 | @@ -1737,6 +1737,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
1025 | skb->mark = 0; |
1026 | secpath_reset(skb); |
1027 | nf_reset(skb); |
1028 | + nf_reset_trace(skb); |
1029 | return netif_rx(skb); |
1030 | } |
1031 | EXPORT_SYMBOL_GPL(dev_forward_skb); |
1032 | @@ -2017,6 +2018,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb) |
1033 | struct net_device *dev = skb->dev; |
1034 | const char *driver = ""; |
1035 | |
1036 | + if (!net_ratelimit()) |
1037 | + return; |
1038 | + |
1039 | if (dev && dev->dev.parent) |
1040 | driver = dev_driver_string(dev->dev.parent); |
1041 | |
1042 | diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c |
1043 | index b079c7b..7841d87 100644 |
1044 | --- a/net/core/dev_addr_lists.c |
1045 | +++ b/net/core/dev_addr_lists.c |
1046 | @@ -38,7 +38,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, |
1047 | ha->type = addr_type; |
1048 | ha->refcount = 1; |
1049 | ha->global_use = global; |
1050 | - ha->synced = false; |
1051 | + ha->synced = 0; |
1052 | list_add_tail_rcu(&ha->list, &list->list); |
1053 | list->count++; |
1054 | |
1055 | @@ -166,7 +166,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, |
1056 | addr_len, ha->type); |
1057 | if (err) |
1058 | break; |
1059 | - ha->synced = true; |
1060 | + ha->synced++; |
1061 | ha->refcount++; |
1062 | } else if (ha->refcount == 1) { |
1063 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); |
1064 | @@ -187,7 +187,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, |
1065 | if (ha->synced) { |
1066 | __hw_addr_del(to_list, ha->addr, |
1067 | addr_len, ha->type); |
1068 | - ha->synced = false; |
1069 | + ha->synced--; |
1070 | __hw_addr_del(from_list, ha->addr, |
1071 | addr_len, ha->type); |
1072 | } |
1073 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
1074 | index 6212ec9..055fb13 100644 |
1075 | --- a/net/core/rtnetlink.c |
1076 | +++ b/net/core/rtnetlink.c |
1077 | @@ -1068,7 +1068,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
1078 | rcu_read_lock(); |
1079 | cb->seq = net->dev_base_seq; |
1080 | |
1081 | - if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, |
1082 | + if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, |
1083 | ifla_policy) >= 0) { |
1084 | |
1085 | if (tb[IFLA_EXT_MASK]) |
1086 | @@ -1924,7 +1924,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) |
1087 | u32 ext_filter_mask = 0; |
1088 | u16 min_ifinfo_dump_size = 0; |
1089 | |
1090 | - if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, |
1091 | + if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, |
1092 | ifla_policy) >= 0) { |
1093 | if (tb[IFLA_EXT_MASK]) |
1094 | ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); |
1095 | diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c |
1096 | index 3b4f0cd..4cfe34d 100644 |
1097 | --- a/net/ipv4/esp4.c |
1098 | +++ b/net/ipv4/esp4.c |
1099 | @@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) |
1100 | |
1101 | /* skb is pure payload to encrypt */ |
1102 | |
1103 | - err = -ENOMEM; |
1104 | - |
1105 | esp = x->data; |
1106 | aead = esp->aead; |
1107 | alen = crypto_aead_authsize(aead); |
1108 | @@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) |
1109 | } |
1110 | |
1111 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); |
1112 | - if (!tmp) |
1113 | + if (!tmp) { |
1114 | + err = -ENOMEM; |
1115 | goto error; |
1116 | + } |
1117 | |
1118 | seqhi = esp_tmp_seqhi(tmp); |
1119 | iv = esp_tmp_iv(aead, tmp, seqhilen); |
1120 | diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c |
1121 | index a8fc332..0fcfee3 100644 |
1122 | --- a/net/ipv4/ip_fragment.c |
1123 | +++ b/net/ipv4/ip_fragment.c |
1124 | @@ -255,8 +255,7 @@ static void ip_expire(unsigned long arg) |
1125 | if (!head->dev) |
1126 | goto out_rcu_unlock; |
1127 | |
1128 | - /* skb dst is stale, drop it, and perform route lookup again */ |
1129 | - skb_dst_drop(head); |
1130 | + /* skb has no dst, perform route lookup again */ |
1131 | iph = ip_hdr(head); |
1132 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, |
1133 | iph->tos, head->dev); |
1134 | @@ -525,8 +524,16 @@ found: |
1135 | qp->q.max_size = skb->len + ihl; |
1136 | |
1137 | if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && |
1138 | - qp->q.meat == qp->q.len) |
1139 | - return ip_frag_reasm(qp, prev, dev); |
1140 | + qp->q.meat == qp->q.len) { |
1141 | + unsigned long orefdst = skb->_skb_refdst; |
1142 | + |
1143 | + skb->_skb_refdst = 0UL; |
1144 | + err = ip_frag_reasm(qp, prev, dev); |
1145 | + skb->_skb_refdst = orefdst; |
1146 | + return err; |
1147 | + } |
1148 | + |
1149 | + skb_dst_drop(skb); |
1150 | |
1151 | write_lock(&ip4_frags.lock); |
1152 | list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list); |
1153 | diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c |
1154 | index b236ef0..f962f19 100644 |
1155 | --- a/net/ipv4/syncookies.c |
1156 | +++ b/net/ipv4/syncookies.c |
1157 | @@ -348,8 +348,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, |
1158 | * hasn't changed since we received the original syn, but I see |
1159 | * no easy way to do this. |
1160 | */ |
1161 | - flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), |
1162 | - RT_SCOPE_UNIVERSE, IPPROTO_TCP, |
1163 | + flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, |
1164 | + RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, |
1165 | inet_sk_flowi_flags(sk), |
1166 | (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, |
1167 | ireq->loc_addr, th->source, th->dest); |
1168 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
1169 | index 9841a71..b4e8b79 100644 |
1170 | --- a/net/ipv4/tcp_input.c |
1171 | +++ b/net/ipv4/tcp_input.c |
1172 | @@ -116,6 +116,7 @@ int sysctl_tcp_early_retrans __read_mostly = 2; |
1173 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ |
1174 | #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ |
1175 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ |
1176 | +#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ |
1177 | |
1178 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) |
1179 | #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) |
1180 | @@ -3572,6 +3573,27 @@ static void tcp_send_challenge_ack(struct sock *sk) |
1181 | } |
1182 | } |
1183 | |
1184 | +static void tcp_store_ts_recent(struct tcp_sock *tp) |
1185 | +{ |
1186 | + tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; |
1187 | + tp->rx_opt.ts_recent_stamp = get_seconds(); |
1188 | +} |
1189 | + |
1190 | +static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) |
1191 | +{ |
1192 | + if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { |
1193 | + /* PAWS bug workaround wrt. ACK frames, the PAWS discard |
1194 | + * extra check below makes sure this can only happen |
1195 | + * for pure ACK frames. -DaveM |
1196 | + * |
1197 | + * Not only, also it occurs for expired timestamps. |
1198 | + */ |
1199 | + |
1200 | + if (tcp_paws_check(&tp->rx_opt, 0)) |
1201 | + tcp_store_ts_recent(tp); |
1202 | + } |
1203 | +} |
1204 | + |
1205 | /* This routine deals with incoming acks, but not outgoing ones. */ |
1206 | static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
1207 | { |
1208 | @@ -3624,6 +3646,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
1209 | prior_fackets = tp->fackets_out; |
1210 | prior_in_flight = tcp_packets_in_flight(tp); |
1211 | |
1212 | + /* ts_recent update must be made after we are sure that the packet |
1213 | + * is in window. |
1214 | + */ |
1215 | + if (flag & FLAG_UPDATE_TS_RECENT) |
1216 | + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); |
1217 | + |
1218 | if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { |
1219 | /* Window is constant, pure forward advance. |
1220 | * No more checks are required. |
1221 | @@ -3940,27 +3968,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) |
1222 | EXPORT_SYMBOL(tcp_parse_md5sig_option); |
1223 | #endif |
1224 | |
1225 | -static inline void tcp_store_ts_recent(struct tcp_sock *tp) |
1226 | -{ |
1227 | - tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; |
1228 | - tp->rx_opt.ts_recent_stamp = get_seconds(); |
1229 | -} |
1230 | - |
1231 | -static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) |
1232 | -{ |
1233 | - if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { |
1234 | - /* PAWS bug workaround wrt. ACK frames, the PAWS discard |
1235 | - * extra check below makes sure this can only happen |
1236 | - * for pure ACK frames. -DaveM |
1237 | - * |
1238 | - * Not only, also it occurs for expired timestamps. |
1239 | - */ |
1240 | - |
1241 | - if (tcp_paws_check(&tp->rx_opt, 0)) |
1242 | - tcp_store_ts_recent(tp); |
1243 | - } |
1244 | -} |
1245 | - |
1246 | /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM |
1247 | * |
1248 | * It is not fatal. If this ACK does _not_ change critical state (seqs, window) |
1249 | @@ -5556,14 +5563,9 @@ slow_path: |
1250 | return 0; |
1251 | |
1252 | step5: |
1253 | - if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) |
1254 | + if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) |
1255 | goto discard; |
1256 | |
1257 | - /* ts_recent update must be made after we are sure that the packet |
1258 | - * is in window. |
1259 | - */ |
1260 | - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); |
1261 | - |
1262 | tcp_rcv_rtt_measure_ts(sk, skb); |
1263 | |
1264 | /* Process urgent data. */ |
1265 | @@ -5997,7 +5999,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, |
1266 | |
1267 | /* step 5: check the ACK field */ |
1268 | if (true) { |
1269 | - int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; |
1270 | + int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | |
1271 | + FLAG_UPDATE_TS_RECENT) > 0; |
1272 | |
1273 | switch (sk->sk_state) { |
1274 | case TCP_SYN_RECV: |
1275 | @@ -6148,11 +6151,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, |
1276 | } |
1277 | } |
1278 | |
1279 | - /* ts_recent update must be made after we are sure that the packet |
1280 | - * is in window. |
1281 | - */ |
1282 | - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); |
1283 | - |
1284 | /* step 6: check the URG bit */ |
1285 | tcp_urg(sk, skb, th); |
1286 | |
1287 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
1288 | index 17d659e..a9f50ee 100644 |
1289 | --- a/net/ipv4/tcp_output.c |
1290 | +++ b/net/ipv4/tcp_output.c |
1291 | @@ -2388,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) |
1292 | */ |
1293 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
1294 | |
1295 | - /* make sure skb->data is aligned on arches that require it */ |
1296 | - if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { |
1297 | + /* make sure skb->data is aligned on arches that require it |
1298 | + * and check if ack-trimming & collapsing extended the headroom |
1299 | + * beyond what csum_start can cover. |
1300 | + */ |
1301 | + if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || |
1302 | + skb_headroom(skb) >= 0xFFFF)) { |
1303 | struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, |
1304 | GFP_ATOMIC); |
1305 | return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : |
1306 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
1307 | index a36d17e..e8676c2 100644 |
1308 | --- a/net/ipv6/addrconf.c |
1309 | +++ b/net/ipv6/addrconf.c |
1310 | @@ -2525,6 +2525,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) |
1311 | static void init_loopback(struct net_device *dev) |
1312 | { |
1313 | struct inet6_dev *idev; |
1314 | + struct net_device *sp_dev; |
1315 | + struct inet6_ifaddr *sp_ifa; |
1316 | + struct rt6_info *sp_rt; |
1317 | |
1318 | /* ::1 */ |
1319 | |
1320 | @@ -2536,6 +2539,30 @@ static void init_loopback(struct net_device *dev) |
1321 | } |
1322 | |
1323 | add_addr(idev, &in6addr_loopback, 128, IFA_HOST); |
1324 | + |
1325 | + /* Add routes to other interface's IPv6 addresses */ |
1326 | + for_each_netdev(dev_net(dev), sp_dev) { |
1327 | + if (!strcmp(sp_dev->name, dev->name)) |
1328 | + continue; |
1329 | + |
1330 | + idev = __in6_dev_get(sp_dev); |
1331 | + if (!idev) |
1332 | + continue; |
1333 | + |
1334 | + read_lock_bh(&idev->lock); |
1335 | + list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { |
1336 | + |
1337 | + if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) |
1338 | + continue; |
1339 | + |
1340 | + sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); |
1341 | + |
1342 | + /* Failure cases are ignored */ |
1343 | + if (!IS_ERR(sp_rt)) |
1344 | + ip6_ins_rt(sp_rt); |
1345 | + } |
1346 | + read_unlock_bh(&idev->lock); |
1347 | + } |
1348 | } |
1349 | |
1350 | static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) |
1351 | diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c |
1352 | index d9ba8a2..7a610a6 100644 |
1353 | --- a/net/ipv6/reassembly.c |
1354 | +++ b/net/ipv6/reassembly.c |
1355 | @@ -342,8 +342,17 @@ found: |
1356 | } |
1357 | |
1358 | if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && |
1359 | - fq->q.meat == fq->q.len) |
1360 | - return ip6_frag_reasm(fq, prev, dev); |
1361 | + fq->q.meat == fq->q.len) { |
1362 | + int res; |
1363 | + unsigned long orefdst = skb->_skb_refdst; |
1364 | + |
1365 | + skb->_skb_refdst = 0UL; |
1366 | + res = ip6_frag_reasm(fq, prev, dev); |
1367 | + skb->_skb_refdst = orefdst; |
1368 | + return res; |
1369 | + } |
1370 | + |
1371 | + skb_dst_drop(skb); |
1372 | |
1373 | write_lock(&ip6_frags.lock); |
1374 | list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list); |
1375 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
1376 | index 8d19346..89dfedd 100644 |
1377 | --- a/net/ipv6/tcp_ipv6.c |
1378 | +++ b/net/ipv6/tcp_ipv6.c |
1379 | @@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
1380 | |
1381 | if (dst) |
1382 | dst->ops->redirect(dst, sk, skb); |
1383 | + goto out; |
1384 | } |
1385 | |
1386 | if (type == ICMPV6_PKT_TOOBIG) { |
1387 | diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c |
1388 | index 4d04105..3c9bd59 100644 |
1389 | --- a/net/irda/af_irda.c |
1390 | +++ b/net/irda/af_irda.c |
1391 | @@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, |
1392 | |
1393 | IRDA_DEBUG(4, "%s()\n", __func__); |
1394 | |
1395 | + msg->msg_namelen = 0; |
1396 | + |
1397 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
1398 | flags & MSG_DONTWAIT, &err); |
1399 | if (!skb) |
1400 | diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c |
1401 | index cd6f7a9..625bc50 100644 |
1402 | --- a/net/iucv/af_iucv.c |
1403 | +++ b/net/iucv/af_iucv.c |
1404 | @@ -1331,6 +1331,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, |
1405 | struct sk_buff *skb, *rskb, *cskb; |
1406 | int err = 0; |
1407 | |
1408 | + msg->msg_namelen = 0; |
1409 | + |
1410 | if ((sk->sk_state == IUCV_DISCONN) && |
1411 | skb_queue_empty(&iucv->backlog_skb_q) && |
1412 | skb_queue_empty(&sk->sk_receive_queue) && |
1413 | diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c |
1414 | index 8ee4a86..9e1822e 100644 |
1415 | --- a/net/l2tp/l2tp_ip6.c |
1416 | +++ b/net/l2tp/l2tp_ip6.c |
1417 | @@ -684,6 +684,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, |
1418 | lsa->l2tp_addr = ipv6_hdr(skb)->saddr; |
1419 | lsa->l2tp_flowinfo = 0; |
1420 | lsa->l2tp_scope_id = 0; |
1421 | + lsa->l2tp_conn_id = 0; |
1422 | if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) |
1423 | lsa->l2tp_scope_id = IP6CB(skb)->iif; |
1424 | } |
1425 | diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c |
1426 | index 8870988..48aaa89 100644 |
1427 | --- a/net/llc/af_llc.c |
1428 | +++ b/net/llc/af_llc.c |
1429 | @@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, |
1430 | int target; /* Read at least this many bytes */ |
1431 | long timeo; |
1432 | |
1433 | + msg->msg_namelen = 0; |
1434 | + |
1435 | lock_sock(sk); |
1436 | copied = -ENOTCONN; |
1437 | if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) |
1438 | diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c |
1439 | index 7261eb8..14c106b 100644 |
1440 | --- a/net/netrom/af_netrom.c |
1441 | +++ b/net/netrom/af_netrom.c |
1442 | @@ -1177,6 +1177,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, |
1443 | } |
1444 | |
1445 | if (sax != NULL) { |
1446 | + memset(sax, 0, sizeof(sax)); |
1447 | sax->sax25_family = AF_NETROM; |
1448 | skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, |
1449 | AX25_ADDR_LEN); |
1450 | diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c |
1451 | index fea22eb..48fb1de 100644 |
1452 | --- a/net/nfc/llcp/sock.c |
1453 | +++ b/net/nfc/llcp/sock.c |
1454 | @@ -644,6 +644,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, |
1455 | |
1456 | pr_debug("%p %zu\n", sk, len); |
1457 | |
1458 | + msg->msg_namelen = 0; |
1459 | + |
1460 | lock_sock(sk); |
1461 | |
1462 | if (sk->sk_state == LLCP_CLOSED && |
1463 | @@ -684,6 +686,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, |
1464 | |
1465 | pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); |
1466 | |
1467 | + memset(&sockaddr, 0, sizeof(sockaddr)); |
1468 | sockaddr.sa_family = AF_NFC; |
1469 | sockaddr.nfc_protocol = NFC_PROTO_NFC_DEP; |
1470 | sockaddr.dsap = ui_cb->dsap; |
1471 | diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c |
1472 | index c4719ce..7f645d1 100644 |
1473 | --- a/net/rose/af_rose.c |
1474 | +++ b/net/rose/af_rose.c |
1475 | @@ -1257,6 +1257,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, |
1476 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
1477 | |
1478 | if (srose != NULL) { |
1479 | + memset(srose, 0, msg->msg_namelen); |
1480 | srose->srose_family = AF_ROSE; |
1481 | srose->srose_addr = rose->dest_addr; |
1482 | srose->srose_call = rose->dest_call; |
1483 | diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c |
1484 | index 0e19948..ced81a1 100644 |
1485 | --- a/net/sched/sch_cbq.c |
1486 | +++ b/net/sched/sch_cbq.c |
1487 | @@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch) |
1488 | cbq_update(q); |
1489 | if ((incr -= incr2) < 0) |
1490 | incr = 0; |
1491 | + q->now += incr; |
1492 | + } else { |
1493 | + if (now > q->now) |
1494 | + q->now = now; |
1495 | } |
1496 | - q->now += incr; |
1497 | q->now_rt = now; |
1498 | |
1499 | for (;;) { |
1500 | diff --git a/net/tipc/socket.c b/net/tipc/socket.c |
1501 | index 9b4e483..fc906d9 100644 |
1502 | --- a/net/tipc/socket.c |
1503 | +++ b/net/tipc/socket.c |
1504 | @@ -806,6 +806,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) |
1505 | if (addr) { |
1506 | addr->family = AF_TIPC; |
1507 | addr->addrtype = TIPC_ADDR_ID; |
1508 | + memset(&addr->addr, 0, sizeof(addr->addr)); |
1509 | addr->addr.id.ref = msg_origport(msg); |
1510 | addr->addr.id.node = msg_orignode(msg); |
1511 | addr->addr.name.domain = 0; /* could leave uninitialized */ |
1512 | @@ -920,6 +921,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, |
1513 | goto exit; |
1514 | } |
1515 | |
1516 | + /* will be updated in set_orig_addr() if needed */ |
1517 | + m->msg_namelen = 0; |
1518 | + |
1519 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
1520 | restart: |
1521 | |
1522 | @@ -1029,6 +1033,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, |
1523 | goto exit; |
1524 | } |
1525 | |
1526 | + /* will be updated in set_orig_addr() if needed */ |
1527 | + m->msg_namelen = 0; |
1528 | + |
1529 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); |
1530 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
1531 | |
1532 | diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c |
1533 | index b45eb65..f347754 100644 |
1534 | --- a/net/unix/af_unix.c |
1535 | +++ b/net/unix/af_unix.c |
1536 | @@ -1995,7 +1995,7 @@ again: |
1537 | if ((UNIXCB(skb).pid != siocb->scm->pid) || |
1538 | (UNIXCB(skb).cred != siocb->scm->cred)) |
1539 | break; |
1540 | - } else { |
1541 | + } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { |
1542 | /* Copy credentials */ |
1543 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); |
1544 | check_creds = 1; |