Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0173-4.9.74-all-fixes.patch



Revision 3062
Wed Jan 10 10:33:40 2018 UTC by niro
File size: 98884 bytes
-linux-4.9.74
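The patch below is the 4.9.73 -> 4.9.74 stable update carried in the kernel-alx tree. As an illustration only (this command is not part of the original page), a diff in this a/ b/ format would typically be applied from the top of a 4.9.73 source tree with:

    patch -p1 < 0173-4.9.74-all-fixes.patch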
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 86a6746f6833..152ec4e87b57 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2795,6 +2795,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nopat [X86] Disable PAT (page attribute table extension of
pagetables) support.

+ nopcid [X86-64] Disable the PCID cpu feature.
+
norandmaps Don't use address space randomization. Equivalent to
echo 0 > /proc/sys/kernel/randomize_va_space

diff --git a/Makefile b/Makefile
index 64eb0bf614ee..075e429732e7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 73
+SUBLEVEL = 74
EXTRAVERSION =
NAME = Roaring Lionus

@@ -788,6 +788,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)

+# Make sure -fstack-check isn't enabled (like gentoo apparently did)
+KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
+
# conserve stack if available
KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b9c546a305a4..da8156fd3d58 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -45,7 +45,7 @@ config X86
select ARCH_USE_CMPXCHG_LOCKREF if X86_64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
- select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION if X86_32
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 85599ad4d024..21c5ac15657b 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -21,11 +21,13 @@
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID 0
#else
# define DISABLE_VME 0
# define DISABLE_K6_MTRR 0
# define DISABLE_CYRIX_ARR 0
# define DISABLE_CENTAUR_MCR 0
+# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -43,7 +45,7 @@
#define DISABLED_MASK1 0
#define DISABLED_MASK2 0
#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4 0
+#define DISABLED_MASK4 (DISABLE_PCID)
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
#define DISABLED_MASK7 0
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 59405a248fc2..9b76cd331990 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -22,8 +22,8 @@ typedef struct {
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
- unsigned int irq_tlb_count;
#endif
+ unsigned int irq_tlb_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
unsigned int irq_thermal_count;
#endif
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 72198c64e646..8b272a08d1a8 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -33,12 +33,6 @@ typedef struct {
#endif
} mm_context_t;

-#ifdef CONFIG_SMP
void leave_mm(int cpu);
-#else
-static inline void leave_mm(int cpu)
-{
-}
-#endif

#endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f9dd22469388..d23e35584f15 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -99,10 +99,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
-#ifdef CONFIG_SMP
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
}

static inline int init_new_context(struct task_struct *tsk,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index fc5abff9b7fd..7d2ea6b1f7d9 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -7,6 +7,7 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
+#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
unsigned long type)
@@ -65,10 +66,8 @@ static inline void invpcid_flush_all_nonglobals(void)
#endif

struct tlb_state {
-#ifdef CONFIG_SMP
struct mm_struct *active_mm;
int state;
-#endif

/*
* Access to this CR4 shadow and to H/W CR4 is protected by
@@ -192,6 +191,14 @@ static inline void __flush_tlb_all(void)
__flush_tlb_global();
else
__flush_tlb();
+
+ /*
+ * Note: if we somehow had PCID but not PGE, then this wouldn't work --
+ * we'd end up flushing kernel translations for the current ASID but
+ * we might fail to flush kernel translations for other cached ASIDs.
+ *
+ * To avoid this issue, we force PCID off if PGE is off.
+ */
}

static inline void __flush_tlb_one(unsigned long addr)
@@ -205,7 +212,6 @@ static inline void __flush_tlb_one(unsigned long addr)
/*
* TLB flushing:
*
- * - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
@@ -217,84 +223,6 @@ static inline void __flush_tlb_one(unsigned long addr)
* and page-granular flushes are available only on i486 and up.
*/

-#ifndef CONFIG_SMP
-
-/* "_up" is for UniProcessor.
- *
- * This is a helper for other header functions. *Not* intended to be called
- * directly. All global TLB flushes need to either call this, or to bump the
- * vm statistics themselves.
- */
-static inline void __flush_tlb_up(void)
-{
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- __flush_tlb();
-}
-
-static inline void flush_tlb_all(void)
-{
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- __flush_tlb_all();
-}
-
-static inline void flush_tlb(void)
-{
- __flush_tlb_up();
-}
-
-static inline void local_flush_tlb(void)
-{
- __flush_tlb_up();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm == current->active_mm)
- __flush_tlb_up();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb_up();
-}
-
-static inline void flush_tlb_mm_range(struct mm_struct *mm,
- unsigned long start, unsigned long end, unsigned long vmflag)
-{
- if (mm == current->active_mm)
- __flush_tlb_up();
-}
-
-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
-{
-}
-
-static inline void reset_lazy_tlbstate(void)
-{
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
- flush_tlb_all();
-}
-
-#else /* SMP */
-
-#include <asm/smp.h>
-
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
@@ -303,13 +231,14 @@ static inline void flush_tlb_kernel_range(unsigned long start,
flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

-#define flush_tlb() flush_tlb_current_task()
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
+{
+ flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+}

void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
@@ -324,8 +253,6 @@ static inline void reset_lazy_tlbstate(void)
this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

-#endif /* SMP */
-
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end) \
native_flush_tlb_others(mask, mm, start, end)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bd17db15a2c1..0b6124315441 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -19,6 +19,14 @@

void __init check_bugs(void)
{
+#ifdef CONFIG_X86_32
+ /*
+ * Regardless of whether PCID is enumerated, the SDM says
+ * that it can't be enabled in 32-bit mode.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
+
identify_boot_cpu();
#ifndef CONFIG_SMP
pr_info("CPU: ");
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4eece91ada37..91588be529b9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -163,6 +163,24 @@ static int __init x86_mpx_setup(char *s)
}
__setup("nompx", x86_mpx_setup);

+#ifdef CONFIG_X86_64
+static int __init x86_pcid_setup(char *s)
+{
+ /* require an exact match without trailing characters */
+ if (strlen(s))
+ return 0;
+
+ /* do not emit a message if the feature is not present */
+ if (!boot_cpu_has(X86_FEATURE_PCID))
+ return 1;
+
+ setup_clear_cpu_cap(X86_FEATURE_PCID);
+ pr_info("nopcid: PCID feature disabled\n");
+ return 1;
+}
+__setup("nopcid", x86_pcid_setup);
+#endif
+
static int __init x86_noinvpcid_setup(char *s)
{
/* noinvpcid doesn't accept parameters */
@@ -306,6 +324,25 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
}
}

+static void setup_pcid(struct cpuinfo_x86 *c)
+{
+ if (cpu_has(c, X86_FEATURE_PCID)) {
+ if (cpu_has(c, X86_FEATURE_PGE)) {
+ cr4_set_bits(X86_CR4_PCIDE);
+ } else {
+ /*
+ * flush_tlb_all(), as currently implemented, won't
+ * work if PCID is on but PGE is not. Since that
+ * combination doesn't exist on real hardware, there's
+ * no reason to try to fully support it, but it's
+ * polite to avoid corrupting data if we're on
+ * an improperly configured VM.
+ */
+ clear_cpu_cap(c, X86_FEATURE_PCID);
+ }
+ }
+}
+
/*
* Protection Keys are not available in 32-bit mode.
*/
@@ -1064,6 +1101,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);

+ /* Set up PCID */
+ setup_pcid(c);
+
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 067f9813fd2c..ce020a69bba9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -106,6 +106,10 @@ void __noreturn machine_real_restart(unsigned int type)
load_cr3(initial_page_table);
#else
write_cr3(real_mode_header->trampoline_pgd);
+
+ /* Exiting long mode will fail if CR4.PCIDE is set. */
+ if (static_cpu_has(X86_FEATURE_PCID))
+ cr4_clear_bits(X86_CR4_PCIDE);
#endif

/* Jump to the identity-mapped low memory code */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9fe7b9e1ae30..e803d72ef525 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -115,25 +115,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
spin_lock_irqsave(&rtc_lock, flags);
CMOS_WRITE(0xa, 0xf);
spin_unlock_irqrestore(&rtc_lock, flags);
- local_flush_tlb();
- pr_debug("1.\n");
*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
start_eip >> 4;
- pr_debug("2.\n");
*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
start_eip & 0xf;
- pr_debug("3.\n");
}

static inline void smpboot_restore_warm_reset_vector(void)
{
unsigned long flags;

- /*
- * Install writable page 0 entry to set BIOS data area.
- */
- local_flush_tlb();
-
/*
* Paranoid: Set warm reset code and vector here back
* to default values.
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 01f30e56f99e..4b3012888ada 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -191,7 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
pte_unmap_unlock(pte, ptl);
out:
up_write(&mm->mmap_sem);
- flush_tlb();
+ flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
}


diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 889e7619a091..0381638168d1 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -764,10 +764,8 @@ void __init zone_sizes_init(void)
}

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-#ifdef CONFIG_SMP
.active_mm = &init_mm,
.state = 0,
-#endif
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_SYMBOL_GPL(cpu_tlbstate);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 75fb01109f94..53b72fb4e781 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -15,7 +15,7 @@
#include <linux/debugfs.h>

/*
- * Smarter SMP flushing macros.
+ * TLB flushing, formerly SMP-only
* c/o Linus Torvalds.
*
* These mean you can really definitely utterly forget about
@@ -28,8 +28,6 @@
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/

-#ifdef CONFIG_SMP
-
struct flush_tlb_info {
struct mm_struct *flush_mm;
unsigned long flush_start;
@@ -59,8 +57,6 @@ void leave_mm(int cpu)
}
EXPORT_SYMBOL_GPL(leave_mm);

-#endif /* CONFIG_SMP */
-
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
@@ -91,10 +87,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
}

-#ifdef CONFIG_SMP
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
-#endif

cpumask_set_cpu(cpu, mm_cpumask(next));

@@ -152,9 +146,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next);
#endif
- }
-#ifdef CONFIG_SMP
- else {
+ } else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

@@ -181,11 +173,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
load_mm_ldt(next);
}
}
-#endif
}

-#ifdef CONFIG_SMP
-
/*
* The flush IPI assumes that a thread switch happens in this order:
* [cpu0: the cpu that switches]
@@ -287,23 +276,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

-void flush_tlb_current_task(void)
-{
- struct mm_struct *mm = current->mm;
-
- preempt_disable();
-
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-
- /* This is an implicit full barrier that synchronizes with switch_mm. */
- local_flush_tlb();
-
- trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
- if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
- preempt_enable();
-}
-
/*
* See Documentation/x86/tlb.txt for details. We choose 33
* because it is large enough to cover the vast majority (at
@@ -324,6 +296,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

preempt_disable();
+
+ if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+ base_pages_to_flush = (end - start) >> PAGE_SHIFT;
+ if (base_pages_to_flush > tlb_single_page_flush_ceiling)
+ base_pages_to_flush = TLB_FLUSH_ALL;
+
if (current->active_mm != mm) {
/* Synchronize with switch_mm. */
smp_mb();
@@ -340,15 +318,11 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
goto out;
}

- if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
- base_pages_to_flush = (end - start) >> PAGE_SHIFT;
-
/*
* Both branches below are implicit full barriers (MOV to CR or
* INVLPG) that synchronize with switch_mm.
*/
- if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
- base_pages_to_flush = TLB_FLUSH_ALL;
+ if (base_pages_to_flush == TLB_FLUSH_ALL) {
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
} else {
@@ -369,33 +343,6 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
preempt_enable();
}

-void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
-{
- struct mm_struct *mm = vma->vm_mm;
-
- preempt_disable();
-
- if (current->active_mm == mm) {
- if (current->mm) {
- /*
- * Implicit full barrier (INVLPG) that synchronizes
- * with switch_mm.
- */
- __flush_tlb_one(start);
- } else {
- leave_mm(smp_processor_id());
-
- /* Synchronize with switch_mm. */
- smp_mb();
- }
- }
-
- if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
-
- preempt_enable();
-}
-
static void do_flush_tlb_all(void *info)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -480,5 +427,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
-
-#endif /* CONFIG_SMP */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 8f1f7efa848c..2bea87cc0ff2 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -444,6 +444,12 @@ static void __init xen_init_cpuid_mask(void)
~((1 << X86_FEATURE_MTRR) | /* disable MTRR */
(1 << X86_FEATURE_ACC)); /* thermal monitoring */

+ /*
+ * Xen PV would need some work to support PCID: CR3 handling as well
+ * as xen_flush_tlb_others() would need updating.
+ */
+ cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_PCID % 32)); /* disable PCID */
+
if (!xen_initial_domain())
cpuid_leaf1_edx_mask &=
~((1 << X86_FEATURE_ACPI)); /* disable ACPI */
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 19c6477af19f..a856371bbe58 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -575,10 +575,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
ret = -EAGAIN;
goto skip_cqe;
}
- if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+ if (unlikely(!CQE_STATUS(hw_cqe) &&
+ CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
t4_set_wq_in_error(wq);
- hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
- goto proc_cqe;
+ hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
}
goto proc_cqe;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edae2dcc4927..bb22d325e965 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14226,7 +14226,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
- if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+ tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
reset_phy = true;

err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 849b8712ec81..917091871259 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -172,10 +172,12 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
+ *
+ * 2048 byte skbufs are allocated. However, alignment requirements
+ * varies between FEC variants. Worst case is 64, so round down by 64.
*/
-#define PKT_MAXBUF_SIZE 1522
+#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE 64
-#define PKT_MAXBLR_SIZE 1536

/* FEC receive acceleration */
#define FEC_RACC_IPDIS (1 << 1)
@@ -813,6 +815,12 @@ static void fec_enet_bd_init(struct net_device *dev)
for (i = 0; i < txq->bd.ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = cpu_to_fec16(0);
+ if (bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
if (txq->tx_skbuff[i]) {
dev_kfree_skb_any(txq->tx_skbuff[i]);
txq->tx_skbuff[i] = NULL;
@@ -847,7 +855,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+ writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

/* enable DMA1/2 */
if (i)
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index a0d1b084ecec..7aeb7fedb364 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -232,7 +232,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev->regs + MVMDIO_ERR_INT_MASK);

} else if (dev->err_interrupt == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto out_mdio;
}

mutex_init(&dev->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index f7fabecc104f..4c3f1cb7e2c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -367,7 +367,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
- case MLX5_CMD_OP_SET_RATE_LIMIT:
+ case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
case MLX5_CMD_OP_QUERY_RATE_LIMIT:
case MLX5_CMD_OP_ALLOC_PD:
case MLX5_CMD_OP_ALLOC_UAR:
@@ -502,7 +502,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
- MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+ MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(ALLOC_PD);
MLX5_COMMAND_STR_CASE(DEALLOC_PD);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9d3722930c95..38981db43bc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3038,6 +3038,7 @@ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
struct sk_buff *skb,
netdev_features_t features)
{
+ unsigned int offset = 0;
struct udphdr *udph;
u16 proto;
u16 port = 0;
@@ -3047,7 +3048,7 @@ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
proto = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
- proto = ipv6_hdr(skb)->nexthdr;
+ proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
break;
default:
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index d0a4005fe63a..9346f3985edf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -303,8 +303,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err_cmd:
memset(din, 0, sizeof(din));
memset(dout, 0, sizeof(dout));
- MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
- MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 104902a93a0b..2be9ec5fd651 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -60,16 +60,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
return ret_entry;
}

-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
u32 rate, u16 index)
{
- u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};

- MLX5_SET(set_rate_limit_in, in, opcode,
- MLX5_CMD_OP_SET_RATE_LIMIT);
- MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
- MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+ MLX5_SET(set_pp_rate_limit_in, in, opcode,
+ MLX5_CMD_OP_SET_PP_RATE_LIMIT);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

@@ -108,7 +108,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
entry->refcount++;
} else {
/* new rate limit */
- err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+ err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
if (err) {
mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
rate, err);
@@ -144,7 +144,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
entry->refcount--;
if (!entry->refcount) {
/* need to remove rate */
- mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+ mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
entry->rate = 0;
}

@@ -197,8 +197,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
/* Clear all configured rates */
for (i = 0; i < table->max_size; i++)
if (table->rl_entry[i].rate)
- mlx5_set_rate_limit_cmd(dev, 0,
- table->rl_entry[i].index);
+ mlx5_set_pp_rate_limit_cmd(dev, 0,
+ table->rl_entry[i].index);

kfree(dev->priv.rl_table.rl_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 07a9ba6cfc70..2f74953e4561 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
struct mlx5e_vxlan *vxlan;

- spin_lock(&vxlan_db->lock);
+ spin_lock_bh(&vxlan_db->lock);
vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- spin_unlock(&vxlan_db->lock);
+ spin_unlock_bh(&vxlan_db->lock);

return vxlan;
}
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
struct mlx5e_vxlan *vxlan;
int err;

- if (mlx5e_vxlan_lookup_port(priv, port))
+ mutex_lock(&priv->state_lock);
+ vxlan = mlx5e_vxlan_lookup_port(priv, port);
+ if (vxlan) {
+ atomic_inc(&vxlan->refcount);
goto free_work;
+ }

if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
goto err_delete_port;

vxlan->udp_port = port;
+ atomic_set(&vxlan->refcount, 1);

- spin_lock_irq(&vxlan_db->lock);
+ spin_lock_bh(&vxlan_db->lock);
err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
- spin_unlock_irq(&vxlan_db->lock);
+ spin_unlock_bh(&vxlan_db->lock);
if (err)
goto err_free;

@@ -113,35 +118,39 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
err_delete_port:
mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
free_work:
+ mutex_unlock(&priv->state_lock);
kfree(vxlan_work);
}

-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+ u16 port = vxlan_work->port;
struct mlx5e_vxlan *vxlan;
+ bool remove = false;

- spin_lock_irq(&vxlan_db->lock);
- vxlan = radix_tree_delete(&vxlan_db->tree, port);
- spin_unlock_irq(&vxlan_db->lock);
-
+ mutex_lock(&priv->state_lock);
+ spin_lock_bh(&vxlan_db->lock);
+ vxlan = radix_tree_lookup(&vxlan_db->tree, port);
if (!vxlan)
- return;
-
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
- kfree(vxlan);
-}
+ goto out_unlock;

-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- u16 port = vxlan_work->port;
+ if (atomic_dec_and_test(&vxlan->refcount)) {
+ radix_tree_delete(&vxlan_db->tree, port);
+ remove = true;
+ }

- __mlx5e_vxlan_core_del_port(priv, port);
+out_unlock:
+ spin_unlock_bh(&vxlan_db->lock);

+ if (remove) {
+ mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+ kfree(vxlan);
+ }
+ mutex_unlock(&priv->state_lock);
kfree(vxlan_work);
}

@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
struct mlx5e_vxlan *vxlan;
unsigned int port = 0;

- spin_lock_irq(&vxlan_db->lock);
+ /* Lockless since we are the only radix-tree consumers, wq is disabled */
while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
port = vxlan->udp_port;
- spin_unlock_irq(&vxlan_db->lock);
- __mlx5e_vxlan_core_del_port(priv, (u16)port);
- spin_lock_irq(&vxlan_db->lock);
+ radix_tree_delete(&vxlan_db->tree, port);
+ mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+ kfree(vxlan);
}
- spin_unlock_irq(&vxlan_db->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 5def12c048e3..5ef6ae7d568a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -36,6 +36,7 @@
#include "en.h"

struct mlx5e_vxlan {
+ atomic_t refcount;
u16 udp_port;
};

diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fbf5945ce00d..2032a6de026b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
phydev->link = 0;
if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
phydev->drv->config_intr(phydev);
+ return genphy_config_aneg(phydev);
}

return 0;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 105fbfb47e3a..db65d9ad4488 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -907,6 +907,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e72234efb648..9b5fc502f6a1 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -576,9 +576,9 @@ enum qeth_cq {
};

struct qeth_ipato {
- int enabled;
- int invert4;
- int invert6;
+ bool enabled;
+ bool invert4;
+ bool invert6;
struct list_head entries;
};

diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 838ed6213118..df8f74cb1406 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1475,9 +1475,9 @@ static int qeth_setup_card(struct qeth_card *card)
qeth_set_intial_options(card);
/* IP address takeover */
INIT_LIST_HEAD(&card->ipato.entries);
- card->ipato.enabled = 0;
- card->ipato.invert4 = 0;
- card->ipato.invert6 = 0;
+ card->ipato.enabled = false;
+ card->ipato.invert4 = false;
+ card->ipato.invert6 = false;
/* init QDIO stuff */
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 26f79533e62e..eedf9b01a496 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -80,7 +80,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
const u8 *);
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_update_ipato(struct qeth_card *card);
struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index f91e70c369ed..1487f8a0c575 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -168,8 +168,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
}
}

-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
- struct qeth_ipaddr *addr)
+static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+ struct qeth_ipaddr *addr)
{
struct qeth_ipato_entry *ipatoe;
u8 addr_bits[128] = {0, };
@@ -178,6 +178,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,

if (!card->ipato.enabled)
return 0;
+ if (addr->type != QETH_IP_TYPE_NORMAL)
+ return 0;

qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
(addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -293,8 +295,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
addr->ref_counter = 1;

- if (addr->type == QETH_IP_TYPE_NORMAL &&
- qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+ if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
}
@@ -607,6 +608,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
/*
* IP address takeover related functions
*/
+
+/**
+ * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
+ *
+ * Caller must hold ip_lock.
+ */
+void qeth_l3_update_ipato(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ unsigned int i;
+
+ hash_for_each(card->ip_htable, i, addr, hnode) {
+ if (addr->type != QETH_IP_TYPE_NORMAL)
+ continue;
+ if (qeth_l3_is_addr_covered_by_ipato(card, addr))
+ addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+ else
+ addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+ }
+}
+
static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
struct qeth_ipato_entry *ipatoe, *tmp;
@@ -618,6 +640,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
kfree(ipatoe);
}

+ qeth_l3_update_ipato(card);
spin_unlock_bh(&card->ip_lock);
}

@@ -642,8 +665,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
}

- if (!rc)
+ if (!rc) {
list_add_tail(&new->entry, &card->ipato.entries);
+ qeth_l3_update_ipato(card);
+ }

spin_unlock_bh(&card->ip_lock);

@@ -666,6 +691,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
(proto == QETH_PROT_IPV4)? 4:16) &&
(ipatoe->mask_bits == mask_bits)) {
list_del(&ipatoe->entry);
+ qeth_l3_update_ipato(card);
kfree(ipatoe);
}
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index cffe42f5775d..d6bdfc6e905a 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -372,8 +372,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- struct qeth_ipaddr *addr;
- int i, rc = 0;
+ bool enable;
+ int rc = 0;

if (!card)
return -EINVAL;
@@ -386,25 +386,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
}

if (sysfs_streq(buf, "toggle")) {
- card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
- } else if (sysfs_streq(buf, "1")) {
- card->ipato.enabled = 1;
- hash_for_each(card->ip_htable, i, addr, hnode) {
- if ((addr->type == QETH_IP_TYPE_NORMAL) &&
- qeth_l3_is_addr_covered_by_ipato(card, addr))
- addr->set_flags |=
- QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
- } else if (sysfs_streq(buf, "0")) {
- card->ipato.enabled = 0;
- hash_for_each(card->ip_htable, i, addr, hnode) {
- if (addr->set_flags &
- QETH_IPA_SETIP_TAKEOVER_FLAG)
- addr->set_flags &=
- ~QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
- } else
+ enable = !card->ipato.enabled;
+ } else if (kstrtobool(buf, &enable)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.enabled != enable) {
+ card->ipato.enabled = enable;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -430,20 +423,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ bool invert;
int rc = 0;

if (!card)
return -EINVAL;

mutex_lock(&card->conf_mutex);
- if (sysfs_streq(buf, "toggle"))
- card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
- else if (sysfs_streq(buf, "1"))
- card->ipato.invert4 = 1;
- else if (sysfs_streq(buf, "0"))
- card->ipato.invert4 = 0;
- else
+ if (sysfs_streq(buf, "toggle")) {
+ invert = !card->ipato.invert4;
+ } else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.invert4 != invert) {
+ card->ipato.invert4 = invert;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
+out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -609,20 +609,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ bool invert;
int rc = 0;

if (!card)
return -EINVAL;

mutex_lock(&card->conf_mutex);
- if (sysfs_streq(buf, "toggle"))
- card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
- else if (sysfs_streq(buf, "1"))
- card->ipato.invert6 = 1;
- else if (sysfs_streq(buf, "0"))
- card->ipato.invert6 = 0;
- else
+ if (sysfs_streq(buf, "toggle")) {
+ invert = !card->ipato.invert6;
+ } else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.invert6 != invert) {
+ card->ipato.invert6 = invert;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
+out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index bdf0e6e89991..faf50df81622 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1764,7 +1764,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;

- if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
+ if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->line_start = ldata->read_tail;
if (!L_ICANON(tty) || !read_cnt(ldata)) {
@@ -2427,7 +2427,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
case TIOCINQ:
down_write(&tty->termios_rwsem);
- if (L_ICANON(tty))
+ if (L_ICANON(tty) && !L_EXTPROC(tty))
retval = inq_canon(ldata);
else
retval = read_cnt(ldata);
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index c220c2c0893f..e99f1c5b1df6 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -446,7 +446,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
* Callers other than flush_to_ldisc() need to exclude the kworker
* from concurrent use of the line discipline, see paste_selection().
*
- * Returns the number of bytes not processed
+ * Returns the number of bytes processed
*/
int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
char *f, int count)
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index ba9b29bc441f..7c54a19b20e0 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1002,7 +1002,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
case USB_SSP_CAP_TYPE:
ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
- USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+ USB_SSP_SUBLINK_SPEED_ATTRIBS);
if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
dev->bos->ssp_cap = ssp_cap;
break;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 50010282c010..c05c4f877750 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -57,10 +57,11 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },

- /* Logitech HD Pro Webcams C920, C920-C and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },

/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -154,6 +155,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },

+ /* ELSA MicroLink 56K */
+ { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },

diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c87ef38e7416..f6782a347cde 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -189,6 +189,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0014)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0015)
xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3249f42b4b93..0c743e4cca1e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1017,6 +1017,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+ { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
{ } /* Terminating entry */
};

diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f9d15bd62785..543d2801632b 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -913,6 +913,12 @@
#define ICPDAS_I7561U_PID 0x0104
#define ICPDAS_I7563U_PID 0x0105

+/*
+ * Airbus Defence and Space
+ */
+#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */
+#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */
+
/*
* RT Systems programming cables for various ham radios
*/
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ffa8ec917ff5..a818c43a02ec 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
+/* These Yuga products use Qualcomm's vendor ID */
+#define YUGA_PRODUCT_CLM920_NC5 0x9625

#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
@@ -283,6 +285,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_ME910 0x1100
+#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
@@ -648,6 +651,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
.reserved = BIT(1) | BIT(3),
};

+static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(3),
+};
+
static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2),
@@ -677,6 +685,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
.reserved = BIT(4) | BIT(5),
};

+static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
+ .reserved = BIT(1) | BIT(4),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1181,6 +1193,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ /* Yuga products use Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
+ .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1247,6 +1262,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+ .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 4516291df1b8..fb6dc16c754a 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
{DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
+ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
+ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -346,6 +348,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
break;
case 2:
dev_dbg(dev, "NMEA GPS interface found\n");
+ sendsetup = true;
break;
case 3:
dev_dbg(dev, "Modem port found\n");
1432 diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
1433 index c653ce533430..1886d8e4f14e 100644
1434 --- a/drivers/usb/usbip/stub_dev.c
1435 +++ b/drivers/usb/usbip/stub_dev.c
1436 @@ -163,8 +163,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
1437 * step 1?
1438 */
1439 if (ud->tcp_socket) {
1440 - dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
1441 - ud->tcp_socket);
1442 + dev_dbg(&sdev->udev->dev, "shutdown sockfd\n");
1443 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
1444 }
1445
1446 diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
1447 index af10f7b131a4..325b4c05acdd 100644
1448 --- a/drivers/usb/usbip/stub_main.c
1449 +++ b/drivers/usb/usbip/stub_main.c
1450 @@ -252,11 +252,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
1451 struct stub_priv *priv;
1452 struct urb *urb;
1453
1454 - dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
1455 + dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
1456
1457 while ((priv = stub_priv_pop(sdev))) {
1458 urb = priv->urb;
1459 - dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
1460 + dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
1461 + priv->seqnum);
1462 usb_kill_urb(urb);
1463
1464 kmem_cache_free(stub_priv_cache, priv);
1465 diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
1466 index 283a9be77a22..5b807185f79e 100644
1467 --- a/drivers/usb/usbip/stub_rx.c
1468 +++ b/drivers/usb/usbip/stub_rx.c
1469 @@ -225,9 +225,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
1470 if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
1471 continue;
1472
1473 - dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
1474 - priv->urb);
1475 -
1476 /*
1477 * This matched urb is not completed yet (i.e., be in
1478 * flight in usb hcd hardware/driver). Now we are
1479 @@ -266,8 +263,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
1480 ret = usb_unlink_urb(priv->urb);
1481 if (ret != -EINPROGRESS)
1482 dev_err(&priv->urb->dev->dev,
1483 - "failed to unlink a urb %p, ret %d\n",
1484 - priv->urb, ret);
1485 + "failed to unlink a urb # %lu, ret %d\n",
1486 + priv->seqnum, ret);
1487
1488 return 0;
1489 }
1490 diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
1491 index 87ff94be4235..96aa375b80d9 100644
1492 --- a/drivers/usb/usbip/stub_tx.c
1493 +++ b/drivers/usb/usbip/stub_tx.c
1494 @@ -102,7 +102,7 @@ void stub_complete(struct urb *urb)
1495 /* link a urb to the queue of tx. */
1496 spin_lock_irqsave(&sdev->priv_lock, flags);
1497 if (sdev->ud.tcp_socket == NULL) {
1498 - usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
1499 + usbip_dbg_stub_tx("ignore urb for closed connection\n");
1500 /* It will be freed in stub_device_cleanup_urbs(). */
1501 } else if (priv->unlinking) {
1502 stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
1503 @@ -204,8 +204,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
1504
1505 /* 1. setup usbip_header */
1506 setup_ret_submit_pdu(&pdu_header, urb);
1507 - usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
1508 - pdu_header.base.seqnum, urb);
1509 + usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
1510 + pdu_header.base.seqnum);
1511 usbip_header_correct_endian(&pdu_header, 1);
1512
1513 iov[iovnum].iov_base = &pdu_header;
1514 diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
1515 index 8b232290be6b..e24b24fa0f16 100644
1516 --- a/drivers/usb/usbip/usbip_common.c
1517 +++ b/drivers/usb/usbip/usbip_common.c
1518 @@ -335,13 +335,10 @@ int usbip_recv(struct socket *sock, void *buf, int size)
1519 char *bp = buf;
1520 int osize = size;
1521
1522 - usbip_dbg_xmit("enter\n");
1523 -
1524 - if (!sock || !buf || !size) {
1525 - pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
1526 - size);
1527 + if (!sock || !buf || !size)
1528 return -EINVAL;
1529 - }
1530 +
1531 + usbip_dbg_xmit("enter\n");
1532
1533 do {
1534 sock->sk->sk_allocation = GFP_NOIO;
1535 @@ -354,11 +351,8 @@ int usbip_recv(struct socket *sock, void *buf, int size)
1536 msg.msg_flags = MSG_NOSIGNAL;
1537
1538 result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
1539 - if (result <= 0) {
1540 - pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
1541 - sock, buf, size, result, total);
1542 + if (result <= 0)
1543 goto err;
1544 - }
1545
1546 size -= result;
1547 buf += result;
1548 diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
1549 index d6dc165e924b..7f161b095176 100644
1550 --- a/drivers/usb/usbip/vhci_hcd.c
1551 +++ b/drivers/usb/usbip/vhci_hcd.c
1552 @@ -506,9 +506,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1553 struct vhci_device *vdev;
1554 unsigned long flags;
1555
1556 - usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
1557 - hcd, urb, mem_flags);
1558 -
1559 if (portnum > VHCI_HC_PORTS) {
1560 pr_err("invalid port number %d\n", portnum);
1561 return -ENODEV;
1562 @@ -671,8 +668,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1563 struct vhci_device *vdev;
1564 unsigned long flags;
1565
1566 - pr_info("dequeue a urb %p\n", urb);
1567 -
1568 spin_lock_irqsave(&vhci->lock, flags);
1569
1570 priv = urb->hcpriv;
1571 @@ -700,7 +695,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1572 /* tcp connection is closed */
1573 spin_lock(&vdev->priv_lock);
1574
1575 - pr_info("device %p seems to be disconnected\n", vdev);
1576 list_del(&priv->list);
1577 kfree(priv);
1578 urb->hcpriv = NULL;
1579 @@ -712,8 +706,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1580 * vhci_rx will receive RET_UNLINK and give back the URB.
1581 * Otherwise, we give back it here.
1582 */
1583 - pr_info("gives back urb %p\n", urb);
1584 -
1585 usb_hcd_unlink_urb_from_ep(hcd, urb);
1586
1587 spin_unlock_irqrestore(&vhci->lock, flags);
1588 @@ -741,8 +733,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1589
1590 unlink->unlink_seqnum = priv->seqnum;
1591
1592 - pr_info("device %p seems to be still connected\n", vdev);
1593 -
1594 /* send cmd_unlink and try to cancel the pending URB in the
1595 * peer */
1596 list_add_tail(&unlink->list, &vdev->unlink_tx);
1597 @@ -823,7 +813,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
1598
1599 /* need this? see stub_dev.c */
1600 if (ud->tcp_socket) {
1601 - pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
1602 + pr_debug("shutdown tcp_socket\n");
1603 kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
1604 }
1605
1606 diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
1607 index fc2d319e2360..5943deeec115 100644
1608 --- a/drivers/usb/usbip/vhci_rx.c
1609 +++ b/drivers/usb/usbip/vhci_rx.c
1610 @@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
1611 urb = priv->urb;
1612 status = urb->status;
1613
1614 - usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
1615 - urb, priv, seqnum);
1616 + usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
1617
1618 switch (status) {
1619 case -ENOENT:
1620 /* fall through */
1621 case -ECONNRESET:
1622 - dev_info(&urb->dev->dev,
1623 - "urb %p was unlinked %ssynchronuously.\n", urb,
1624 - status == -ENOENT ? "" : "a");
1625 + dev_dbg(&urb->dev->dev,
1626 +				"urb seq# %u was unlinked %ssynchronously\n",
1627 + seqnum, status == -ENOENT ? "" : "a");
1628 break;
1629 case -EINPROGRESS:
1630 /* no info output */
1631 break;
1632 default:
1633 - dev_info(&urb->dev->dev,
1634 - "urb %p may be in a error, status %d\n", urb,
1635 - status);
1636 + dev_dbg(&urb->dev->dev,
1637 +				"urb seq# %u may be in an error, status %d\n",
1638 + seqnum, status);
1639 }
1640
1641 list_del(&priv->list);
1642 @@ -80,8 +79,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
1643 spin_unlock_irqrestore(&vdev->priv_lock, flags);
1644
1645 if (!urb) {
1646 - pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
1647 - pr_info("max seqnum %d\n",
1648 + pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
1649 + pdu->base.seqnum,
1650 atomic_read(&vhci->seqnum));
1651 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
1652 return;
1653 @@ -104,7 +103,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
1654 if (usbip_dbg_flag_vhci_rx)
1655 usbip_dump_urb(urb);
1656
1657 - usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
1658 + usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
1659
1660 spin_lock_irqsave(&vhci->lock, flags);
1661 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(vhci), urb);
1662 @@ -170,7 +169,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
1663 pr_info("the urb (seqnum %d) was already given back\n",
1664 pdu->base.seqnum);
1665 } else {
1666 - usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
1667 + usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
1668
1669 /* If unlink is successful, status is -ECONNRESET */
1670 urb->status = pdu->u.ret_unlink.status;
1671 diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
1672 index 3e7878fe2fd4..a9a663a578b6 100644
1673 --- a/drivers/usb/usbip/vhci_tx.c
1674 +++ b/drivers/usb/usbip/vhci_tx.c
1675 @@ -83,7 +83,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
1676 memset(&msg, 0, sizeof(msg));
1677 memset(&iov, 0, sizeof(iov));
1678
1679 - usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
1680 + usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
1681 + priv->seqnum);
1682
1683 /* 1. setup usbip_header */
1684 setup_cmd_submit_pdu(&pdu_header, urb);
1685 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
1686 index 965cc5693a46..c9447a689522 100644
1687 --- a/include/linux/cpuhotplug.h
1688 +++ b/include/linux/cpuhotplug.h
1689 @@ -48,7 +48,7 @@ enum cpuhp_state {
1690 CPUHP_ARM_SHMOBILE_SCU_PREPARE,
1691 CPUHP_SH_SH3X_PREPARE,
1692 CPUHP_BLK_MQ_PREPARE,
1693 - CPUHP_TIMERS_DEAD,
1694 + CPUHP_TIMERS_PREPARE,
1695 CPUHP_NOTF_ERR_INJ_PREPARE,
1696 CPUHP_MIPS_SOC_PREPARE,
1697 CPUHP_BRINGUP_CPU,
1698 diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
1699 index a0649973ee5b..b9dfca557a6c 100644
1700 --- a/include/linux/ipv6.h
1701 +++ b/include/linux/ipv6.h
1702 @@ -246,7 +246,8 @@ struct ipv6_pinfo {
1703 * 100: prefer care-of address
1704 */
1705 dontfrag:1,
1706 - autoflowlabel:1;
1707 + autoflowlabel:1,
1708 + autoflowlabel_set:1;
1709 __u8 min_hopcount;
1710 __u8 tclass;
1711 __be32 rcv_flowinfo;
1712 diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
1713 index 6045d4d58065..25ed105bbcfb 100644
1714 --- a/include/linux/mlx5/mlx5_ifc.h
1715 +++ b/include/linux/mlx5/mlx5_ifc.h
1716 @@ -143,7 +143,7 @@ enum {
1717 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
1718 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
1719 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
1720 - MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
1721 + MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
1722 MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
1723 MLX5_CMD_OP_ALLOC_PD = 0x800,
1724 MLX5_CMD_OP_DEALLOC_PD = 0x801,
1725 @@ -6689,7 +6689,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
1726 u8 vxlan_udp_port[0x10];
1727 };
1728
1729 -struct mlx5_ifc_set_rate_limit_out_bits {
1730 +struct mlx5_ifc_set_pp_rate_limit_out_bits {
1731 u8 status[0x8];
1732 u8 reserved_at_8[0x18];
1733
1734 @@ -6698,7 +6698,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
1735 u8 reserved_at_40[0x40];
1736 };
1737
1738 -struct mlx5_ifc_set_rate_limit_in_bits {
1739 +struct mlx5_ifc_set_pp_rate_limit_in_bits {
1740 u8 opcode[0x10];
1741 u8 reserved_at_10[0x10];
1742
1743 @@ -6711,6 +6711,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
1744 u8 reserved_at_60[0x20];
1745
1746 u8 rate_limit[0x20];
1747 +
1748 + u8 reserved_at_a0[0x160];
1749 };
1750
1751 struct mlx5_ifc_access_register_out_bits {
1752 diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
1753 index b83507c0640c..e38f471a5402 100644
1754 --- a/include/linux/ptr_ring.h
1755 +++ b/include/linux/ptr_ring.h
1756 @@ -99,12 +99,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
1757
1758 /* Note: callers invoking this in a loop must use a compiler barrier,
1759 * for example cpu_relax(). Callers must hold producer_lock.
1760 + * Callers are responsible for making sure the pointer that is being queued
1761 + * points to valid data.
1762 */
1763 static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
1764 {
1765 if (unlikely(!r->size) || r->queue[r->producer])
1766 return -ENOSPC;
1767
1768 +	/* Make sure the pointer we are storing points to valid data. */
1769 + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
1770 + smp_wmb();
1771 +
1772 r->queue[r->producer++] = ptr;
1773 if (unlikely(r->producer >= r->size))
1774 r->producer = 0;
1775 @@ -244,6 +250,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
1776 if (ptr)
1777 __ptr_ring_discard_one(r);
1778
1779 +	/* Make sure anyone accessing data through the pointer sees up-to-date data. */
1780 + /* Pairs with smp_wmb in __ptr_ring_produce. */
1781 + smp_read_barrier_depends();
1782 return ptr;
1783 }
1784
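
The comment pairing above is the whole contract: the producer must make the pointed-to object fully visible before publishing the slot pointer (smp_wmb()), and the consumer dereferencing the pointer must observe that initialization (smp_read_barrier_depends()). A hedged user-space analog with C11 atomics, where a release store and an acquire load play the two roles (single-slot "ring", names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;                  /* the data the pointer refers to */
    static _Atomic(int *) slot;          /* one ring slot, NULL == empty */

    static void produce(int value)
    {
        payload = value;                 /* 1. fill in the data */
        /* 2. publish: release orders the write above before the store,
         * the role smp_wmb() plays in __ptr_ring_produce(). */
        atomic_store_explicit(&slot, &payload, memory_order_release);
    }

    static int consume(void)
    {
        int *p;
        /* Acquire pairs with the release above, the role
         * smp_read_barrier_depends() plays in __ptr_ring_consume(). */
        while (!(p = atomic_load_explicit(&slot, memory_order_acquire)))
            ;                            /* spin until a pointer appears */
        return *p;                       /* guaranteed to read 'value' */
    }

    int main(void)
    {
        produce(7);
        printf("%d\n", consume());
        return 0;
    }
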
1785 diff --git a/include/linux/tcp.h b/include/linux/tcp.h
1786 index 647532b0eb03..f50b717ce644 100644
1787 --- a/include/linux/tcp.h
1788 +++ b/include/linux/tcp.h
1789 @@ -219,7 +219,8 @@ struct tcp_sock {
1790 } rack;
1791 u16 advmss; /* Advertised MSS */
1792 u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
1793 - unused:7;
1794 + is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
1795 + unused:6;
1796 u8 nonagle : 4,/* Disable Nagle algorithm? */
1797 thin_lto : 1,/* Use linear timeouts for thin streams */
1798 thin_dupack : 1,/* Fast retransmit on first dupack */
1799 diff --git a/include/linux/timer.h b/include/linux/timer.h
1800 index 51d601f192d4..ec86e4e55ea3 100644
1801 --- a/include/linux/timer.h
1802 +++ b/include/linux/timer.h
1803 @@ -274,9 +274,11 @@ unsigned long round_jiffies_up(unsigned long j);
1804 unsigned long round_jiffies_up_relative(unsigned long j);
1805
1806 #ifdef CONFIG_HOTPLUG_CPU
1807 +int timers_prepare_cpu(unsigned int cpu);
1808 int timers_dead_cpu(unsigned int cpu);
1809 #else
1810 -#define timers_dead_cpu NULL
1811 +#define timers_prepare_cpu NULL
1812 +#define timers_dead_cpu NULL
1813 #endif
1814
1815 #endif
1816 diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
1817 index 4d6ec58a8d45..2edb150f1a4d 100644
1818 --- a/include/linux/vm_event_item.h
1819 +++ b/include/linux/vm_event_item.h
1820 @@ -89,10 +89,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
1821 #endif
1822 #endif
1823 #ifdef CONFIG_DEBUG_TLBFLUSH
1824 -#ifdef CONFIG_SMP
1825 NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
1826 NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
1827 -#endif /* CONFIG_SMP */
1828 NR_TLB_LOCAL_FLUSH_ALL,
1829 NR_TLB_LOCAL_FLUSH_ONE,
1830 #endif /* CONFIG_DEBUG_TLBFLUSH */
1831 diff --git a/include/net/ip.h b/include/net/ip.h
1832 index 51c6b9786c46..0e3dcd5a134d 100644
1833 --- a/include/net/ip.h
1834 +++ b/include/net/ip.h
1835 @@ -33,6 +33,8 @@
1836 #include <net/flow.h>
1837 #include <net/flow_dissector.h>
1838
1839 +#define IPV4_MIN_MTU 68 /* RFC 791 */
1840 +
1841 struct sock;
1842
1843 struct inet_skb_parm {
1844 diff --git a/include/net/tcp.h b/include/net/tcp.h
1845 index fba4fc46871d..caf35e062639 100644
1846 --- a/include/net/tcp.h
1847 +++ b/include/net/tcp.h
1848 @@ -1001,7 +1001,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1849 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1850 struct rate_sample *rs);
1851 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1852 - struct skb_mstamp *now, struct rate_sample *rs);
1853 + bool is_sack_reneg, struct skb_mstamp *now, struct rate_sample *rs);
1854 void tcp_rate_check_app_limited(struct sock *sk);
1855
1856 /* These functions determine how the current flow behaves in respect of SACK
1857 diff --git a/kernel/cpu.c b/kernel/cpu.c
1858 index e1436ca4aed0..802eb3361a0a 100644
1859 --- a/kernel/cpu.c
1860 +++ b/kernel/cpu.c
1861 @@ -1309,9 +1309,9 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1862 * before blk_mq_queue_reinit_notify() from notify_dead(),
1863 * otherwise a RCU stall occurs.
1864 */
1865 - [CPUHP_TIMERS_DEAD] = {
1866 + [CPUHP_TIMERS_PREPARE] = {
1867 .name = "timers:dead",
1868 - .startup.single = NULL,
1869 + .startup.single = timers_prepare_cpu,
1870 .teardown.single = timers_dead_cpu,
1871 },
1872 /* Kicks the plugged cpu into life */
1873 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
1874 index 3bcb61b52f6c..dae1a45be504 100644
1875 --- a/kernel/time/tick-sched.c
1876 +++ b/kernel/time/tick-sched.c
1877 @@ -663,6 +663,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
1878 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1879 }
1880
1881 +static inline bool local_timer_softirq_pending(void)
1882 +{
1883 + return local_softirq_pending() & TIMER_SOFTIRQ;
1884 +}
1885 +
1886 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
1887 ktime_t now, int cpu)
1888 {
1889 @@ -679,8 +684,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
1890 } while (read_seqretry(&jiffies_lock, seq));
1891 ts->last_jiffies = basejiff;
1892
1893 - if (rcu_needs_cpu(basemono, &next_rcu) ||
1894 - arch_needs_cpu() || irq_work_needs_cpu()) {
1895 + /*
1896 +	 * Keep the periodic tick when RCU, architecture or irq_work
1897 +	 * requests it.
1898 +	 * Apart from that, check whether the local timer softirq is
1899 +	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
1900 +	 * because there is an already expired timer, so it will request
1901 +	 * immediate expiry, which rearms the hardware timer with a
1902 + * minimal delta which brings us back to this place
1903 + * immediately. Lather, rinse and repeat...
1904 + */
1905 + if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
1906 + irq_work_needs_cpu() || local_timer_softirq_pending()) {
1907 next_tick = basemono + TICK_NSEC;
1908 } else {
1909 /*
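
The helper's intent: the softirq pending word is a per-CPU bitmask with one bit per softirq, and the tick must be kept alive while the timer bit is raised, since get_next_timer_interrupt() would otherwise report the already-expired timer and re-arm the hardware with a minimal delta over and over. A hedged sketch of testing such a mask (bit positions illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ };  /* bit indices */

    static unsigned int pending;         /* per-CPU word in the kernel */

    static bool timer_softirq_pending(void)
    {
        /* Test the TIMER_SOFTIRQ bit of the pending bitmask. */
        return pending & (1u << TIMER_SOFTIRQ);
    }

    int main(void)
    {
        pending |= 1u << TIMER_SOFTIRQ;  /* raise_softirq() analog */
        printf("keep tick: %d\n", timer_softirq_pending());
        return 0;
    }
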
1910 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
1911 index 7d670362891a..e872f7f05e8a 100644
1912 --- a/kernel/time/timer.c
1913 +++ b/kernel/time/timer.c
1914 @@ -849,11 +849,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
1915 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
1916
1917 /*
1918 - * If the timer is deferrable and nohz is active then we need to use
1919 - * the deferrable base.
1920 + * If the timer is deferrable and NO_HZ_COMMON is set then we need
1921 + * to use the deferrable base.
1922 */
1923 - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
1924 - (tflags & TIMER_DEFERRABLE))
1925 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
1926 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
1927 return base;
1928 }
1929 @@ -863,11 +862,10 @@ static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
1930 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1931
1932 /*
1933 - * If the timer is deferrable and nohz is active then we need to use
1934 - * the deferrable base.
1935 + * If the timer is deferrable and NO_HZ_COMMON is set then we need
1936 + * to use the deferrable base.
1937 */
1938 - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
1939 - (tflags & TIMER_DEFERRABLE))
1940 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
1941 base = this_cpu_ptr(&timer_bases[BASE_DEF]);
1942 return base;
1943 }
1944 @@ -1021,8 +1019,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1945 if (!ret && pending_only)
1946 goto out_unlock;
1947
1948 - debug_activate(timer, expires);
1949 -
1950 new_base = get_target_base(base, timer->flags);
1951
1952 if (base != new_base) {
1953 @@ -1046,6 +1042,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1954 }
1955 }
1956
1957 + debug_activate(timer, expires);
1958 +
1959 timer->expires = expires;
1960 /*
1961 * If 'idx' was calculated above and the base time did not advance
1962 @@ -1684,7 +1682,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1963 base->must_forward_clk = false;
1964
1965 __run_timers(base);
1966 - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
1967 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
1968 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
1969 }
1970
1971 @@ -1853,6 +1851,21 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h
1972 }
1973 }
1974
1975 +int timers_prepare_cpu(unsigned int cpu)
1976 +{
1977 + struct timer_base *base;
1978 + int b;
1979 +
1980 + for (b = 0; b < NR_BASES; b++) {
1981 + base = per_cpu_ptr(&timer_bases[b], cpu);
1982 + base->clk = jiffies;
1983 + base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
1984 + base->is_idle = false;
1985 + base->must_forward_clk = true;
1986 + }
1987 + return 0;
1988 +}
1989 +
1990 int timers_dead_cpu(unsigned int cpu)
1991 {
1992 struct timer_base *old_base;
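
timers_prepare_cpu() above runs before the CPU comes online and resynchronizes both per-CPU wheel bases: while the CPU was offline its base->clk stopped advancing, and enqueueing a timer against that stale clock would compute a wheel bucket far in the past. A hedged user-space analog of the reset (field names mirror struct timer_base, constants illustrative):

    #include <stdbool.h>

    #define NR_BASES 2
    #define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)

    struct timer_base {
        unsigned long clk;               /* the wheel's idea of "now" */
        unsigned long next_expiry;
        bool is_idle;
        bool must_forward_clk;
    };

    static struct timer_base bases[NR_BASES];
    static unsigned long jiffies;        /* advanced by the tick in the kernel */

    /* Analog of timers_prepare_cpu(): re-base every wheel to current time. */
    static int timers_prepare(void)
    {
        for (int b = 0; b < NR_BASES; b++) {
            bases[b].clk = jiffies;
            bases[b].next_expiry = jiffies + NEXT_TIMER_MAX_DELTA;
            bases[b].is_idle = false;
            bases[b].must_forward_clk = true;
        }
        return 0;
    }

    int main(void)
    {
        jiffies = 123456;                /* CPU was offline for a while */
        return timers_prepare();
    }
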
1993 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
1994 index f5c016e8fc88..3e1d11f4fe44 100644
1995 --- a/kernel/trace/ring_buffer.c
1996 +++ b/kernel/trace/ring_buffer.c
1997 @@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
1998 /* Missed count stored at end */
1999 #define RB_MISSED_STORED (1 << 30)
2000
2001 +#define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED)
2002 +
2003 struct buffer_data_page {
2004 u64 time_stamp; /* page time stamp */
2005 local_t commit; /* write committed index */
2006 @@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
2007 */
2008 size_t ring_buffer_page_len(void *page)
2009 {
2010 - return local_read(&((struct buffer_data_page *)page)->commit)
2011 + struct buffer_data_page *bpage = page;
2012 +
2013 + return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
2014 + BUF_PAGE_HDR_SIZE;
2015 }
2016
2017 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2018 index 4214cd960b8e..15b02645ce8b 100644
2019 --- a/kernel/trace/trace.c
2020 +++ b/kernel/trace/trace.c
2021 @@ -6181,7 +6181,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2022 .spd_release = buffer_spd_release,
2023 };
2024 struct buffer_ref *ref;
2025 - int entries, size, i;
2026 + int entries, i;
2027 ssize_t ret = 0;
2028
2029 #ifdef CONFIG_TRACER_MAX_TRACE
2030 @@ -6232,14 +6232,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2031 break;
2032 }
2033
2034 - /*
2035 - * zero out any left over data, this is going to
2036 - * user land.
2037 - */
2038 - size = ring_buffer_page_len(ref->page);
2039 - if (size < PAGE_SIZE)
2040 - memset(ref->page + size, 0, PAGE_SIZE - size);
2041 -
2042 page = virt_to_page(ref->page);
2043
2044 spd.pages[i] = page;
2045 @@ -6963,6 +6955,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
2046 buf->data = alloc_percpu(struct trace_array_cpu);
2047 if (!buf->data) {
2048 ring_buffer_free(buf->buffer);
2049 + buf->buffer = NULL;
2050 return -ENOMEM;
2051 }
2052
2053 @@ -6986,7 +6979,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
2054 allocate_snapshot ? size : 1);
2055 if (WARN_ON(ret)) {
2056 ring_buffer_free(tr->trace_buffer.buffer);
2057 + tr->trace_buffer.buffer = NULL;
2058 free_percpu(tr->trace_buffer.data);
2059 + tr->trace_buffer.data = NULL;
2060 return -ENOMEM;
2061 }
2062 tr->allocated_snapshot = allocate_snapshot;
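
Both error paths now clear the pointers they free: allocate_trace_buffers() can run again and the trace_array be torn down later, and a dangling trace_buffer.buffer or .data pointer would then be freed twice. The shape of the fix, as a hedged user-space sketch:

    #include <stdlib.h>

    struct trace_buf { void *buffer; void *data; };

    /* Free and poison: teardown becomes safe to repeat. */
    static void buf_teardown(struct trace_buf *b)
    {
        free(b->buffer);
        b->buffer = NULL;                /* later free(NULL) is a no-op */
        free(b->data);
        b->data = NULL;
    }

    int main(void)
    {
        struct trace_buf b = { malloc(32), malloc(32) };
        buf_teardown(&b);
        buf_teardown(&b);                /* harmless thanks to the NULLs */
        return 0;
    }
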
2063 diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
2064 index 5d4006e589cb..4f831225d34f 100644
2065 --- a/net/bridge/br_netlink.c
2066 +++ b/net/bridge/br_netlink.c
2067 @@ -1092,19 +1092,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
2068 struct net_bridge *br = netdev_priv(dev);
2069 int err;
2070
2071 + err = register_netdevice(dev);
2072 + if (err)
2073 + return err;
2074 +
2075 if (tb[IFLA_ADDRESS]) {
2076 spin_lock_bh(&br->lock);
2077 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
2078 spin_unlock_bh(&br->lock);
2079 }
2080
2081 - err = register_netdevice(dev);
2082 - if (err)
2083 - return err;
2084 -
2085 err = br_changelink(dev, tb, data);
2086 if (err)
2087 - unregister_netdevice(dev);
2088 + br_dev_delete(dev, NULL);
2089 +
2090 return err;
2091 }
2092
2093 diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
2094 index 7001da910c6b..b7efe2f19f83 100644
2095 --- a/net/core/net_namespace.c
2096 +++ b/net/core/net_namespace.c
2097 @@ -263,7 +263,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
2098 spin_lock_irqsave(&net->nsid_lock, flags);
2099 peer = idr_find(&net->netns_ids, id);
2100 if (peer)
2101 - get_net(peer);
2102 + peer = maybe_get_net(peer);
2103 spin_unlock_irqrestore(&net->nsid_lock, flags);
2104 rcu_read_unlock();
2105
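
get_net() bumps the namespace refcount unconditionally, so a lookup racing with teardown could resurrect a struct net whose count already reached zero; maybe_get_net() takes the reference only while the count is still non-zero. That is the inc-not-zero idiom, sketched here with C11 atomics (hedged, user-space analog):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Take a reference only if the object is still live (count > 0). */
    static bool ref_get_not_zero(atomic_int *count)
    {
        int v = atomic_load(count);
        while (v != 0) {
            if (atomic_compare_exchange_weak(count, &v, v + 1))
                return true;             /* got a reference */
        }
        return false;                    /* object is already dying */
    }

    int main(void)
    {
        atomic_int live = 1, dying = 0;
        printf("live: %d, dying: %d\n",
               ref_get_not_zero(&live), ref_get_not_zero(&dying));
        return 0;
    }
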
2106 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2107 index aec5605944d3..a64515583bc1 100644
2108 --- a/net/core/skbuff.c
2109 +++ b/net/core/skbuff.c
2110 @@ -3823,7 +3823,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
2111 struct sock *sk = skb->sk;
2112
2113 if (!skb_may_tx_timestamp(sk, false))
2114 - return;
2115 + goto err;
2116
2117 /* Take a reference to prevent skb_orphan() from freeing the socket,
2118 * but only if the socket refcount is not zero.
2119 @@ -3832,7 +3832,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
2120 *skb_hwtstamps(skb) = *hwtstamps;
2121 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
2122 sock_put(sk);
2123 + return;
2124 }
2125 +
2126 +err:
2127 + kfree_skb(skb);
2128 }
2129 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
2130
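
The old early return dropped the cloned skb on the floor whenever timestamping was not permitted; the fix routes every bail-out through an err label that frees it, so the function consumes the skb on all paths. A hedged sketch of that single-exit ownership shape:

    #include <stdbool.h>
    #include <stdlib.h>

    struct pkt { char data[64]; };

    static bool may_timestamp(void) { return false; }    /* illustrative gate */

    /* Takes ownership of p on every path, like the fixed function. */
    static void complete_timestamp(struct pkt *p)
    {
        if (!may_timestamp())
            goto err;                    /* was: return; -- leaked p */

        /* ... deliver p to the socket's error queue, consuming it ... */
        return;

    err:
        free(p);                         /* every failure path frees it */
    }

    int main(void)
    {
        complete_timestamp(malloc(sizeof(struct pkt)));
        return 0;
    }
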
2131 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
2132 index 062a67ca9a21..f08f984ebc56 100644
2133 --- a/net/ipv4/devinet.c
2134 +++ b/net/ipv4/devinet.c
2135 @@ -1380,7 +1380,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
2136
2137 static bool inetdev_valid_mtu(unsigned int mtu)
2138 {
2139 - return mtu >= 68;
2140 + return mtu >= IPV4_MIN_MTU;
2141 }
2142
2143 static void inetdev_send_gratuitous_arp(struct net_device *dev,
2144 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2145 index 968d8e165e3d..ffae472e250a 100644
2146 --- a/net/ipv4/fib_frontend.c
2147 +++ b/net/ipv4/fib_frontend.c
2148 @@ -1253,14 +1253,19 @@ static int __net_init ip_fib_net_init(struct net *net)
2149
2150 static void ip_fib_net_exit(struct net *net)
2151 {
2152 - unsigned int i;
2153 + int i;
2154
2155 rtnl_lock();
2156 #ifdef CONFIG_IP_MULTIPLE_TABLES
2157 RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
2158 RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
2159 #endif
2160 - for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
2161 + /* Destroy the tables in reverse order to guarantee that the
2162 + * local table, ID 255, is destroyed before the main table, ID
2163 + * 254. This is necessary as the local table may contain
2164 + * references to data contained in the main table.
2165 + */
2166 + for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
2167 struct hlist_head *head = &net->ipv4.fib_table_hash[i];
2168 struct hlist_node *tmp;
2169 struct fib_table *tb;
2170 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
2171 index 08575e3bd135..7bff0c65046f 100644
2172 --- a/net/ipv4/igmp.c
2173 +++ b/net/ipv4/igmp.c
2174 @@ -89,6 +89,7 @@
2175 #include <linux/rtnetlink.h>
2176 #include <linux/times.h>
2177 #include <linux/pkt_sched.h>
2178 +#include <linux/byteorder/generic.h>
2179
2180 #include <net/net_namespace.h>
2181 #include <net/arp.h>
2182 @@ -321,6 +322,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
2183 return scount;
2184 }
2185
2186 +/* source address selection per RFC 3376 section 4.2.13 */
2187 +static __be32 igmpv3_get_srcaddr(struct net_device *dev,
2188 + const struct flowi4 *fl4)
2189 +{
2190 + struct in_device *in_dev = __in_dev_get_rcu(dev);
2191 +
2192 + if (!in_dev)
2193 + return htonl(INADDR_ANY);
2194 +
2195 + for_ifa(in_dev) {
2196 + if (inet_ifa_match(fl4->saddr, ifa))
2197 + return fl4->saddr;
2198 + } endfor_ifa(in_dev);
2199 +
2200 + return htonl(INADDR_ANY);
2201 +}
2202 +
2203 static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
2204 {
2205 struct sk_buff *skb;
2206 @@ -368,7 +386,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
2207 pip->frag_off = htons(IP_DF);
2208 pip->ttl = 1;
2209 pip->daddr = fl4.daddr;
2210 - pip->saddr = fl4.saddr;
2211 + pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
2212 pip->protocol = IPPROTO_IGMP;
2213 pip->tot_len = 0; /* filled in later */
2214 ip_select_ident(net, skb, NULL);
2215 @@ -404,16 +422,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
2216 }
2217
2218 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
2219 - int type, struct igmpv3_grec **ppgr)
2220 + int type, struct igmpv3_grec **ppgr, unsigned int mtu)
2221 {
2222 struct net_device *dev = pmc->interface->dev;
2223 struct igmpv3_report *pih;
2224 struct igmpv3_grec *pgr;
2225
2226 - if (!skb)
2227 - skb = igmpv3_newpack(dev, dev->mtu);
2228 - if (!skb)
2229 - return NULL;
2230 + if (!skb) {
2231 + skb = igmpv3_newpack(dev, mtu);
2232 + if (!skb)
2233 + return NULL;
2234 + }
2235 pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
2236 pgr->grec_type = type;
2237 pgr->grec_auxwords = 0;
2238 @@ -436,12 +455,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
2239 struct igmpv3_grec *pgr = NULL;
2240 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
2241 int scount, stotal, first, isquery, truncate;
2242 + unsigned int mtu;
2243
2244 if (pmc->multiaddr == IGMP_ALL_HOSTS)
2245 return skb;
2246 if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
2247 return skb;
2248
2249 + mtu = READ_ONCE(dev->mtu);
2250 + if (mtu < IPV4_MIN_MTU)
2251 + return skb;
2252 +
2253 isquery = type == IGMPV3_MODE_IS_INCLUDE ||
2254 type == IGMPV3_MODE_IS_EXCLUDE;
2255 truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
2256 @@ -462,7 +486,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
2257 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
2258 if (skb)
2259 igmpv3_sendpack(skb);
2260 - skb = igmpv3_newpack(dev, dev->mtu);
2261 + skb = igmpv3_newpack(dev, mtu);
2262 }
2263 }
2264 first = 1;
2265 @@ -498,12 +522,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
2266 pgr->grec_nsrcs = htons(scount);
2267 if (skb)
2268 igmpv3_sendpack(skb);
2269 - skb = igmpv3_newpack(dev, dev->mtu);
2270 + skb = igmpv3_newpack(dev, mtu);
2271 first = 1;
2272 scount = 0;
2273 }
2274 if (first) {
2275 - skb = add_grhead(skb, pmc, type, &pgr);
2276 + skb = add_grhead(skb, pmc, type, &pgr, mtu);
2277 first = 0;
2278 }
2279 if (!skb)
2280 @@ -538,7 +562,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
2281 igmpv3_sendpack(skb);
2282 skb = NULL; /* add_grhead will get a new one */
2283 }
2284 - skb = add_grhead(skb, pmc, type, &pgr);
2285 + skb = add_grhead(skb, pmc, type, &pgr, mtu);
2286 }
2287 }
2288 if (pgr)
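
Two related hardenings land here: igmpv3_get_srcaddr() confirms the routed source address is actually configured on the device (per RFC 3376 section 4.2.13), and add_grec() snapshots dev->mtu once with READ_ONCE(), rejects anything below IPV4_MIN_MTU, and threads the cached value through add_grhead()/igmpv3_newpack() so a concurrent MTU change cannot size one report against two different MTUs. A hedged analog of the one-shot snapshot:

    #include <stdatomic.h>
    #include <stdio.h>

    #define IPV4_MIN_MTU 68u                      /* RFC 791 */

    static _Atomic unsigned int dev_mtu = 1500;   /* changed via "netlink" */

    static int build_report(void)
    {
        /* Snapshot once; every later sizing decision uses this value. */
        unsigned int mtu = atomic_load_explicit(&dev_mtu,
                                                memory_order_relaxed);
        if (mtu < IPV4_MIN_MTU)
            return -1;                            /* bail, as add_grec() does */

        printf("sizing all packets against mtu=%u\n", mtu);
        return 0;
    }

    int main(void)
    {
        return build_report();
    }
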
2289 diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
2290 index bd7f1836bb70..96536a0d6e2d 100644
2291 --- a/net/ipv4/ip_tunnel.c
2292 +++ b/net/ipv4/ip_tunnel.c
2293 @@ -346,8 +346,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
2294 dev->needed_headroom = t_hlen + hlen;
2295 mtu -= (dev->hard_header_len + t_hlen);
2296
2297 - if (mtu < 68)
2298 - mtu = 68;
2299 + if (mtu < IPV4_MIN_MTU)
2300 + mtu = IPV4_MIN_MTU;
2301
2302 return mtu;
2303 }
2304 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
2305 index 9879b73d5565..59d8770055ed 100644
2306 --- a/net/ipv4/raw.c
2307 +++ b/net/ipv4/raw.c
2308 @@ -502,11 +502,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2309 int err;
2310 struct ip_options_data opt_copy;
2311 struct raw_frag_vec rfv;
2312 + int hdrincl;
2313
2314 err = -EMSGSIZE;
2315 if (len > 0xFFFF)
2316 goto out;
2317
2318 + /* hdrincl should be READ_ONCE(inet->hdrincl)
2319 + * but READ_ONCE() doesn't work with bit fields
2320 + */
2321 + hdrincl = inet->hdrincl;
2322 /*
2323 * Check the flags.
2324 */
2325 @@ -582,7 +587,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2326 /* Linux does not mangle headers on raw sockets,
2327 * so that IP options + IP_HDRINCL is non-sense.
2328 */
2329 - if (inet->hdrincl)
2330 + if (hdrincl)
2331 goto done;
2332 if (ipc.opt->opt.srr) {
2333 if (!daddr)
2334 @@ -604,12 +609,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2335
2336 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
2337 RT_SCOPE_UNIVERSE,
2338 - inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
2339 + hdrincl ? IPPROTO_RAW : sk->sk_protocol,
2340 inet_sk_flowi_flags(sk) |
2341 - (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
2342 + (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
2343 daddr, saddr, 0, 0);
2344
2345 - if (!inet->hdrincl) {
2346 + if (!hdrincl) {
2347 rfv.msg = msg;
2348 rfv.hlen = 0;
2349
2350 @@ -634,7 +639,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2351 goto do_confirm;
2352 back_from_confirm:
2353
2354 - if (inet->hdrincl)
2355 + if (hdrincl)
2356 err = raw_send_hdrinc(sk, &fl4, msg, len,
2357 &rt, msg->msg_flags, &ipc.sockc);
2358
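
As the new comment notes, inet->hdrincl is a bitfield, so READ_ONCE() cannot be applied to it; copying it into a local at the top of raw_sendmsg() gives the whole call one coherent view and closes the race where a concurrent setsockopt(IP_HDRINCL) flips the bit between the route lookup and the send. A hedged sketch of the snapshot pattern:

    #include <stdio.h>

    struct inet_opts { unsigned hdrincl:1, other:7; };

    static int raw_send(volatile struct inet_opts *inet)
    {
        /* One snapshot; the bit may flip underneath us afterwards,
         * but this call keeps a single consistent view of it. */
        int hdrincl = inet->hdrincl;

        int proto = hdrincl ? 255 /* IPPROTO_RAW */ : 17 /* e.g. UDP */;
        if (hdrincl)
            printf("caller-built header, proto %d\n", proto);
        else
            printf("kernel-built header, proto %d\n", proto);
        return 0;
    }

    int main(void)
    {
        struct inet_opts opts = { .hdrincl = 1 };
        return raw_send(&opts);
    }
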
2359 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2360 index dd33c785ce16..05d2bde00864 100644
2361 --- a/net/ipv4/tcp.c
2362 +++ b/net/ipv4/tcp.c
2363 @@ -2297,6 +2297,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2364 tp->snd_cwnd_cnt = 0;
2365 tp->window_clamp = 0;
2366 tcp_set_ca_state(sk, TCP_CA_Open);
2367 + tp->is_sack_reneg = 0;
2368 tcp_clear_retrans(tp);
2369 inet_csk_delack_init(sk);
2370 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
2371 diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
2372 index cb8db347c680..97f9cac98348 100644
2373 --- a/net/ipv4/tcp_bbr.c
2374 +++ b/net/ipv4/tcp_bbr.c
2375 @@ -81,7 +81,8 @@ struct bbr {
2376 u32 lt_last_lost; /* LT intvl start: tp->lost */
2377 u32 pacing_gain:10, /* current gain for setting pacing rate */
2378 cwnd_gain:10, /* current gain for setting cwnd */
2379 - full_bw_cnt:3, /* number of rounds without large bw gains */
2380 + full_bw_reached:1, /* reached full bw in Startup? */
2381 + full_bw_cnt:2, /* number of rounds without large bw gains */
2382 cycle_idx:3, /* current index in pacing_gain cycle array */
2383 has_seen_rtt:1, /* have we seen an RTT sample yet? */
2384 unused_b:5;
2385 @@ -151,7 +152,7 @@ static bool bbr_full_bw_reached(const struct sock *sk)
2386 {
2387 const struct bbr *bbr = inet_csk_ca(sk);
2388
2389 - return bbr->full_bw_cnt >= bbr_full_bw_cnt;
2390 + return bbr->full_bw_reached;
2391 }
2392
2393 /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
2394 @@ -688,6 +689,7 @@ static void bbr_check_full_bw_reached(struct sock *sk,
2395 return;
2396 }
2397 ++bbr->full_bw_cnt;
2398 + bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
2399 }
2400
2401 /* If pipe is probably full, drain the queue and then enter steady-state. */
2402 @@ -821,6 +823,7 @@ static void bbr_init(struct sock *sk)
2403 bbr->restore_cwnd = 0;
2404 bbr->round_start = 0;
2405 bbr->idle_restart = 0;
2406 + bbr->full_bw_reached = 0;
2407 bbr->full_bw = 0;
2408 bbr->full_bw_cnt = 0;
2409 bbr->cycle_mstamp.v64 = 0;
2410 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2411 index 05255a286888..2f107e46355c 100644
2412 --- a/net/ipv4/tcp_input.c
2413 +++ b/net/ipv4/tcp_input.c
2414 @@ -1966,6 +1966,8 @@ void tcp_enter_loss(struct sock *sk)
2415 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
2416 tp->sacked_out = 0;
2417 tp->fackets_out = 0;
2418 + /* Mark SACK reneging until we recover from this loss event. */
2419 + tp->is_sack_reneg = 1;
2420 }
2421 tcp_clear_all_retrans_hints(tp);
2422
2423 @@ -2463,6 +2465,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
2424 return true;
2425 }
2426 tcp_set_ca_state(sk, TCP_CA_Open);
2427 + tp->is_sack_reneg = 0;
2428 return false;
2429 }
2430
2431 @@ -2494,8 +2497,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2432 NET_INC_STATS(sock_net(sk),
2433 LINUX_MIB_TCPSPURIOUSRTOS);
2434 inet_csk(sk)->icsk_retransmits = 0;
2435 - if (frto_undo || tcp_is_sack(tp))
2436 + if (frto_undo || tcp_is_sack(tp)) {
2437 tcp_set_ca_state(sk, TCP_CA_Open);
2438 + tp->is_sack_reneg = 0;
2439 + }
2440 return true;
2441 }
2442 return false;
2443 @@ -3589,6 +3594,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
2444 struct tcp_sacktag_state sack_state;
2445 struct rate_sample rs = { .prior_delivered = 0 };
2446 u32 prior_snd_una = tp->snd_una;
2447 + bool is_sack_reneg = tp->is_sack_reneg;
2448 u32 ack_seq = TCP_SKB_CB(skb)->seq;
2449 u32 ack = TCP_SKB_CB(skb)->ack_seq;
2450 bool is_dupack = false;
2451 @@ -3711,7 +3717,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
2452 tcp_schedule_loss_probe(sk);
2453 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
2454 lost = tp->lost - lost; /* freshly marked lost */
2455 - tcp_rate_gen(sk, delivered, lost, &now, &rs);
2456 + tcp_rate_gen(sk, delivered, lost, is_sack_reneg, &now, &rs);
2457 tcp_cong_control(sk, ack, delivered, flag, &rs);
2458 tcp_xmit_recovery(sk, rexmit);
2459 return 1;
2460 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2461 index d577ec07a0d8..b3960738464e 100644
2462 --- a/net/ipv4/tcp_ipv4.c
2463 +++ b/net/ipv4/tcp_ipv4.c
2464 @@ -828,7 +828,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
2465 tcp_time_stamp,
2466 req->ts_recent,
2467 0,
2468 - tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
2469 + tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
2470 AF_INET),
2471 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
2472 ip_hdr(skb)->tos);
2473 diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
2474 index 9be1581a5a08..18309f58ab8d 100644
2475 --- a/net/ipv4/tcp_rate.c
2476 +++ b/net/ipv4/tcp_rate.c
2477 @@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
2478
2479 /* Update the connection delivery information and generate a rate sample. */
2480 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
2481 - struct skb_mstamp *now, struct rate_sample *rs)
2482 + bool is_sack_reneg, struct skb_mstamp *now, struct rate_sample *rs)
2483 {
2484 struct tcp_sock *tp = tcp_sk(sk);
2485 u32 snd_us, ack_us;
2486 @@ -124,8 +124,12 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
2487
2488 rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
2489 rs->losses = lost; /* freshly marked lost */
2490 - /* Return an invalid sample if no timing information is available. */
2491 - if (!rs->prior_mstamp.v64) {
2492 + /* Return an invalid sample if no timing information is available or
2493 + * in recovery from loss with SACK reneging. Rate samples taken during
2494 + * a SACK reneging event may overestimate bw by including packets that
2495 + * were SACKed before the reneg.
2496 + */
2497 + if (!rs->prior_mstamp.v64 || is_sack_reneg) {
2498 rs->delivered = -1;
2499 rs->interval_us = -1;
2500 return;
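
A rate sample is essentially delivered / interval_us; while the receiver is reneging on SACKs, segments already counted as delivered can be delivered (and counted) again, so the sample would overstate bandwidth. tcp_rate_gen() therefore invalidates the sample, exactly as it already did when timing information was missing. A hedged sketch of the guard:

    #include <stdbool.h>
    #include <stdio.h>

    struct rate_sample { long delivered; long interval_us; };

    static void rate_gen(struct rate_sample *rs, long delivered,
                         long interval_us, bool is_sack_reneg)
    {
        /* Hand the congestion control an explicitly invalid sample
         * instead of a skewed one. */
        if (interval_us <= 0 || is_sack_reneg) {
            rs->delivered = -1;
            rs->interval_us = -1;
            return;
        }
        rs->delivered = delivered;
        rs->interval_us = interval_us;
    }

    int main(void)
    {
        struct rate_sample rs;
        rate_gen(&rs, 100, 10000, true);
        printf("delivered=%ld (invalid sample)\n", rs.delivered);
        return 0;
    }
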
2501 diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
2502 index 8285a1c108c9..5cad76f87536 100644
2503 --- a/net/ipv6/af_inet6.c
2504 +++ b/net/ipv6/af_inet6.c
2505 @@ -209,7 +209,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
2506 np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
2507 np->mc_loop = 1;
2508 np->pmtudisc = IPV6_PMTUDISC_WANT;
2509 - np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
2510 sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
2511
2512 /* Init the ipv4 part of the socket since we can have sockets
2513 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2514 index 6e01c9a8dfd3..506efba33a89 100644
2515 --- a/net/ipv6/ip6_output.c
2516 +++ b/net/ipv6/ip6_output.c
2517 @@ -156,6 +156,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2518 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
2519 }
2520
2521 +static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
2522 +{
2523 + if (!np->autoflowlabel_set)
2524 + return ip6_default_np_autolabel(net);
2525 + else
2526 + return np->autoflowlabel;
2527 +}
2528 +
2529 /*
2530 * xmit an sk_buff (used by TCP, SCTP and DCCP)
2531 * Note : socket lock is not held for SYNACK packets, but might be modified
2532 @@ -219,7 +227,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
2533 hlimit = ip6_dst_hoplimit(dst);
2534
2535 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
2536 - np->autoflowlabel, fl6));
2537 + ip6_autoflowlabel(net, np), fl6));
2538
2539 hdr->payload_len = htons(seg_len);
2540 hdr->nexthdr = proto;
2541 @@ -1691,7 +1699,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
2542
2543 ip6_flow_hdr(hdr, v6_cork->tclass,
2544 ip6_make_flowlabel(net, skb, fl6->flowlabel,
2545 - np->autoflowlabel, fl6));
2546 + ip6_autoflowlabel(net, np), fl6));
2547 hdr->hop_limit = v6_cork->hop_limit;
2548 hdr->nexthdr = proto;
2549 hdr->saddr = fl6->saddr;
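
Removing the assignment from socket creation and resolving the default in ip6_autoflowlabel() makes IPV6_AUTOFLOWLABEL effectively tri-state: a socket that never set the option follows the per-namespace sysctl at transmit time, even if the admin changes it after the socket was created, while an explicit setsockopt() pins the value via autoflowlabel_set. A hedged sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct sock_opts {
        unsigned autoflowlabel:1;
        unsigned autoflowlabel_set:1;    /* set explicitly by userspace? */
    };

    static bool net_default = true;      /* per-netns sysctl analog */

    static bool effective_autoflowlabel(const struct sock_opts *np)
    {
        return np->autoflowlabel_set ? np->autoflowlabel : net_default;
    }

    int main(void)
    {
        struct sock_opts untouched = { 0 };
        struct sock_opts pinned = { .autoflowlabel = 1,
                                    .autoflowlabel_set = 1 };

        net_default = false;             /* admin flips the sysctl later */
        printf("untouched=%d pinned=%d\n",
               effective_autoflowlabel(&untouched),   /* follows sysctl: 0 */
               effective_autoflowlabel(&pinned));     /* stays 1 */
        return 0;
    }
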
2550 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2551 index 12b2fd512f32..11d22d642488 100644
2552 --- a/net/ipv6/ip6_tunnel.c
2553 +++ b/net/ipv6/ip6_tunnel.c
2554 @@ -911,7 +911,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
2555 if (t->parms.collect_md) {
2556 tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
2557 if (!tun_dst)
2558 - return 0;
2559 + goto drop;
2560 }
2561 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
2562 log_ecn_error);
2563 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
2564 index 38bee173dc2b..6e3871c7f8f7 100644
2565 --- a/net/ipv6/ipv6_sockglue.c
2566 +++ b/net/ipv6/ipv6_sockglue.c
2567 @@ -874,6 +874,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
2568 break;
2569 case IPV6_AUTOFLOWLABEL:
2570 np->autoflowlabel = valbool;
2571 + np->autoflowlabel_set = 1;
2572 retv = 0;
2573 break;
2574 }
2575 diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
2576 index 1bdc703cb966..ca8fac6e5a09 100644
2577 --- a/net/ipv6/mcast.c
2578 +++ b/net/ipv6/mcast.c
2579 @@ -1682,16 +1682,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
2580 }
2581
2582 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2583 - int type, struct mld2_grec **ppgr)
2584 + int type, struct mld2_grec **ppgr, unsigned int mtu)
2585 {
2586 - struct net_device *dev = pmc->idev->dev;
2587 struct mld2_report *pmr;
2588 struct mld2_grec *pgr;
2589
2590 - if (!skb)
2591 - skb = mld_newpack(pmc->idev, dev->mtu);
2592 - if (!skb)
2593 - return NULL;
2594 + if (!skb) {
2595 + skb = mld_newpack(pmc->idev, mtu);
2596 + if (!skb)
2597 + return NULL;
2598 + }
2599 pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
2600 pgr->grec_type = type;
2601 pgr->grec_auxwords = 0;
2602 @@ -1714,10 +1714,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2603 struct mld2_grec *pgr = NULL;
2604 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
2605 int scount, stotal, first, isquery, truncate;
2606 + unsigned int mtu;
2607
2608 if (pmc->mca_flags & MAF_NOREPORT)
2609 return skb;
2610
2611 + mtu = READ_ONCE(dev->mtu);
2612 + if (mtu < IPV6_MIN_MTU)
2613 + return skb;
2614 +
2615 isquery = type == MLD2_MODE_IS_INCLUDE ||
2616 type == MLD2_MODE_IS_EXCLUDE;
2617 truncate = type == MLD2_MODE_IS_EXCLUDE ||
2618 @@ -1738,7 +1743,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2619 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
2620 if (skb)
2621 mld_sendpack(skb);
2622 - skb = mld_newpack(idev, dev->mtu);
2623 + skb = mld_newpack(idev, mtu);
2624 }
2625 }
2626 first = 1;
2627 @@ -1774,12 +1779,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2628 pgr->grec_nsrcs = htons(scount);
2629 if (skb)
2630 mld_sendpack(skb);
2631 - skb = mld_newpack(idev, dev->mtu);
2632 + skb = mld_newpack(idev, mtu);
2633 first = 1;
2634 scount = 0;
2635 }
2636 if (first) {
2637 - skb = add_grhead(skb, pmc, type, &pgr);
2638 + skb = add_grhead(skb, pmc, type, &pgr, mtu);
2639 first = 0;
2640 }
2641 if (!skb)
2642 @@ -1814,7 +1819,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2643 mld_sendpack(skb);
2644 skb = NULL; /* add_grhead will get a new one */
2645 }
2646 - skb = add_grhead(skb, pmc, type, &pgr);
2647 + skb = add_grhead(skb, pmc, type, &pgr, mtu);
2648 }
2649 }
2650 if (pgr)
2651 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
2652 index 7ac2365aa6fb..eb624547382f 100644
2653 --- a/net/ipv6/tcp_ipv6.c
2654 +++ b/net/ipv6/tcp_ipv6.c
2655 @@ -962,7 +962,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
2656 tcp_rsk(req)->rcv_nxt,
2657 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
2658 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
2659 - tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
2660 + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
2661 0, 0);
2662 }
2663
2664 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2665 index 1ff497bd9c20..e1c123d4cdda 100644
2666 --- a/net/netlink/af_netlink.c
2667 +++ b/net/netlink/af_netlink.c
2668 @@ -261,6 +261,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
2669 struct sock *sk = skb->sk;
2670 int ret = -ENOMEM;
2671
2672 + if (!net_eq(dev_net(dev), sock_net(sk)))
2673 + return 0;
2674 +
2675 dev_hold(dev);
2676
2677 if (is_vmalloc_addr(skb->head))
2678 diff --git a/net/rds/send.c b/net/rds/send.c
2679 index ad247dc71ebb..ef53d164e146 100644
2680 --- a/net/rds/send.c
2681 +++ b/net/rds/send.c
2682 @@ -1006,6 +1006,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
2683 continue;
2684
2685 if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
2686 + if (cmsg->cmsg_len <
2687 + CMSG_LEN(sizeof(struct rds_rdma_args)))
2688 + return -EINVAL;
2689 args = CMSG_DATA(cmsg);
2690 *rdma_bytes += args->remote_vec.bytes;
2691 }
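
CMSG_DATA() merely points past the control-message header; nothing guarantees the message really carries sizeof(struct rds_rdma_args) bytes, so the length check has to come before the payload is read. A hedged user-space sketch with the standard cmsg macros (struct name illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    struct rdma_args { unsigned long remote_bytes; };   /* illustrative */

    static int parse(struct msghdr *msg)
    {
        struct cmsghdr *cmsg;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
            struct rdma_args args;

            /* Validate the length before touching CMSG_DATA(). */
            if (cmsg->cmsg_len < CMSG_LEN(sizeof(args)))
                return -1;               /* -EINVAL in the kernel */

            memcpy(&args, CMSG_DATA(cmsg), sizeof(args));
            printf("remote_bytes=%lu\n", args.remote_bytes);
        }
        return 0;
    }

    int main(void)
    {
        union {
            char buf[CMSG_SPACE(sizeof(struct rdma_args))];
            struct cmsghdr align;
        } u;
        struct rdma_args args = { .remote_bytes = 4096 };
        struct msghdr msg = { 0 };
        struct cmsghdr *cmsg;

        memset(&u, 0, sizeof(u));
        msg.msg_control = u.buf;
        msg.msg_controllen = sizeof(u.buf);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(args));
        memcpy(CMSG_DATA(cmsg), &args, sizeof(args));
        return parse(&msg);
    }
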
2692 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2693 index c2ab864da50d..7181ce6c62bf 100644
2694 --- a/net/sctp/socket.c
2695 +++ b/net/sctp/socket.c
2696 @@ -4246,7 +4246,7 @@ static int sctp_init_sock(struct sock *sk)
2697 SCTP_DBG_OBJCNT_INC(sock);
2698
2699 local_bh_disable();
2700 - percpu_counter_inc(&sctp_sockets_allocated);
2701 + sk_sockets_allocated_inc(sk);
2702 sock_prot_inuse_add(net, sk->sk_prot, 1);
2703
2704 /* Nothing can fail after this block, otherwise
2705 @@ -4290,7 +4290,7 @@ static void sctp_destroy_sock(struct sock *sk)
2706 }
2707 sctp_endpoint_free(sp->ep);
2708 local_bh_disable();
2709 - percpu_counter_dec(&sctp_sockets_allocated);
2710 + sk_sockets_allocated_dec(sk);
2711 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
2712 local_bh_enable();
2713 }
2714 diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
2715 index c9af022676c2..47c3e97c3136 100644
2716 --- a/sound/hda/hdac_i915.c
2717 +++ b/sound/hda/hdac_i915.c
2718 @@ -319,7 +319,7 @@ static int hdac_component_master_match(struct device *dev, void *data)
2719 */
2720 int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
2721 {
2722 - if (WARN_ON(!hdac_acomp))
2723 + if (!hdac_acomp)
2724 return -ENODEV;
2725
2726 hdac_acomp->audio_ops = aops;
2727 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2728 index ba40596b9d92..4ef3b0067876 100644
2729 --- a/sound/pci/hda/patch_realtek.c
2730 +++ b/sound/pci/hda/patch_realtek.c
2731 @@ -5971,6 +5971,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
2732 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2733 {0x1b, 0x01011020},
2734 {0x21, 0x02211010}),
2735 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2736 + {0x12, 0x90a60130},
2737 + {0x14, 0x90170110},
2738 + {0x1b, 0x01011020},
2739 + {0x21, 0x0221101f}),
2740 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2741 {0x12, 0x90a60160},
2742 {0x14, 0x90170120},
2743 diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
2744 index c69e97654fc6..f88632426c0a 100644
2745 --- a/sound/soc/codecs/da7218.c
2746 +++ b/sound/soc/codecs/da7218.c
2747 @@ -2519,7 +2519,7 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
2748 }
2749
2750 if (da7218->dev_id == DA7218_DEV_ID) {
2751 - hpldet_np = of_find_node_by_name(np, "da7218_hpldet");
2752 + hpldet_np = of_get_child_by_name(np, "da7218_hpldet");
2753 if (!hpldet_np)
2754 return pdata;
2755
2756 diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
2757 index 5acd5b69fb83..f9b6c5a81b47 100644
2758 --- a/sound/soc/codecs/tlv320aic31xx.h
2759 +++ b/sound/soc/codecs/tlv320aic31xx.h
2760 @@ -115,7 +115,7 @@ struct aic31xx_pdata {
2761 /* INT2 interrupt control */
2762 #define AIC31XX_INT2CTRL AIC31XX_REG(0, 49)
2763 /* GPIO1 control */
2764 -#define AIC31XX_GPIO1 AIC31XX_REG(0, 50)
2765 +#define AIC31XX_GPIO1 AIC31XX_REG(0, 51)
2766
2767 #define AIC31XX_DACPRB AIC31XX_REG(0, 60)
2768 /* ADC Instruction Set Register */
2769 diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
2770 index a2104d68169d..26fd6a664b9b 100644
2771 --- a/sound/soc/codecs/twl4030.c
2772 +++ b/sound/soc/codecs/twl4030.c
2773 @@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
2774 struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
2775 struct device_node *twl4030_codec_node = NULL;
2776
2777 - twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node,
2778 + twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
2779 "codec");
2780
2781 if (!pdata && twl4030_codec_node) {
2782 @@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
2783 GFP_KERNEL);
2784 if (!pdata) {
2785 dev_err(codec->dev, "Can not allocate memory\n");
2786 + of_node_put(twl4030_codec_node);
2787 return NULL;
2788 }
2789 twl4030_setup_pdata_of(pdata, twl4030_codec_node);
2790 + of_node_put(twl4030_codec_node);
2791 }
2792
2793 return pdata;
2794 diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
2795 index 757af795cebd..c03c9da076c2 100644
2796 --- a/sound/soc/codecs/wm_adsp.c
2797 +++ b/sound/soc/codecs/wm_adsp.c
2798 @@ -1465,7 +1465,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
2799 le64_to_cpu(footer->timestamp));
2800
2801 while (pos < firmware->size &&
2802 - pos - firmware->size > sizeof(*region)) {
2803 + sizeof(*region) < firmware->size - pos) {
2804 region = (void *)&(firmware->data[pos]);
2805 region_name = "Unknown";
2806 reg = 0;
2807 @@ -1526,8 +1526,8 @@ static int wm_adsp_load(struct wm_adsp *dsp)
2808 regions, le32_to_cpu(region->len), offset,
2809 region_name);
2810
2811 - if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
2812 - firmware->size) {
2813 + if (le32_to_cpu(region->len) >
2814 + firmware->size - pos - sizeof(*region)) {
2815 adsp_err(dsp,
2816 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
2817 file, regions, region_name,
2818 @@ -1992,7 +1992,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
2819
2820 blocks = 0;
2821 while (pos < firmware->size &&
2822 - pos - firmware->size > sizeof(*blk)) {
2823 + sizeof(*blk) < firmware->size - pos) {
2824 blk = (void *)(&firmware->data[pos]);
2825
2826 type = le16_to_cpu(blk->type);
2827 @@ -2066,8 +2066,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
2828 }
2829
2830 if (reg) {
2831 - if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
2832 - firmware->size) {
2833 + if (le32_to_cpu(blk->len) >
2834 + firmware->size - pos - sizeof(*blk)) {
2835 adsp_err(dsp,
2836 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
2837 file, blocks, region_name,
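
Both wm_adsp checks are rewritten into a wrap-free form: pos + le32_to_cpu(region->len) + sizeof(*region) can overflow when a crafted firmware file supplies a huge len, making an out-of-bounds region look valid, whereas comparing len against firmware->size - pos - sizeof(*region) only subtracts quantities the loop condition has already bounded. A hedged sketch of the rearrangement:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Does a record with a 'hdr'-byte header and 'len' payload bytes at
     * offset 'pos' fit inside a blob of 'size' bytes?  Assumes the caller
     * already ensured hdr < size - pos (the parser's loop condition). */
    static bool record_fits(size_t size, size_t pos, size_t hdr, uint32_t len)
    {
        /* Overflow-prone: pos + len + hdr > size (the sum may wrap,
         * e.g. on 32-bit).  Wrap-free: every subtrahend is bounded. */
        return len <= size - pos - hdr;
    }

    int main(void)
    {
        size_t size = 1024, pos = 512, hdr = 8;

        printf("small: %d\n", record_fits(size, pos, hdr, 100));        /* 1 */
        printf("huge:  %d\n", record_fits(size, pos, hdr, UINT32_MAX)); /* 0 */
        return 0;
    }
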
2838 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
2839 index fde08660b63b..1c03490e1182 100644
2840 --- a/sound/soc/fsl/fsl_ssi.c
2841 +++ b/sound/soc/fsl/fsl_ssi.c
2842 @@ -1467,12 +1467,6 @@ static int fsl_ssi_probe(struct platform_device *pdev)
2843 sizeof(fsl_ssi_ac97_dai));
2844
2845 fsl_ac97_data = ssi_private;
2846 -
2847 - ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
2848 - if (ret) {
2849 - dev_err(&pdev->dev, "could not set AC'97 ops\n");
2850 - return ret;
2851 - }
2852 } else {
2853 /* Initialize this copy of the CPU DAI driver structure */
2854 memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
2855 @@ -1583,6 +1577,14 @@ static int fsl_ssi_probe(struct platform_device *pdev)
2856 return ret;
2857 }
2858
2859 + if (fsl_ssi_is_ac97(ssi_private)) {
2860 + ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
2861 + if (ret) {
2862 + dev_err(&pdev->dev, "could not set AC'97 ops\n");
2863 + goto error_ac97_ops;
2864 + }
2865 + }
2866 +
2867 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
2868 &ssi_private->cpu_dai_drv, 1);
2869 if (ret) {
2870 @@ -1666,6 +1668,10 @@ static int fsl_ssi_probe(struct platform_device *pdev)
2871 fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
2872
2873 error_asoc_register:
2874 + if (fsl_ssi_is_ac97(ssi_private))
2875 + snd_soc_set_ac97_ops(NULL);
2876 +
2877 +error_ac97_ops:
2878 if (ssi_private->soc->imx)
2879 fsl_ssi_imx_clean(pdev, ssi_private);
2880
2881 diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
2882 index 767be7c76034..1754e094bc28 100644
2883 --- a/tools/objtool/arch/x86/insn/x86-opcode-map.txt
2884 +++ b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
2885 @@ -896,7 +896,7 @@ EndTable
2886
2887 GrpTable: Grp3_1
2888 0: TEST Eb,Ib
2889 -1:
2890 +1: TEST Eb,Ib
2891 2: NOT Eb
2892 3: NEG Eb
2893 4: MUL AL,Eb
2894 diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
2895 index 2b3d6d235015..3d7b42e77299 100644
2896 --- a/tools/usb/usbip/src/utils.c
2897 +++ b/tools/usb/usbip/src/utils.c
2898 @@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
2899 char command[SYSFS_BUS_ID_SIZE + 4];
2900 char match_busid_attr_path[SYSFS_PATH_MAX];
2901 int rc;
2902 + int cmd_size;
2903
2904 snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
2905 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
2906 @@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
2907 attr_name);
2908
2909 if (add)
2910 - snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
2911 + cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
2912 + busid);
2913 else
2914 - snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
2915 + cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
2916 + busid);
2917
2918 rc = write_sysfs_attribute(match_busid_attr_path, command,
2919 - sizeof(command));
2920 + cmd_size);
2921 if (rc < 0) {
2922 dbg("failed to write match_busid: %s", strerror(errno));
2923 return -1;