Magellan Linux

Contents of /trunk/kernel-alx/patches-4.4/0100-4.4.1-all-fixes.patch



Revision 2792
Mon May 30 12:45:25 2016 UTC by niro
File size: 87375 byte(s)
-linux-4.4.1
1 diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
2 index 3a4abfc44f5e..136ba17d2da0 100644
3 --- a/Documentation/ABI/testing/sysfs-bus-usb
4 +++ b/Documentation/ABI/testing/sysfs-bus-usb
5 @@ -134,19 +134,21 @@ Description:
6 enabled for the device. Developer can write y/Y/1 or n/N/0 to
7 the file to enable/disable the feature.
8
9 -What: /sys/bus/usb/devices/.../power/usb3_hardware_lpm
10 -Date: June 2015
11 +What: /sys/bus/usb/devices/.../power/usb3_hardware_lpm_u1
12 + /sys/bus/usb/devices/.../power/usb3_hardware_lpm_u2
13 +Date: November 2015
14 Contact: Kevin Strasser <kevin.strasser@linux.intel.com>
15 + Lu Baolu <baolu.lu@linux.intel.com>
16 Description:
17 If CONFIG_PM is set and a USB 3.0 lpm-capable device is plugged
18 in to a xHCI host which supports link PM, it will check if U1
19 and U2 exit latencies have been set in the BOS descriptor; if
20 - the check is is passed and the host supports USB3 hardware LPM,
21 + the check is passed and the host supports USB3 hardware LPM,
22 USB3 hardware LPM will be enabled for the device and the USB
23 - device directory will contain a file named
24 - power/usb3_hardware_lpm. The file holds a string value (enable
25 - or disable) indicating whether or not USB3 hardware LPM is
26 - enabled for the device.
27 + device directory will contain two files named
28 + power/usb3_hardware_lpm_u1 and power/usb3_hardware_lpm_u2. These
29 + files hold a string value (enable or disable) indicating whether
30 + or not USB3 hardware LPM U1 or U2 is enabled for the device.
31
32 What: /sys/bus/usb/devices/.../removable
33 Date: February 2012
34 diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
35 index 4a15c90bc11d..0a94ffe17ab6 100644
36 --- a/Documentation/usb/power-management.txt
37 +++ b/Documentation/usb/power-management.txt
38 @@ -537,17 +537,18 @@ relevant attribute files are usb2_hardware_lpm and usb3_hardware_lpm.
39 can write y/Y/1 or n/N/0 to the file to enable/disable
40 USB2 hardware LPM manually. This is for test purpose mainly.
41
42 - power/usb3_hardware_lpm
43 + power/usb3_hardware_lpm_u1
44 + power/usb3_hardware_lpm_u2
45
46 When a USB 3.0 lpm-capable device is plugged in to a
47 xHCI host which supports link PM, it will check if U1
48 and U2 exit latencies have been set in the BOS
49 descriptor; if the check is is passed and the host
50 supports USB3 hardware LPM, USB3 hardware LPM will be
51 - enabled for the device and this file will be created.
52 - The file holds a string value (enable or disable)
53 - indicating whether or not USB3 hardware LPM is
54 - enabled for the device.
55 + enabled for the device and these files will be created.
56 + The files hold a string value (enable or disable)
57 + indicating whether or not USB3 hardware LPM U1 or U2
58 + is enabled for the device.
59
60 USB Port Power Control
61 ----------------------
62 diff --git a/Makefile b/Makefile
63 index 70dea02f1346..c6a265b52c93 100644
64 --- a/Makefile
65 +++ b/Makefile
66 @@ -1,6 +1,6 @@
67 VERSION = 4
68 PATCHLEVEL = 4
69 -SUBLEVEL = 0
70 +SUBLEVEL = 1
71 EXTRAVERSION =
72 NAME = Blurry Fish Butt
73
74 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
75 index 23cfc08fc8ba..b685257926f0 100644
76 --- a/arch/arm64/kernel/head.S
77 +++ b/arch/arm64/kernel/head.S
78 @@ -512,9 +512,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
79 #endif
80
81 /* EL2 debug */
82 + mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
83 + sbfx x0, x0, #8, #4
84 + cmp x0, #1
85 + b.lt 4f // Skip if no PMU present
86 mrs x0, pmcr_el0 // Disable debug access traps
87 ubfx x0, x0, #11, #5 // to EL2 and allow access to
88 msr mdcr_el2, x0 // all PMU counters from EL1
89 +4:
90
91 /* Stage-2 translation */
92 msr vttbr_el2, xzr
93 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
94 index 5b1897e8ca24..62d3dc60ca09 100644
95 --- a/arch/arm64/kernel/perf_event.c
96 +++ b/arch/arm64/kernel/perf_event.c
97 @@ -574,9 +574,6 @@ static void armv8pmu_reset(void *info)
98
99 /* Initialize & Reset PMNC: C and P bits. */
100 armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
101 -
102 - /* Disable access from userspace. */
103 - asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
104 }
105
106 static int armv8_pmuv3_map_event(struct perf_event *event)
107 diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
108 index 1971f491bb90..ff7f13239515 100644
109 --- a/arch/arm64/kernel/ptrace.c
110 +++ b/arch/arm64/kernel/ptrace.c
111 @@ -58,6 +58,12 @@
112 */
113 void ptrace_disable(struct task_struct *child)
114 {
115 + /*
116 + * This would be better off in core code, but PTRACE_DETACH has
117 + * grown its fair share of arch-specific worts and changing it
118 + * is likely to cause regressions on obscure architectures.
119 + */
120 + user_disable_single_step(child);
121 }
122
123 #ifdef CONFIG_HAVE_HW_BREAKPOINT
124 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
125 index 873e363048c6..116ad654dd59 100644
126 --- a/arch/arm64/mm/mmu.c
127 +++ b/arch/arm64/mm/mmu.c
128 @@ -456,6 +456,9 @@ void __init paging_init(void)
129
130 empty_zero_page = virt_to_page(zero_page);
131
132 + /* Ensure the zero page is visible to the page table walker */
133 + dsb(ishst);
134 +
135 /*
136 * TTBR0 is only used for the identity mapping at this stage. Make it
137 * point to zero page to avoid speculatively fetching new entries.
138 diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
139 index 4c4d93c4bf65..d69dffffaa89 100644
140 --- a/arch/arm64/mm/proc-macros.S
141 +++ b/arch/arm64/mm/proc-macros.S
142 @@ -62,3 +62,15 @@
143 bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
144 #endif
145 .endm
146 +
147 +/*
148 + * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
149 + */
150 + .macro reset_pmuserenr_el0, tmpreg
151 + mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
152 + sbfx \tmpreg, \tmpreg, #8, #4
153 + cmp \tmpreg, #1 // Skip if no PMU present
154 + b.lt 9000f
155 + msr pmuserenr_el0, xzr // Disable PMU access from EL0
156 +9000:
157 + .endm
158 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
159 index cacecc4ad3e5..b8f04b3f2786 100644
160 --- a/arch/arm64/mm/proc.S
161 +++ b/arch/arm64/mm/proc.S
162 @@ -117,6 +117,7 @@ ENTRY(cpu_do_resume)
163 */
164 ubfx x11, x11, #1, #1
165 msr oslar_el1, x11
166 + reset_pmuserenr_el0 x0 // Disable PMU access from EL0
167 mov x0, x12
168 dsb nsh // Make sure local tlb invalidation completed
169 isb
170 @@ -155,6 +156,7 @@ ENTRY(__cpu_setup)
171 msr cpacr_el1, x0 // Enable FP/ASIMD
172 mov x0, #1 << 12 // Reset mdscr_el1 and disable
173 msr mdscr_el1, x0 // access to the DCC from EL0
174 + reset_pmuserenr_el0 x0 // Disable PMU access from EL0
175 /*
176 * Memory region attributes for LPAE:
177 *
178 diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
179 index ad6263cffb0f..d1a8d93cccfd 100644
180 --- a/arch/powerpc/include/asm/cmpxchg.h
181 +++ b/arch/powerpc/include/asm/cmpxchg.h
182 @@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
183 unsigned long prev;
184
185 __asm__ __volatile__(
186 - PPC_RELEASE_BARRIER
187 + PPC_ATOMIC_ENTRY_BARRIER
188 "1: lwarx %0,0,%2 \n"
189 PPC405_ERR77(0,%2)
190 " stwcx. %3,0,%2 \n\
191 bne- 1b"
192 - PPC_ACQUIRE_BARRIER
193 + PPC_ATOMIC_EXIT_BARRIER
194 : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
195 : "r" (p), "r" (val)
196 : "cc", "memory");
197 @@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
198 unsigned long prev;
199
200 __asm__ __volatile__(
201 - PPC_RELEASE_BARRIER
202 + PPC_ATOMIC_ENTRY_BARRIER
203 "1: ldarx %0,0,%2 \n"
204 PPC405_ERR77(0,%2)
205 " stdcx. %3,0,%2 \n\
206 bne- 1b"
207 - PPC_ACQUIRE_BARRIER
208 + PPC_ATOMIC_EXIT_BARRIER
209 : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
210 : "r" (p), "r" (val)
211 : "cc", "memory");
212 @@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
213 unsigned int prev;
214
215 __asm__ __volatile__ (
216 - PPC_RELEASE_BARRIER
217 + PPC_ATOMIC_ENTRY_BARRIER
218 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
219 cmpw 0,%0,%3\n\
220 bne- 2f\n"
221 PPC405_ERR77(0,%2)
222 " stwcx. %4,0,%2\n\
223 bne- 1b"
224 - PPC_ACQUIRE_BARRIER
225 + PPC_ATOMIC_EXIT_BARRIER
226 "\n\
227 2:"
228 : "=&r" (prev), "+m" (*p)
229 @@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
230 unsigned long prev;
231
232 __asm__ __volatile__ (
233 - PPC_RELEASE_BARRIER
234 + PPC_ATOMIC_ENTRY_BARRIER
235 "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
236 cmpd 0,%0,%3\n\
237 bne- 2f\n\
238 stdcx. %4,0,%2\n\
239 bne- 1b"
240 - PPC_ACQUIRE_BARRIER
241 + PPC_ATOMIC_EXIT_BARRIER
242 "\n\
243 2:"
244 : "=&r" (prev), "+m" (*p)
245 diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
246 index e682a7143edb..c50868681f9e 100644
247 --- a/arch/powerpc/include/asm/synch.h
248 +++ b/arch/powerpc/include/asm/synch.h
249 @@ -44,7 +44,7 @@ static inline void isync(void)
250 MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
251 #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
252 #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
253 -#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
254 +#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
255 #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
256 #else
257 #define PPC_ACQUIRE_BARRIER
258 diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
259 index 59dad113897b..c2d21d11c2d2 100644
260 --- a/arch/powerpc/include/uapi/asm/elf.h
261 +++ b/arch/powerpc/include/uapi/asm/elf.h
262 @@ -295,6 +295,8 @@ do { \
263 #define R_PPC64_TLSLD 108
264 #define R_PPC64_TOCSAVE 109
265
266 +#define R_PPC64_ENTRY 118
267 +
268 #define R_PPC64_REL16 249
269 #define R_PPC64_REL16_LO 250
270 #define R_PPC64_REL16_HI 251
271 diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
272 index 68384514506b..59663af9315f 100644
273 --- a/arch/powerpc/kernel/module_64.c
274 +++ b/arch/powerpc/kernel/module_64.c
275 @@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
276 */
277 break;
278
279 + case R_PPC64_ENTRY:
280 + /*
281 + * Optimize ELFv2 large code model entry point if
282 + * the TOC is within 2GB range of current location.
283 + */
284 + value = my_r2(sechdrs, me) - (unsigned long)location;
285 + if (value + 0x80008000 > 0xffffffff)
286 + break;
287 + /*
288 + * Check for the large code model prolog sequence:
289 + * ld r2, ...(r12)
290 + * add r2, r2, r12
291 + */
292 + if ((((uint32_t *)location)[0] & ~0xfffc)
293 + != 0xe84c0000)
294 + break;
295 + if (((uint32_t *)location)[1] != 0x7c426214)
296 + break;
297 + /*
298 + * If found, replace it with:
299 + * addis r2, r12, (.TOC.-func)@ha
300 + * addi r2, r12, (.TOC.-func)@l
301 + */
302 + ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
303 + ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
304 + break;
305 +
306 case R_PPC64_REL16_HA:
307 /* Subtract location pointer */
308 value -= (unsigned long)location;
309 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
310 index 646bf4d222c1..ef2ad2d682da 100644
311 --- a/arch/powerpc/kernel/process.c
312 +++ b/arch/powerpc/kernel/process.c
313 @@ -569,6 +569,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
314 if (!MSR_TM_SUSPENDED(mfmsr()))
315 return;
316
317 + /*
318 + * Use the current MSR TM suspended bit to track if we have
319 + * checkpointed state outstanding.
320 + * On signal delivery, we'd normally reclaim the checkpointed
321 + * state to obtain stack pointer (see:get_tm_stackpointer()).
322 + * This will then directly return to userspace without going
323 + * through __switch_to(). However, if the stack frame is bad,
324 + * we need to exit this thread which calls __switch_to() which
325 + * will again attempt to reclaim the already saved tm state.
326 + * Hence we need to check that we've not already reclaimed
327 + * this state.
328 + * We do this using the current MSR, rather tracking it in
329 + * some specific thread_struct bit, as it has the additional
330 + * benifit of checking for a potential TM bad thing exception.
331 + */
332 + if (!MSR_TM_SUSPENDED(mfmsr()))
333 + return;
334 +
335 tm_reclaim(thr, thr->regs->msr, cause);
336
337 /* Having done the reclaim, we now have the checkpointed
338 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
339 index 4fa687a47a62..6b8d6e8cd449 100644
340 --- a/arch/x86/include/asm/boot.h
341 +++ b/arch/x86/include/asm/boot.h
342 @@ -27,7 +27,7 @@
343 #define BOOT_HEAP_SIZE 0x400000
344 #else /* !CONFIG_KERNEL_BZIP2 */
345
346 -#define BOOT_HEAP_SIZE 0x8000
347 +#define BOOT_HEAP_SIZE 0x10000
348
349 #endif /* !CONFIG_KERNEL_BZIP2 */
350
351 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
352 index 379cd3658799..bfd9b2a35a0b 100644
353 --- a/arch/x86/include/asm/mmu_context.h
354 +++ b/arch/x86/include/asm/mmu_context.h
355 @@ -116,8 +116,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
356 #endif
357 cpumask_set_cpu(cpu, mm_cpumask(next));
358
359 - /* Re-load page tables */
360 + /*
361 + * Re-load page tables.
362 + *
363 + * This logic has an ordering constraint:
364 + *
365 + * CPU 0: Write to a PTE for 'next'
366 + * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
367 + * CPU 1: set bit 1 in next's mm_cpumask
368 + * CPU 1: load from the PTE that CPU 0 writes (implicit)
369 + *
370 + * We need to prevent an outcome in which CPU 1 observes
371 + * the new PTE value and CPU 0 observes bit 1 clear in
372 + * mm_cpumask. (If that occurs, then the IPI will never
373 + * be sent, and CPU 0's TLB will contain a stale entry.)
374 + *
375 + * The bad outcome can occur if either CPU's load is
376 + * reordered before that CPU's store, so both CPUs must
377 + * execute full barriers to prevent this from happening.
378 + *
379 + * Thus, switch_mm needs a full barrier between the
380 + * store to mm_cpumask and any operation that could load
381 + * from next->pgd. TLB fills are special and can happen
382 + * due to instruction fetches or for no reason at all,
383 + * and neither LOCK nor MFENCE orders them.
384 + * Fortunately, load_cr3() is serializing and gives the
385 + * ordering guarantee we need.
386 + *
387 + */
388 load_cr3(next->pgd);
389 +
390 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
391
392 /* Stop flush ipis for the previous mm */
393 @@ -156,10 +184,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
394 * schedule, protecting us from simultaneous changes.
395 */
396 cpumask_set_cpu(cpu, mm_cpumask(next));
397 +
398 /*
399 * We were in lazy tlb mode and leave_mm disabled
400 * tlb flush IPI delivery. We must reload CR3
401 * to make sure to use no freed page tables.
402 + *
403 + * As above, load_cr3() is serializing and orders TLB
404 + * fills with respect to the mm_cpumask write.
405 */
406 load_cr3(next->pgd);
407 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
408 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
409 index 02693dd9a079..f660d63f40fe 100644
410 --- a/arch/x86/kernel/reboot.c
411 +++ b/arch/x86/kernel/reboot.c
412 @@ -182,6 +182,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
413 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
414 },
415 },
416 + { /* Handle problems with rebooting on the iMac10,1. */
417 + .callback = set_pci_reboot,
418 + .ident = "Apple iMac10,1",
419 + .matches = {
420 + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
421 + DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
422 + },
423 + },
424
425 /* ASRock */
426 { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
427 diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
428 index 120302511802..ab9ae67a80e4 100644
429 --- a/arch/x86/kvm/trace.h
430 +++ b/arch/x86/kvm/trace.h
431 @@ -268,7 +268,7 @@ TRACE_EVENT(kvm_inj_virq,
432 #define kvm_trace_sym_exc \
433 EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
434 EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
435 - EXS(MF), EXS(MC)
436 + EXS(MF), EXS(AC), EXS(MC)
437
438 /*
439 * Tracepoint for kvm interrupt injection:
440 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
441 index 44976a596fa6..10e7693b3540 100644
442 --- a/arch/x86/kvm/vmx.c
443 +++ b/arch/x86/kvm/vmx.c
444 @@ -8932,7 +8932,8 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
445 best->ebx &= ~bit(X86_FEATURE_INVPCID);
446 }
447
448 - vmcs_set_secondary_exec_control(secondary_exec_ctl);
449 + if (cpu_has_secondary_exec_ctrls())
450 + vmcs_set_secondary_exec_control(secondary_exec_ctl);
451
452 if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) {
453 if (guest_cpuid_has_pcommit(vcpu))
454 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
455 index 97592e190413..9a2ed8904513 100644
456 --- a/arch/x86/kvm/x86.c
457 +++ b/arch/x86/kvm/x86.c
458 @@ -951,7 +951,7 @@ static u32 msrs_to_save[] = {
459 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
460 #endif
461 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
462 - MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
463 + MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
464 };
465
466 static unsigned num_msrs_to_save;
467 @@ -4006,16 +4006,17 @@ static void kvm_init_msr_list(void)
468
469 /*
470 * Even MSRs that are valid in the host may not be exposed
471 - * to the guests in some cases. We could work around this
472 - * in VMX with the generic MSR save/load machinery, but it
473 - * is not really worthwhile since it will really only
474 - * happen with nested virtualization.
475 + * to the guests in some cases.
476 */
477 switch (msrs_to_save[i]) {
478 case MSR_IA32_BNDCFGS:
479 if (!kvm_x86_ops->mpx_supported())
480 continue;
481 break;
482 + case MSR_TSC_AUX:
483 + if (!kvm_x86_ops->rdtscp_supported())
484 + continue;
485 + break;
486 default:
487 break;
488 }
489 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
490 index 8ddb5d0d66fb..8f4cc3dfac32 100644
491 --- a/arch/x86/mm/tlb.c
492 +++ b/arch/x86/mm/tlb.c
493 @@ -161,7 +161,10 @@ void flush_tlb_current_task(void)
494 preempt_disable();
495
496 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
497 +
498 + /* This is an implicit full barrier that synchronizes with switch_mm. */
499 local_flush_tlb();
500 +
501 trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
502 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
503 flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
504 @@ -188,17 +191,29 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
505 unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
506
507 preempt_disable();
508 - if (current->active_mm != mm)
509 + if (current->active_mm != mm) {
510 + /* Synchronize with switch_mm. */
511 + smp_mb();
512 +
513 goto out;
514 + }
515
516 if (!current->mm) {
517 leave_mm(smp_processor_id());
518 +
519 + /* Synchronize with switch_mm. */
520 + smp_mb();
521 +
522 goto out;
523 }
524
525 if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
526 base_pages_to_flush = (end - start) >> PAGE_SHIFT;
527
528 + /*
529 + * Both branches below are implicit full barriers (MOV to CR or
530 + * INVLPG) that synchronize with switch_mm.
531 + */
532 if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
533 base_pages_to_flush = TLB_FLUSH_ALL;
534 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
535 @@ -228,10 +243,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
536 preempt_disable();
537
538 if (current->active_mm == mm) {
539 - if (current->mm)
540 + if (current->mm) {
541 + /*
542 + * Implicit full barrier (INVLPG) that synchronizes
543 + * with switch_mm.
544 + */
545 __flush_tlb_one(start);
546 - else
547 + } else {
548 leave_mm(smp_processor_id());
549 +
550 + /* Synchronize with switch_mm. */
551 + smp_mb();
552 + }
553 }
554
555 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
556 diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
557 index df0c40559583..7f664c416faf 100644
558 --- a/arch/x86/xen/suspend.c
559 +++ b/arch/x86/xen/suspend.c
560 @@ -34,7 +34,8 @@ static void xen_hvm_post_suspend(int suspend_cancelled)
561 {
562 #ifdef CONFIG_XEN_PVHVM
563 int cpu;
564 - xen_hvm_init_shared_info();
565 + if (!suspend_cancelled)
566 + xen_hvm_init_shared_info();
567 xen_callback_vector();
568 xen_unplug_emulated_devices();
569 if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
570 diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
571 index 3dfd287256d6..92ddae101ecc 100644
572 --- a/drivers/infiniband/hw/mlx5/cq.c
573 +++ b/drivers/infiniband/hw/mlx5/cq.c
574 @@ -756,7 +756,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
575 int uninitialized_var(index);
576 int uninitialized_var(inlen);
577 int cqe_size;
578 - int irqn;
579 + unsigned int irqn;
580 int eqn;
581 int err;
582
583 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
584 index 9e0f8a7ef8b1..f1692e418fe4 100644
585 --- a/drivers/net/bonding/bond_main.c
586 +++ b/drivers/net/bonding/bond_main.c
587 @@ -1207,7 +1207,6 @@ static int bond_master_upper_dev_link(struct net_device *bond_dev,
588 err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
589 if (err)
590 return err;
591 - slave_dev->flags |= IFF_SLAVE;
592 rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
593 return 0;
594 }
595 @@ -1465,6 +1464,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
596 }
597 }
598
599 + /* set slave flag before open to prevent IPv6 addrconf */
600 + slave_dev->flags |= IFF_SLAVE;
601 +
602 /* open the slave since the application closed it */
603 res = dev_open(slave_dev);
604 if (res) {
605 @@ -1725,6 +1727,7 @@ err_close:
606 dev_close(slave_dev);
607
608 err_restore_mac:
609 + slave_dev->flags &= ~IFF_SLAVE;
610 if (!bond->params.fail_over_mac ||
611 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
612 /* XXX TODO - fom follow mode needs to change master's
613 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
614 index 1e52db32c73d..1203d892e842 100644
615 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
616 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
617 @@ -746,7 +746,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
618 struct mlx5_core_dev *mdev = priv->mdev;
619 struct mlx5_core_cq *mcq = &cq->mcq;
620 int eqn_not_used;
621 - int irqn;
622 + unsigned int irqn;
623 int err;
624 u32 i;
625
626 @@ -800,7 +800,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
627 void *in;
628 void *cqc;
629 int inlen;
630 - int irqn_not_used;
631 + unsigned int irqn_not_used;
632 int eqn;
633 int err;
634
635 @@ -1504,7 +1504,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
636 struct mlx5_core_dev *mdev = priv->mdev;
637 struct mlx5_core_cq *mcq = &cq->mcq;
638 int eqn_not_used;
639 - int irqn;
640 + unsigned int irqn;
641 int err;
642
643 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
644 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
645 index 4ac8d4cc4973..6cf6d93d8831 100644
646 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
647 +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
648 @@ -568,7 +568,8 @@ static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
649 mlx5_irq_clear_affinity_hint(mdev, i);
650 }
651
652 -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
653 +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
654 + unsigned int *irqn)
655 {
656 struct mlx5_eq_table *table = &dev->priv.eq_table;
657 struct mlx5_eq *eq, *n;
658 diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
659 index 9066d7a8483c..f96c6b3606f2 100644
660 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
661 +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
662 @@ -2107,7 +2107,7 @@ static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
663 dd = &lp->tx_descs[lp->tx_next];
664
665 /* Set DMA Descriptor fields */
666 - dd->des0 = dma_handle;
667 + dd->des0 = dma_handle + consumed_size;
668 dd->des1 = 0;
669 dd->des2 = dma_size;
670
671 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
672 index 651d35ea22c5..59fefca74263 100644
673 --- a/drivers/net/team/team.c
674 +++ b/drivers/net/team/team.c
675 @@ -1845,10 +1845,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
676 struct team *team = netdev_priv(dev);
677 struct team_port *port;
678
679 - rcu_read_lock();
680 - list_for_each_entry_rcu(port, &team->port_list, list)
681 + mutex_lock(&team->lock);
682 + list_for_each_entry(port, &team->port_list, list)
683 vlan_vid_del(port->dev, proto, vid);
684 - rcu_read_unlock();
685 + mutex_unlock(&team->lock);
686
687 return 0;
688 }
689 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
690 index ba363cedef80..405a7b6cca25 100644
691 --- a/drivers/net/vxlan.c
692 +++ b/drivers/net/vxlan.c
693 @@ -2751,7 +2751,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
694 struct vxlan_config *conf)
695 {
696 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
697 - struct vxlan_dev *vxlan = netdev_priv(dev);
698 + struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
699 struct vxlan_rdst *dst = &vxlan->default_dst;
700 unsigned short needed_headroom = ETH_HLEN;
701 int err;
702 @@ -2817,9 +2817,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
703 if (!vxlan->cfg.age_interval)
704 vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
705
706 - if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
707 - vxlan->cfg.dst_port, vxlan->flags))
708 + list_for_each_entry(tmp, &vn->vxlan_list, next) {
709 + if (tmp->cfg.vni == conf->vni &&
710 + (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
711 + tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
712 + tmp->cfg.dst_port == vxlan->cfg.dst_port &&
713 + (tmp->flags & VXLAN_F_RCV_FLAGS) ==
714 + (vxlan->flags & VXLAN_F_RCV_FLAGS))
715 return -EEXIST;
716 + }
717
718 dev->ethtool_ops = &vxlan_ethtool_ops;
719
720 diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
721 index 2721cf89fb16..aac1ed3f7bb4 100644
722 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c
723 +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
724 @@ -531,6 +531,8 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
725 ieee80211_rx(hw, skb);
726 else
727 dev_kfree_skb_any(skb);
728 + } else {
729 + dev_kfree_skb_any(skb);
730 }
731 }
732
733 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
734 index ddbf32d599cb..8683436788c3 100644
735 --- a/drivers/usb/core/hub.c
736 +++ b/drivers/usb/core/hub.c
737 @@ -3895,17 +3895,30 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
738 return;
739 }
740
741 - if (usb_set_lpm_timeout(udev, state, timeout))
742 + if (usb_set_lpm_timeout(udev, state, timeout)) {
743 /* If we can't set the parent hub U1/U2 timeout,
744 * device-initiated LPM won't be allowed either, so let the xHCI
745 * host know that this link state won't be enabled.
746 */
747 hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
748 + } else {
749 + /* Only a configured device will accept the Set Feature
750 + * U1/U2_ENABLE
751 + */
752 + if (udev->actconfig)
753 + usb_set_device_initiated_lpm(udev, state, true);
754
755 - /* Only a configured device will accept the Set Feature U1/U2_ENABLE */
756 - else if (udev->actconfig)
757 - usb_set_device_initiated_lpm(udev, state, true);
758 -
759 + /* As soon as usb_set_lpm_timeout(timeout) returns 0, the
760 + * hub-initiated LPM is enabled. Thus, LPM is enabled no
761 + * matter the result of usb_set_device_initiated_lpm().
762 + * The only difference is whether device is able to initiate
763 + * LPM.
764 + */
765 + if (state == USB3_LPM_U1)
766 + udev->usb3_lpm_u1_enabled = 1;
767 + else if (state == USB3_LPM_U2)
768 + udev->usb3_lpm_u2_enabled = 1;
769 + }
770 }
771
772 /*
773 @@ -3945,6 +3958,18 @@ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
774 dev_warn(&udev->dev, "Could not disable xHCI %s timeout, "
775 "bus schedule bandwidth may be impacted.\n",
776 usb3_lpm_names[state]);
777 +
778 + /* As soon as usb_set_lpm_timeout(0) return 0, hub initiated LPM
779 + * is disabled. Hub will disallows link to enter U1/U2 as well,
780 + * even device is initiating LPM. Hence LPM is disabled if hub LPM
781 + * timeout set to 0, no matter device-initiated LPM is disabled or
782 + * not.
783 + */
784 + if (state == USB3_LPM_U1)
785 + udev->usb3_lpm_u1_enabled = 0;
786 + else if (state == USB3_LPM_U2)
787 + udev->usb3_lpm_u2_enabled = 0;
788 +
789 return 0;
790 }
791
792 @@ -3979,8 +4004,6 @@ int usb_disable_lpm(struct usb_device *udev)
793 if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
794 goto enable_lpm;
795
796 - udev->usb3_lpm_enabled = 0;
797 -
798 return 0;
799
800 enable_lpm:
801 @@ -4038,8 +4061,6 @@ void usb_enable_lpm(struct usb_device *udev)
802
803 usb_enable_link_state(hcd, udev, USB3_LPM_U1);
804 usb_enable_link_state(hcd, udev, USB3_LPM_U2);
805 -
806 - udev->usb3_lpm_enabled = 1;
807 }
808 EXPORT_SYMBOL_GPL(usb_enable_lpm);
809
810 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
811 index d9ec2de6c4cf..65b6e6b84043 100644
812 --- a/drivers/usb/core/sysfs.c
813 +++ b/drivers/usb/core/sysfs.c
814 @@ -531,7 +531,7 @@ static ssize_t usb2_lpm_besl_store(struct device *dev,
815 }
816 static DEVICE_ATTR_RW(usb2_lpm_besl);
817
818 -static ssize_t usb3_hardware_lpm_show(struct device *dev,
819 +static ssize_t usb3_hardware_lpm_u1_show(struct device *dev,
820 struct device_attribute *attr, char *buf)
821 {
822 struct usb_device *udev = to_usb_device(dev);
823 @@ -539,7 +539,7 @@ static ssize_t usb3_hardware_lpm_show(struct device *dev,
824
825 usb_lock_device(udev);
826
827 - if (udev->usb3_lpm_enabled)
828 + if (udev->usb3_lpm_u1_enabled)
829 p = "enabled";
830 else
831 p = "disabled";
832 @@ -548,7 +548,26 @@ static ssize_t usb3_hardware_lpm_show(struct device *dev,
833
834 return sprintf(buf, "%s\n", p);
835 }
836 -static DEVICE_ATTR_RO(usb3_hardware_lpm);
837 +static DEVICE_ATTR_RO(usb3_hardware_lpm_u1);
838 +
839 +static ssize_t usb3_hardware_lpm_u2_show(struct device *dev,
840 + struct device_attribute *attr, char *buf)
841 +{
842 + struct usb_device *udev = to_usb_device(dev);
843 + const char *p;
844 +
845 + usb_lock_device(udev);
846 +
847 + if (udev->usb3_lpm_u2_enabled)
848 + p = "enabled";
849 + else
850 + p = "disabled";
851 +
852 + usb_unlock_device(udev);
853 +
854 + return sprintf(buf, "%s\n", p);
855 +}
856 +static DEVICE_ATTR_RO(usb3_hardware_lpm_u2);
857
858 static struct attribute *usb2_hardware_lpm_attr[] = {
859 &dev_attr_usb2_hardware_lpm.attr,
860 @@ -562,7 +581,8 @@ static struct attribute_group usb2_hardware_lpm_attr_group = {
861 };
862
863 static struct attribute *usb3_hardware_lpm_attr[] = {
864 - &dev_attr_usb3_hardware_lpm.attr,
865 + &dev_attr_usb3_hardware_lpm_u1.attr,
866 + &dev_attr_usb3_hardware_lpm_u2.attr,
867 NULL,
868 };
869 static struct attribute_group usb3_hardware_lpm_attr_group = {
870 @@ -592,7 +612,8 @@ static int add_power_attributes(struct device *dev)
871 if (udev->usb2_hw_lpm_capable == 1)
872 rc = sysfs_merge_group(&dev->kobj,
873 &usb2_hardware_lpm_attr_group);
874 - if (udev->lpm_capable == 1)
875 + if (udev->speed == USB_SPEED_SUPER &&
876 + udev->lpm_capable == 1)
877 rc = sysfs_merge_group(&dev->kobj,
878 &usb3_hardware_lpm_attr_group);
879 }
880 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
881 index 3f912705dcef..dca0a4692f08 100644
882 --- a/drivers/usb/host/xhci.c
883 +++ b/drivers/usb/host/xhci.c
884 @@ -5059,6 +5059,10 @@ static int __init xhci_hcd_init(void)
885 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
886 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
887 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
888 +
889 + if (usb_disabled())
890 + return -ENODEV;
891 +
892 return 0;
893 }
894
895 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
896 index 7d4f51a32e66..59b2126b21a3 100644
897 --- a/drivers/usb/serial/cp210x.c
898 +++ b/drivers/usb/serial/cp210x.c
899 @@ -160,6 +160,7 @@ static const struct usb_device_id id_table[] = {
900 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
901 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
902 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
903 + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
904 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
905 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
906 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
907 diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
908 index abc4767695e4..b2c9fada8eac 100644
909 --- a/include/linux/mlx5/cq.h
910 +++ b/include/linux/mlx5/cq.h
911 @@ -45,7 +45,7 @@ struct mlx5_core_cq {
912 atomic_t refcount;
913 struct completion free;
914 unsigned vector;
915 - int irqn;
916 + unsigned int irqn;
917 void (*comp) (struct mlx5_core_cq *);
918 void (*event) (struct mlx5_core_cq *, enum mlx5_event);
919 struct mlx5_uar *uar;
920 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
921 index 5c857f2a20d7..af3efd9157f0 100644
922 --- a/include/linux/mlx5/driver.h
923 +++ b/include/linux/mlx5/driver.h
924 @@ -303,7 +303,7 @@ struct mlx5_eq {
925 u32 cons_index;
926 struct mlx5_buf buf;
927 int size;
928 - u8 irqn;
929 + unsigned int irqn;
930 u8 eqn;
931 int nent;
932 u64 mask;
933 @@ -762,7 +762,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
934 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
935 int mlx5_start_eqs(struct mlx5_core_dev *dev);
936 int mlx5_stop_eqs(struct mlx5_core_dev *dev);
937 -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
938 +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
939 + unsigned int *irqn);
940 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
941 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
942
943 diff --git a/include/linux/sched.h b/include/linux/sched.h
944 index fa39434e3fdd..21a6e9649012 100644
945 --- a/include/linux/sched.h
946 +++ b/include/linux/sched.h
947 @@ -830,6 +830,7 @@ struct user_struct {
948 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
949 #endif
950 unsigned long locked_shm; /* How many pages of mlocked shm ? */
951 + unsigned long unix_inflight; /* How many files in flight in unix sockets */
952
953 #ifdef CONFIG_KEYS
954 struct key *uid_keyring; /* UID specific keyring */
955 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
956 index 4355129fff91..9147f9f34cbe 100644
957 --- a/include/linux/skbuff.h
958 +++ b/include/linux/skbuff.h
959 @@ -3446,7 +3446,8 @@ struct skb_gso_cb {
960 int encap_level;
961 __u16 csum_start;
962 };
963 -#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
964 +#define SKB_SGO_CB_OFFSET 32
965 +#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
966
967 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
968 {
969 diff --git a/include/linux/usb.h b/include/linux/usb.h
970 index b9a28074210f..b79925dd2b41 100644
971 --- a/include/linux/usb.h
972 +++ b/include/linux/usb.h
973 @@ -511,6 +511,8 @@ struct usb3_lpm_parameters {
974 * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
975 * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
976 * @usb3_lpm_enabled: USB3 hardware LPM enabled
977 + * @usb3_lpm_u1_enabled: USB3 hardware U1 LPM enabled
978 + * @usb3_lpm_u2_enabled: USB3 hardware U2 LPM enabled
979 * @string_langid: language ID for strings
980 * @product: iProduct string, if present (static)
981 * @manufacturer: iManufacturer string, if present (static)
982 @@ -584,6 +586,8 @@ struct usb_device {
983 unsigned usb2_hw_lpm_enabled:1;
984 unsigned usb2_hw_lpm_allowed:1;
985 unsigned usb3_lpm_enabled:1;
986 + unsigned usb3_lpm_u1_enabled:1;
987 + unsigned usb3_lpm_u2_enabled:1;
988 int string_langid;
989
990 /* static strings from the device */
991 diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
992 index 84b20835b736..0dc0a51da38f 100644
993 --- a/include/net/inet_ecn.h
994 +++ b/include/net/inet_ecn.h
995 @@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
996
997 struct ipv6hdr;
998
999 -static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
1000 +/* Note:
1001 + * IP_ECN_set_ce() has to tweak IPV4 checksum when setting CE,
1002 + * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE
1003 + * In IPv6 case, no checksum compensates the change in IPv6 header,
1004 + * so we have to update skb->csum.
1005 + */
1006 +static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
1007 {
1008 + __be32 from, to;
1009 +
1010 if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
1011 return 0;
1012 - *(__be32*)iph |= htonl(INET_ECN_CE << 20);
1013 +
1014 + from = *(__be32 *)iph;
1015 + to = from | htonl(INET_ECN_CE << 20);
1016 + *(__be32 *)iph = to;
1017 + if (skb->ip_summed == CHECKSUM_COMPLETE)
1018 + skb->csum = csum_add(csum_sub(skb->csum, from), to);
1019 return 1;
1020 }
1021
1022 @@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
1023 case cpu_to_be16(ETH_P_IPV6):
1024 if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
1025 skb_tail_pointer(skb))
1026 - return IP6_ECN_set_ce(ipv6_hdr(skb));
1027 + return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
1028 break;
1029 }
1030
1031 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1032 index a7945d10b378..d1d3e8f57de9 100644
1033 --- a/kernel/bpf/verifier.c
1034 +++ b/kernel/bpf/verifier.c
1035 @@ -1121,6 +1121,16 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
1036 return -EINVAL;
1037 }
1038
1039 + if ((opcode == BPF_LSH || opcode == BPF_RSH ||
1040 + opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
1041 + int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
1042 +
1043 + if (insn->imm < 0 || insn->imm >= size) {
1044 + verbose("invalid shift %d\n", insn->imm);
1045 + return -EINVAL;
1046 + }
1047 + }
1048 +
1049 /* pattern match 'bpf_add Rx, imm' instruction */
1050 if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
1051 regs[insn->dst_reg].type == FRAME_PTR &&
1052 diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
1053 index 191a70290dca..f5d2fe5e31cc 100644
1054 --- a/net/batman-adv/bridge_loop_avoidance.c
1055 +++ b/net/batman-adv/bridge_loop_avoidance.c
1056 @@ -127,21 +127,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
1057 }
1058
1059 /* finally deinitialize the claim */
1060 -static void batadv_claim_free_rcu(struct rcu_head *rcu)
1061 +static void batadv_claim_release(struct batadv_bla_claim *claim)
1062 {
1063 - struct batadv_bla_claim *claim;
1064 -
1065 - claim = container_of(rcu, struct batadv_bla_claim, rcu);
1066 -
1067 batadv_backbone_gw_free_ref(claim->backbone_gw);
1068 - kfree(claim);
1069 + kfree_rcu(claim, rcu);
1070 }
1071
1072 /* free a claim, call claim_free_rcu if its the last reference */
1073 static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
1074 {
1075 if (atomic_dec_and_test(&claim->refcount))
1076 - call_rcu(&claim->rcu, batadv_claim_free_rcu);
1077 + batadv_claim_release(claim);
1078 }
1079
1080 /**
1081 diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
1082 index 5a31420513e1..7b12ea8ea29d 100644
1083 --- a/net/batman-adv/hard-interface.h
1084 +++ b/net/batman-adv/hard-interface.h
1085 @@ -75,18 +75,6 @@ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
1086 call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
1087 }
1088
1089 -/**
1090 - * batadv_hardif_free_ref_now - decrement the hard interface refcounter and
1091 - * possibly free it (without rcu callback)
1092 - * @hard_iface: the hard interface to free
1093 - */
1094 -static inline void
1095 -batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
1096 -{
1097 - if (atomic_dec_and_test(&hard_iface->refcount))
1098 - batadv_hardif_free_rcu(&hard_iface->rcu);
1099 -}
1100 -
1101 static inline struct batadv_hard_iface *
1102 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
1103 {
1104 diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
1105 index f5276be2c77c..d0956f726547 100644
1106 --- a/net/batman-adv/network-coding.c
1107 +++ b/net/batman-adv/network-coding.c
1108 @@ -203,28 +203,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
1109 }
1110
1111 /**
1112 - * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
1113 - * its refcount on the orig_node
1114 - * @rcu: rcu pointer of the nc node
1115 + * batadv_nc_node_release - release nc_node from lists and queue for free after
1116 + * rcu grace period
1117 + * @nc_node: the nc node to free
1118 */
1119 -static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
1120 +static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
1121 {
1122 - struct batadv_nc_node *nc_node;
1123 -
1124 - nc_node = container_of(rcu, struct batadv_nc_node, rcu);
1125 batadv_orig_node_free_ref(nc_node->orig_node);
1126 - kfree(nc_node);
1127 + kfree_rcu(nc_node, rcu);
1128 }
1129
1130 /**
1131 - * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
1132 - * frees it
1133 + * batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
1134 + * release it
1135 * @nc_node: the nc node to free
1136 */
1137 static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
1138 {
1139 if (atomic_dec_and_test(&nc_node->refcount))
1140 - call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
1141 + batadv_nc_node_release(nc_node);
1142 }
1143
1144 /**
1145 diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
1146 index 7486df9ed48d..17851d3aaf22 100644
1147 --- a/net/batman-adv/originator.c
1148 +++ b/net/batman-adv/originator.c
1149 @@ -163,92 +163,66 @@ err:
1150 }
1151
1152 /**
1153 - * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
1154 - * @rcu: rcu pointer of the neigh_ifinfo object
1155 - */
1156 -static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
1157 -{
1158 - struct batadv_neigh_ifinfo *neigh_ifinfo;
1159 -
1160 - neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
1161 -
1162 - if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
1163 - batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
1164 -
1165 - kfree(neigh_ifinfo);
1166 -}
1167 -
1168 -/**
1169 - * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
1170 - * the neigh_ifinfo (without rcu callback)
1171 + * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
1172 + * free after rcu grace period
1173 * @neigh_ifinfo: the neigh_ifinfo object to release
1174 */
1175 static void
1176 -batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
1177 +batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
1178 {
1179 - if (atomic_dec_and_test(&neigh_ifinfo->refcount))
1180 - batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
1181 + if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
1182 + batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
1183 +
1184 + kfree_rcu(neigh_ifinfo, rcu);
1185 }
1186
1187 /**
1188 - * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
1189 + * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
1190 * the neigh_ifinfo
1191 * @neigh_ifinfo: the neigh_ifinfo object to release
1192 */
1193 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
1194 {
1195 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
1196 - call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
1197 + batadv_neigh_ifinfo_release(neigh_ifinfo);
1198 }
1199
1200 /**
1201 * batadv_neigh_node_free_rcu - free the neigh_node
1202 - * @rcu: rcu pointer of the neigh_node
1203 + * batadv_neigh_node_release - release neigh_node from lists and queue for
1204 + * free after rcu grace period
1205 + * @neigh_node: neigh neighbor to free
1206 */
1207 -static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
1208 +static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
1209 {
1210 struct hlist_node *node_tmp;
1211 - struct batadv_neigh_node *neigh_node;
1212 struct batadv_neigh_ifinfo *neigh_ifinfo;
1213 struct batadv_algo_ops *bao;
1214
1215 - neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
1216 bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
1217
1218 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
1219 &neigh_node->ifinfo_list, list) {
1220 - batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
1221 + batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
1222 }
1223
1224 if (bao->bat_neigh_free)
1225 bao->bat_neigh_free(neigh_node);
1226
1227 - batadv_hardif_free_ref_now(neigh_node->if_incoming);
1228 + batadv_hardif_free_ref(neigh_node->if_incoming);
1229
1230 - kfree(neigh_node);
1231 -}
1232 -
1233 -/**
1234 - * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
1235 - * and possibly free it (without rcu callback)
1236 - * @neigh_node: neigh neighbor to free
1237 - */
1238 -static void
1239 -batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
1240 -{
1241 - if (atomic_dec_and_test(&neigh_node->refcount))
1242 - batadv_neigh_node_free_rcu(&neigh_node->rcu);
1243 + kfree_rcu(neigh_node, rcu);
1244 }
1245
1246 /**
1247 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
1248 - * and possibly free it
1249 + * and possibly release it
1250 * @neigh_node: neigh neighbor to free
1251 */
1252 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
1253 {
1254 if (atomic_dec_and_test(&neigh_node->refcount))
1255 - call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
1256 + batadv_neigh_node_release(neigh_node);
1257 }
1258
1259 /**
1260 @@ -532,108 +506,99 @@ out:
1261 }
1262
1263 /**
1264 - * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
1265 - * @rcu: rcu pointer of the orig_ifinfo object
1266 + * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
1267 + * free after rcu grace period
1268 + * @orig_ifinfo: the orig_ifinfo object to release
1269 */
1270 -static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
1271 +static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
1272 {
1273 - struct batadv_orig_ifinfo *orig_ifinfo;
1274 struct batadv_neigh_node *router;
1275
1276 - orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
1277 -
1278 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
1279 - batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
1280 + batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
1281
1282 /* this is the last reference to this object */
1283 router = rcu_dereference_protected(orig_ifinfo->router, true);
1284 if (router)
1285 - batadv_neigh_node_free_ref_now(router);
1286 - kfree(orig_ifinfo);
1287 + batadv_neigh_node_free_ref(router);
1288 +
1289 + kfree_rcu(orig_ifinfo, rcu);
1290 }
1291
1292 /**
1293 - * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
1294 - * the orig_ifinfo (without rcu callback)
1295 + * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
1296 + * the orig_ifinfo
1297 * @orig_ifinfo: the orig_ifinfo object to release
1298 */
1299 -static void
1300 -batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
1301 +void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
1302 {
1303 if (atomic_dec_and_test(&orig_ifinfo->refcount))
1304 - batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
1305 + batadv_orig_ifinfo_release(orig_ifinfo);
1306 }
1307
1308 /**
1309 - * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
1310 - * the orig_ifinfo
1311 - * @orig_ifinfo: the orig_ifinfo object to release
1312 + * batadv_orig_node_free_rcu - free the orig_node
1313 + * @rcu: rcu pointer of the orig_node
1314 */
1315 -void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
1316 +static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
1317 {
1318 - if (atomic_dec_and_test(&orig_ifinfo->refcount))
1319 - call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
1320 + struct batadv_orig_node *orig_node;
1321 +
1322 + orig_node = container_of(rcu, struct batadv_orig_node, rcu);
1323 +
1324 + batadv_mcast_purge_orig(orig_node);
1325 +
1326 + batadv_frag_purge_orig(orig_node, NULL);
1327 +
1328 + if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
1329 + orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
1330 +
1331 + kfree(orig_node->tt_buff);
1332 + kfree(orig_node);
1333 }
1334
1335 -static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
1336 +/**
1337 + * batadv_orig_node_release - release orig_node from lists and queue for
1338 + * free after rcu grace period
1339 + * @orig_node: the orig node to free
1340 + */
1341 +static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
1342 {
1343 struct hlist_node *node_tmp;
1344 struct batadv_neigh_node *neigh_node;
1345 - struct batadv_orig_node *orig_node;
1346 struct batadv_orig_ifinfo *orig_ifinfo;
1347
1348 - orig_node = container_of(rcu, struct batadv_orig_node, rcu);
1349 -
1350 spin_lock_bh(&orig_node->neigh_list_lock);
1351
1352 /* for all neighbors towards this originator ... */
1353 hlist_for_each_entry_safe(neigh_node, node_tmp,
1354 &orig_node->neigh_list, list) {
1355 hlist_del_rcu(&neigh_node->list);
1356 - batadv_neigh_node_free_ref_now(neigh_node);
1357 + batadv_neigh_node_free_ref(neigh_node);
1358 }
1359
1360 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
1361 &orig_node->ifinfo_list, list) {
1362 hlist_del_rcu(&orig_ifinfo->list);
1363 - batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
1364 + batadv_orig_ifinfo_free_ref(orig_ifinfo);
1365 }
1366 spin_unlock_bh(&orig_node->neigh_list_lock);
1367
1368 - batadv_mcast_purge_orig(orig_node);
1369 -
1370 /* Free nc_nodes */
1371 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
1372
1373 - batadv_frag_purge_orig(orig_node, NULL);
1374 -
1375 - if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
1376 - orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
1377 -
1378 - kfree(orig_node->tt_buff);
1379 - kfree(orig_node);
1380 + call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
1381 }
1382
1383 /**
1384 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
1385 - * schedule an rcu callback for freeing it
1386 + * release it
1387 * @orig_node: the orig node to free
1388 */
1389 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
1390 {
1391 if (atomic_dec_and_test(&orig_node->refcount))
1392 - call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
1393 -}
1394 -
1395 -/**
1396 - * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
1397 - * possibly free it (without rcu callback)
1398 - * @orig_node: the orig node to free
1399 - */
1400 -void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
1401 -{
1402 - if (atomic_dec_and_test(&orig_node->refcount))
1403 - batadv_orig_node_free_rcu(&orig_node->rcu);
1404 + batadv_orig_node_release(orig_node);
1405 }
1406
1407 void batadv_originator_free(struct batadv_priv *bat_priv)
1408 diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
1409 index fa18f9bf266b..a5c37882b409 100644
1410 --- a/net/batman-adv/originator.h
1411 +++ b/net/batman-adv/originator.h
1412 @@ -38,7 +38,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
1413 void batadv_originator_free(struct batadv_priv *bat_priv);
1414 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
1415 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
1416 -void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
1417 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
1418 const u8 *addr);
1419 struct batadv_neigh_node *
1420 diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
1421 index 76f19ba62462..83b0ca27a45e 100644
1422 --- a/net/batman-adv/translation-table.c
1423 +++ b/net/batman-adv/translation-table.c
1424 @@ -240,20 +240,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
1425 return count;
1426 }
1427
1428 -static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
1429 -{
1430 - struct batadv_tt_orig_list_entry *orig_entry;
1431 -
1432 - orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
1433 -
1434 - /* We are in an rcu callback here, therefore we cannot use
1435 - * batadv_orig_node_free_ref() and its call_rcu():
1436 - * An rcu_barrier() wouldn't wait for that to finish
1437 - */
1438 - batadv_orig_node_free_ref_now(orig_entry->orig_node);
1439 - kfree(orig_entry);
1440 -}
1441 -
1442 /**
1443 * batadv_tt_local_size_mod - change the size by v of the local table identified
1444 * by vid
1445 @@ -349,13 +335,25 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
1446 batadv_tt_global_size_mod(orig_node, vid, -1);
1447 }
1448
1449 +/**
1450 + * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
1451 + * queue for free after rcu grace period
1452 + * @orig_entry: tt orig entry to be free'd
1453 + */
1454 +static void
1455 +batadv_tt_orig_list_entry_release(struct batadv_tt_orig_list_entry *orig_entry)
1456 +{
1457 + batadv_orig_node_free_ref(orig_entry->orig_node);
1458 + kfree_rcu(orig_entry, rcu);
1459 +}
1460 +
1461 static void
1462 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
1463 {
1464 if (!atomic_dec_and_test(&orig_entry->refcount))
1465 return;
1466
1467 - call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
1468 + batadv_tt_orig_list_entry_release(orig_entry);
1469 }
1470
1471 /**
1472 diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
1473 index 5e88d3e17546..2c8095a5d824 100644
1474 --- a/net/bridge/br_device.c
1475 +++ b/net/bridge/br_device.c
1476 @@ -28,6 +28,8 @@
1477 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
1478 EXPORT_SYMBOL_GPL(nf_br_ops);
1479
1480 +static struct lock_class_key bridge_netdev_addr_lock_key;
1481 +
1482 /* net device transmit always called with BH disabled */
1483 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1484 {
1485 @@ -87,6 +89,11 @@ out:
1486 return NETDEV_TX_OK;
1487 }
1488
1489 +static void br_set_lockdep_class(struct net_device *dev)
1490 +{
1491 + lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
1492 +}
1493 +
1494 static int br_dev_init(struct net_device *dev)
1495 {
1496 struct net_bridge *br = netdev_priv(dev);
1497 @@ -99,6 +106,7 @@ static int br_dev_init(struct net_device *dev)
1498 err = br_vlan_init(br);
1499 if (err)
1500 free_percpu(br->stats);
1501 + br_set_lockdep_class(dev);
1502
1503 return err;
1504 }
1505 diff --git a/net/core/dev.c b/net/core/dev.c
1506 index ae00b894e675..7f00f2439770 100644
1507 --- a/net/core/dev.c
1508 +++ b/net/core/dev.c
1509 @@ -2542,6 +2542,8 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
1510 *
1511 * It may return NULL if the skb requires no segmentation. This is
1512 * only possible when GSO is used for verifying header integrity.
1513 + *
1514 + * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
1515 */
1516 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
1517 netdev_features_t features, bool tx_path)
1518 @@ -2556,6 +2558,9 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
1519 return ERR_PTR(err);
1520 }
1521
1522 + BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
1523 + sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
1524 +
1525 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
1526 SKB_GSO_CB(skb)->encap_level = 0;
1527
1528 diff --git a/net/core/filter.c b/net/core/filter.c
1529 index 672eefbfbe99..37157c4c1a78 100644
1530 --- a/net/core/filter.c
1531 +++ b/net/core/filter.c
1532 @@ -777,6 +777,11 @@ static int bpf_check_classic(const struct sock_filter *filter,
1533 if (ftest->k == 0)
1534 return -EINVAL;
1535 break;
1536 + case BPF_ALU | BPF_LSH | BPF_K:
1537 + case BPF_ALU | BPF_RSH | BPF_K:
1538 + if (ftest->k >= 32)
1539 + return -EINVAL;
1540 + break;
1541 case BPF_LD | BPF_MEM:
1542 case BPF_LDX | BPF_MEM:
1543 case BPF_ST:
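
The new cases make the classic BPF checker reject constant left/right shifts with k >= 32: the interpreter shifts 32-bit values, and in C a shift count equal to or larger than the operand width is undefined behavior, so such programs must be refused up front. A small stand-alone check in the same spirit (validate_shift is a hypothetical name):

/* Shifting a 32-bit value by 32 or more is undefined behavior in C, so a
 * validator should reject such constants instead of ever executing them. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int validate_shift(uint32_t k)
{
        if (k >= 32)
                return -1;      /* mirrors the -EINVAL in bpf_check_classic() */
        return 0;
}

int main(void)
{
        uint32_t counts[] = { 4, 31, 32, 40 };

        for (size_t i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
                uint32_t k = counts[i];

                if (validate_shift(k) < 0) {
                        printf("shift by %u rejected\n", k);
                        continue;
                }
                printf("1u << %u = %u\n", k, 1u << k);
        }
        return 0;
}
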
1544 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
1545 index de8d5cc5eb24..4da4d51a2ccf 100644
1546 --- a/net/core/pktgen.c
1547 +++ b/net/core/pktgen.c
1548 @@ -2787,7 +2787,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
1549 } else {
1550 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
1551 }
1552 - skb_reserve(skb, LL_RESERVED_SPACE(dev));
1553 +
1554 + if (likely(skb))
1555 + skb_reserve(skb, LL_RESERVED_SPACE(dev));
1556
1557 return skb;
1558 }
1559 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1560 index 4233cbe47052..49f02856304d 100644
1561 --- a/net/ipv4/ip_output.c
1562 +++ b/net/ipv4/ip_output.c
1563 @@ -240,6 +240,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
1564 * from host network stack.
1565 */
1566 features = netif_skb_features(skb);
1567 + BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
1568 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
1569 if (IS_ERR_OR_NULL(segs)) {
1570 kfree_skb(skb);
1571 @@ -921,7 +922,7 @@ static int __ip_append_data(struct sock *sk,
1572 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
1573 (sk->sk_protocol == IPPROTO_UDP) &&
1574 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
1575 - (sk->sk_type == SOCK_DGRAM)) {
1576 + (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
1577 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
1578 hh_len, fragheaderlen, transhdrlen,
1579 maxfraglen, flags);
1580 diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
1581 index 17d35662930d..3e6a472e6b88 100644
1582 --- a/net/ipv4/tcp_yeah.c
1583 +++ b/net/ipv4/tcp_yeah.c
1584 @@ -219,7 +219,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
1585 yeah->fast_count = 0;
1586 yeah->reno_count = max(yeah->reno_count>>1, 2U);
1587
1588 - return tp->snd_cwnd - reduction;
1589 + return max_t(int, tp->snd_cwnd - reduction, 2);
1590 }
1591
1592 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
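
tcp_yeah_ssthresh() previously returned tp->snd_cwnd - reduction directly, which can dip below the minimum congestion window or wrap around in unsigned arithmetic when the reduction exceeds snd_cwnd; the fix clamps the result to a floor of 2 with max_t(int, ...). A hedged stand-alone version of that clamp (max_int stands in for the kernel's max_t):

/* Clamp "cwnd - reduction" to a floor of 2 using signed arithmetic, so the
 * subtraction cannot wrap when reduction exceeds cwnd. */
#include <stdio.h>

static int max_int(int a, int b)        /* stand-in for max_t(int, a, b) */
{
        return a > b ? a : b;
}

static unsigned int ssthresh_example(unsigned int cwnd, unsigned int reduction)
{
        return (unsigned int)max_int((int)cwnd - (int)reduction, 2);
}

int main(void)
{
        printf("%u\n", ssthresh_example(10, 3));   /* 7 */
        printf("%u\n", ssthresh_example(10, 9));   /* 1 would be too small: 2 */
        printf("%u\n", ssthresh_example(10, 15));  /* would wrap: clamped to 2 */
        return 0;
}
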
1593 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1594 index e6a7bd15b9b7..6473889f1736 100644
1595 --- a/net/ipv6/ip6_output.c
1596 +++ b/net/ipv6/ip6_output.c
1597 @@ -1353,7 +1353,7 @@ emsgsize:
1598 (skb && skb_is_gso(skb))) &&
1599 (sk->sk_protocol == IPPROTO_UDP) &&
1600 (rt->dst.dev->features & NETIF_F_UFO) &&
1601 - (sk->sk_type == SOCK_DGRAM)) {
1602 + (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1603 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1604 hh_len, fragheaderlen,
1605 transhdrlen, mtu, flags, fl6);
1606 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1607 index 6b8a8a9091fa..bd100b47c717 100644
1608 --- a/net/ipv6/tcp_ipv6.c
1609 +++ b/net/ipv6/tcp_ipv6.c
1610 @@ -462,8 +462,10 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
1611 if (np->repflow && ireq->pktopts)
1612 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
1613
1614 + rcu_read_lock();
1615 err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
1616 np->tclass);
1617 + rcu_read_unlock();
1618 err = net_xmit_eval(err);
1619 }
1620
1621 diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
1622 index f7fbdbabe50e..372855eeaf42 100644
1623 --- a/net/ipv6/xfrm6_mode_tunnel.c
1624 +++ b/net/ipv6/xfrm6_mode_tunnel.c
1625 @@ -23,7 +23,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
1626 struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
1627
1628 if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
1629 - IP6_ECN_set_ce(inner_iph);
1630 + IP6_ECN_set_ce(skb, inner_iph);
1631 }
1632
1633 /* Add encapsulation header.
1634 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
1635 index 91a8b004dc51..deadfdab1bc3 100644
1636 --- a/net/openvswitch/datapath.c
1637 +++ b/net/openvswitch/datapath.c
1638 @@ -336,12 +336,10 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
1639 unsigned short gso_type = skb_shinfo(skb)->gso_type;
1640 struct sw_flow_key later_key;
1641 struct sk_buff *segs, *nskb;
1642 - struct ovs_skb_cb ovs_cb;
1643 int err;
1644
1645 - ovs_cb = *OVS_CB(skb);
1646 + BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
1647 segs = __skb_gso_segment(skb, NETIF_F_SG, false);
1648 - *OVS_CB(skb) = ovs_cb;
1649 if (IS_ERR(segs))
1650 return PTR_ERR(segs);
1651 if (segs == NULL)
1652 @@ -359,7 +357,6 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
1653 /* Queue all of the segments. */
1654 skb = segs;
1655 do {
1656 - *OVS_CB(skb) = ovs_cb;
1657 if (gso_type & SKB_GSO_UDP && skb != segs)
1658 key = &later_key;
1659
1660 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
1661 index 10d42f3220ab..f925753668a7 100644
1662 --- a/net/phonet/af_phonet.c
1663 +++ b/net/phonet/af_phonet.c
1664 @@ -377,6 +377,10 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
1665 struct sockaddr_pn sa;
1666 u16 len;
1667
1668 + skb = skb_share_check(skb, GFP_ATOMIC);
1669 + if (!skb)
1670 + return NET_RX_DROP;
1671 +
1672 /* check we have at least a full Phonet header */
1673 if (!pskb_pull(skb, sizeof(struct phonethdr)))
1674 goto out;
1675 diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
1676 index 57692947ebbe..95b021243233 100644
1677 --- a/net/sched/cls_flower.c
1678 +++ b/net/sched/cls_flower.c
1679 @@ -252,23 +252,28 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
1680 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1681 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1682 sizeof(key->eth.src));
1683 +
1684 fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
1685 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
1686 sizeof(key->basic.n_proto));
1687 +
1688 if (key->basic.n_proto == htons(ETH_P_IP) ||
1689 key->basic.n_proto == htons(ETH_P_IPV6)) {
1690 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1691 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1692 sizeof(key->basic.ip_proto));
1693 }
1694 - if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1695 +
1696 + if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1697 + key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1698 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1699 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1700 sizeof(key->ipv4.src));
1701 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1702 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1703 sizeof(key->ipv4.dst));
1704 - } else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1705 + } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1706 + key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1707 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1708 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1709 sizeof(key->ipv6.src));
1710 @@ -276,6 +281,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
1711 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1712 sizeof(key->ipv6.dst));
1713 }
1714 +
1715 if (key->basic.ip_proto == IPPROTO_TCP) {
1716 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1717 &mask->tp.src, TCA_FLOWER_UNSPEC,
1718 diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
1719 index 26d50c565f54..3e0fc5127225 100644
1720 --- a/net/sctp/sysctl.c
1721 +++ b/net/sctp/sysctl.c
1722 @@ -320,7 +320,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
1723 struct ctl_table tbl;
1724 bool changed = false;
1725 char *none = "none";
1726 - char tmp[8];
1727 + char tmp[8] = {0};
1728 int ret;
1729
1730 memset(&tbl, 0, sizeof(struct ctl_table));
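
Changing "char tmp[8]" to "char tmp[8] = {0}" guarantees the whole stack buffer starts zero-filled, so later string handling in the sysctl handler cannot pick up stale stack bytes when fewer than eight characters are written. A quick stand-alone reminder of what the initializer does:

/* "= {0}" zero-fills the entire array, not just the first element. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char tmp[8] = {0};              /* all eight bytes are '\0' */

        memcpy(tmp, "md5", 3);          /* partial write, no terminator copied */
        printf("string: \"%s\" (still NUL-terminated)\n", tmp);

        for (int i = 0; i < 8; i++)
                printf("tmp[%d] = %d\n", i, tmp[i]);
        return 0;
}
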
1731 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
1732 index ef05cd9403d4..e3f85bc8b135 100644
1733 --- a/net/unix/af_unix.c
1734 +++ b/net/unix/af_unix.c
1735 @@ -1513,6 +1513,21 @@ static void unix_destruct_scm(struct sk_buff *skb)
1736 sock_wfree(skb);
1737 }
1738
1739 +/*
1740 + * The "user->unix_inflight" variable is protected by the garbage
1741 + * collection lock, and we just read it locklessly here. If you go
1742 + * over the limit, there might be a tiny race in actually noticing
1743 + * it across threads. Tough.
1744 + */
1745 +static inline bool too_many_unix_fds(struct task_struct *p)
1746 +{
1747 + struct user_struct *user = current_user();
1748 +
1749 + if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
1750 + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
1751 + return false;
1752 +}
1753 +
1754 #define MAX_RECURSION_LEVEL 4
1755
1756 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1757 @@ -1521,6 +1536,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1758 unsigned char max_level = 0;
1759 int unix_sock_count = 0;
1760
1761 + if (too_many_unix_fds(current))
1762 + return -ETOOMANYREFS;
1763 +
1764 for (i = scm->fp->count - 1; i >= 0; i--) {
1765 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1766
1767 @@ -1542,10 +1560,8 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1768 if (!UNIXCB(skb).fp)
1769 return -ENOMEM;
1770
1771 - if (unix_sock_count) {
1772 - for (i = scm->fp->count - 1; i >= 0; i--)
1773 - unix_inflight(scm->fp->fp[i]);
1774 - }
1775 + for (i = scm->fp->count - 1; i >= 0; i--)
1776 + unix_inflight(scm->fp->fp[i]);
1777 return max_level;
1778 }
1779
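
too_many_unix_fds() above compares the sending user's count of in-flight SCM_RIGHTS descriptors against RLIMIT_NOFILE and makes unix_attach_fds() fail with -ETOOMANYREFS unless the task holds CAP_SYS_RESOURCE or CAP_SYS_ADMIN. The kernel bookkeeping (user->unix_inflight, capable()) has no user-space equivalent; the sketch below only illustrates the rlimit comparison, with a made-up counter standing in for the per-user in-flight count:

/* Sketch of a "too many in-flight fds for this user" check against
 * RLIMIT_NOFILE. The counter is a placeholder, not kernel state. */
#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>

static unsigned long fake_unix_inflight = 2048;   /* hypothetical counter */

static bool too_many_inflight_fds(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
                return false;                     /* be permissive on error */
        /* The kernel additionally lets CAP_SYS_RESOURCE/CAP_SYS_ADMIN pass. */
        return fake_unix_inflight > rl.rlim_cur;
}

int main(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
                printf("RLIMIT_NOFILE (soft): %llu\n",
                       (unsigned long long)rl.rlim_cur);
        printf("over the limit: %s\n", too_many_inflight_fds() ? "yes" : "no");
        return 0;
}
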
1780 diff --git a/net/unix/garbage.c b/net/unix/garbage.c
1781 index a73a226f2d33..8fcdc2283af5 100644
1782 --- a/net/unix/garbage.c
1783 +++ b/net/unix/garbage.c
1784 @@ -120,11 +120,11 @@ void unix_inflight(struct file *fp)
1785 {
1786 struct sock *s = unix_get_socket(fp);
1787
1788 + spin_lock(&unix_gc_lock);
1789 +
1790 if (s) {
1791 struct unix_sock *u = unix_sk(s);
1792
1793 - spin_lock(&unix_gc_lock);
1794 -
1795 if (atomic_long_inc_return(&u->inflight) == 1) {
1796 BUG_ON(!list_empty(&u->link));
1797 list_add_tail(&u->link, &gc_inflight_list);
1798 @@ -132,25 +132,28 @@ void unix_inflight(struct file *fp)
1799 BUG_ON(list_empty(&u->link));
1800 }
1801 unix_tot_inflight++;
1802 - spin_unlock(&unix_gc_lock);
1803 }
1804 + fp->f_cred->user->unix_inflight++;
1805 + spin_unlock(&unix_gc_lock);
1806 }
1807
1808 void unix_notinflight(struct file *fp)
1809 {
1810 struct sock *s = unix_get_socket(fp);
1811
1812 + spin_lock(&unix_gc_lock);
1813 +
1814 if (s) {
1815 struct unix_sock *u = unix_sk(s);
1816
1817 - spin_lock(&unix_gc_lock);
1818 BUG_ON(list_empty(&u->link));
1819
1820 if (atomic_long_dec_and_test(&u->inflight))
1821 list_del_init(&u->link);
1822 unix_tot_inflight--;
1823 - spin_unlock(&unix_gc_lock);
1824 }
1825 + fp->f_cred->user->unix_inflight--;
1826 + spin_unlock(&unix_gc_lock);
1827 }
1828
1829 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
1830 diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
1831 index cc3676eb6239..ff4a91fcab9f 100644
1832 --- a/net/xfrm/xfrm_output.c
1833 +++ b/net/xfrm/xfrm_output.c
1834 @@ -167,6 +167,8 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
1835 {
1836 struct sk_buff *segs;
1837
1838 + BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
1839 + BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
1840 segs = skb_gso_segment(skb, 0);
1841 kfree_skb(skb);
1842 if (IS_ERR(segs))
1843 diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
1844 index 826470d7f000..96e2486a6fc4 100755
1845 --- a/scripts/recordmcount.pl
1846 +++ b/scripts/recordmcount.pl
1847 @@ -263,7 +263,8 @@ if ($arch eq "x86_64") {
1848
1849 } elsif ($arch eq "powerpc") {
1850 $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
1851 - $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
1852 + # See comment in the sparc64 section for why we use '\w'.
1853 + $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
1854 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
1855
1856 if ($bits == 64) {
1857 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
1858 index a3f85d2a00bb..e6d50172872f 100644
1859 --- a/security/keys/process_keys.c
1860 +++ b/security/keys/process_keys.c
1861 @@ -794,6 +794,7 @@ long join_session_keyring(const char *name)
1862 ret = PTR_ERR(keyring);
1863 goto error2;
1864 } else if (keyring == new->session_keyring) {
1865 + key_put(keyring);
1866 ret = 0;
1867 goto error2;
1868 }
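
join_session_keyring() takes a reference on the keyring it looks up by name; on the "this is already our session keyring" early-return path that reference was leaked, and the one-line fix adds the missing key_put(). A small stand-alone reminder of the pattern that every exit path after acquiring a reference must release it (resource_get/resource_put are invented names):

/* Early "nothing to do" returns still have to drop references taken
 * earlier -- the bug class fixed by the key_put() above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct resource {
        char name[32];
};

static struct resource *resource_get(const char *name)
{
        struct resource *r = calloc(1, sizeof(*r));

        if (r)
                snprintf(r->name, sizeof(r->name), "%s", name);
        return r;
}

static void resource_put(struct resource *r)
{
        free(r);
}

static int join_named(const char *current_name, const char *wanted)
{
        struct resource *r = resource_get(wanted);

        if (!r)
                return -1;
        if (strcmp(current_name, wanted) == 0) {
                resource_put(r);        /* early return: still drop the ref */
                return 0;
        }
        /* ... use the resource, then drop our lookup reference ... */
        resource_put(r);
        return 0;
}

int main(void)
{
        printf("%d\n", join_named("session-a", "session-a"));
        printf("%d\n", join_named("session-a", "session-b"));
        return 0;
}
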
1869 diff --git a/sound/core/control.c b/sound/core/control.c
1870 index 196a6fe100ca..a85d45595d02 100644
1871 --- a/sound/core/control.c
1872 +++ b/sound/core/control.c
1873 @@ -1405,6 +1405,8 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
1874 return -EFAULT;
1875 if (tlv.length < sizeof(unsigned int) * 2)
1876 return -EINVAL;
1877 + if (!tlv.numid)
1878 + return -EINVAL;
1879 down_read(&card->controls_rwsem);
1880 kctl = snd_ctl_find_numid(card, tlv.numid);
1881 if (kctl == NULL) {
1882 diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
1883 index f845ecf7e172..656d9a9032dc 100644
1884 --- a/sound/core/hrtimer.c
1885 +++ b/sound/core/hrtimer.c
1886 @@ -90,7 +90,7 @@ static int snd_hrtimer_start(struct snd_timer *t)
1887 struct snd_hrtimer *stime = t->private_data;
1888
1889 atomic_set(&stime->running, 0);
1890 - hrtimer_cancel(&stime->hrt);
1891 + hrtimer_try_to_cancel(&stime->hrt);
1892 hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
1893 HRTIMER_MODE_REL);
1894 atomic_set(&stime->running, 1);
1895 @@ -101,6 +101,7 @@ static int snd_hrtimer_stop(struct snd_timer *t)
1896 {
1897 struct snd_hrtimer *stime = t->private_data;
1898 atomic_set(&stime->running, 0);
1899 + hrtimer_try_to_cancel(&stime->hrt);
1900 return 0;
1901 }
1902
1903 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
1904 index b48b434444ed..9630e9f72b7b 100644
1905 --- a/sound/core/pcm_compat.c
1906 +++ b/sound/core/pcm_compat.c
1907 @@ -255,10 +255,15 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
1908 if (! (runtime = substream->runtime))
1909 return -ENOTTY;
1910
1911 - /* only fifo_size is different, so just copy all */
1912 - data = memdup_user(data32, sizeof(*data32));
1913 - if (IS_ERR(data))
1914 - return PTR_ERR(data);
1915 + data = kmalloc(sizeof(*data), GFP_KERNEL);
1916 + if (!data)
1917 + return -ENOMEM;
1918 +
1919 + /* only fifo_size (RO from userspace) is different, so just copy all */
1920 + if (copy_from_user(data, data32, sizeof(*data32))) {
1921 + err = -EFAULT;
1922 + goto error;
1923 + }
1924
1925 if (refine)
1926 err = snd_pcm_hw_refine(substream, data);
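
The pcm_compat hunk replaces memdup_user(data32, sizeof(*data32)), which allocates only the compat-sized layout, with a kmalloc() of the full native struct followed by copy_from_user() of the compat-sized block, presumably so that later use of the buffer as the native struct by snd_pcm_hw_refine()/snd_pcm_hw_params() stays within the allocation. A stand-alone sketch of "allocate the big struct, copy the small one" (both structs below are made up; in the real code the layouts differ only in fifo_size):

/* When a compat layout is smaller than the native struct, allocate the
 * native size and copy only the compat-sized prefix into it. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct params32 {               /* hypothetical compat layout */
        unsigned int rate;
        unsigned int channels;
};

struct params {                 /* hypothetical native layout (larger) */
        unsigned int rate;
        unsigned int channels;
        unsigned int fifo_size; /* filled in by the "kernel" side only */
};

int main(void)
{
        struct params32 from_user = { .rate = 48000, .channels = 2 };
        struct params *data = calloc(1, sizeof(*data));    /* native size */

        if (!data)
                return 1;
        memcpy(data, &from_user, sizeof(from_user));        /* compat-size copy */
        data->fifo_size = 512;   /* safe: the allocation covers this field */

        printf("rate=%u channels=%u fifo=%u\n",
               data->rate, data->channels, data->fifo_size);
        free(data);
        return 0;
}
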
1927 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
1928 index b64f20deba90..13cfa815732d 100644
1929 --- a/sound/core/seq/seq_clientmgr.c
1930 +++ b/sound/core/seq/seq_clientmgr.c
1931 @@ -1962,7 +1962,7 @@ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
1932 * No restrictions so for a user client we can clear
1933 * the whole fifo
1934 */
1935 - if (client->type == USER_CLIENT)
1936 + if (client->type == USER_CLIENT && client->data.user.fifo)
1937 snd_seq_fifo_clear(client->data.user.fifo);
1938 }
1939
1940 diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
1941 index 81f7c109dc46..65175902a68a 100644
1942 --- a/sound/core/seq/seq_compat.c
1943 +++ b/sound/core/seq/seq_compat.c
1944 @@ -49,11 +49,12 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
1945 struct snd_seq_port_info *data;
1946 mm_segment_t fs;
1947
1948 - data = memdup_user(data32, sizeof(*data32));
1949 - if (IS_ERR(data))
1950 - return PTR_ERR(data);
1951 + data = kmalloc(sizeof(*data), GFP_KERNEL);
1952 + if (!data)
1953 + return -ENOMEM;
1954
1955 - if (get_user(data->flags, &data32->flags) ||
1956 + if (copy_from_user(data, data32, sizeof(*data32)) ||
1957 + get_user(data->flags, &data32->flags) ||
1958 get_user(data->time_queue, &data32->time_queue))
1959 goto error;
1960 data->kernel = NULL;
1961 diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
1962 index 7dfd0f429410..0bec02e89d51 100644
1963 --- a/sound/core/seq/seq_queue.c
1964 +++ b/sound/core/seq/seq_queue.c
1965 @@ -142,8 +142,10 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
1966 static void queue_delete(struct snd_seq_queue *q)
1967 {
1968 /* stop and release the timer */
1969 + mutex_lock(&q->timer_mutex);
1970 snd_seq_timer_stop(q->timer);
1971 snd_seq_timer_close(q);
1972 + mutex_unlock(&q->timer_mutex);
1973 /* wait until access free */
1974 snd_use_lock_sync(&q->use_lock);
1975 /* release resources... */
1976 diff --git a/sound/core/timer.c b/sound/core/timer.c
1977 index 31f40f03e5b7..0a049c4578f1 100644
1978 --- a/sound/core/timer.c
1979 +++ b/sound/core/timer.c
1980 @@ -65,6 +65,7 @@ struct snd_timer_user {
1981 int qtail;
1982 int qused;
1983 int queue_size;
1984 + bool disconnected;
1985 struct snd_timer_read *queue;
1986 struct snd_timer_tread *tqueue;
1987 spinlock_t qlock;
1988 @@ -73,7 +74,7 @@ struct snd_timer_user {
1989 struct timespec tstamp; /* trigger tstamp */
1990 wait_queue_head_t qchange_sleep;
1991 struct fasync_struct *fasync;
1992 - struct mutex tread_sem;
1993 + struct mutex ioctl_lock;
1994 };
1995
1996 /* list of timers */
1997 @@ -215,11 +216,13 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
1998 slave->slave_id == master->slave_id) {
1999 list_move_tail(&slave->open_list, &master->slave_list_head);
2000 spin_lock_irq(&slave_active_lock);
2001 + spin_lock(&master->timer->lock);
2002 slave->master = master;
2003 slave->timer = master->timer;
2004 if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
2005 list_add_tail(&slave->active_list,
2006 &master->slave_active_head);
2007 + spin_unlock(&master->timer->lock);
2008 spin_unlock_irq(&slave_active_lock);
2009 }
2010 }
2011 @@ -288,6 +291,9 @@ int snd_timer_open(struct snd_timer_instance **ti,
2012 mutex_unlock(&register_mutex);
2013 return -ENOMEM;
2014 }
2015 + /* take a card refcount for safe disconnection */
2016 + if (timer->card)
2017 + get_device(&timer->card->card_dev);
2018 timeri->slave_class = tid->dev_sclass;
2019 timeri->slave_id = slave_id;
2020 if (list_empty(&timer->open_list_head) && timer->hw.open)
2021 @@ -346,15 +352,21 @@ int snd_timer_close(struct snd_timer_instance *timeri)
2022 timer->hw.close)
2023 timer->hw.close(timer);
2024 /* remove slave links */
2025 + spin_lock_irq(&slave_active_lock);
2026 + spin_lock(&timer->lock);
2027 list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
2028 open_list) {
2029 - spin_lock_irq(&slave_active_lock);
2030 - _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
2031 list_move_tail(&slave->open_list, &snd_timer_slave_list);
2032 slave->master = NULL;
2033 slave->timer = NULL;
2034 - spin_unlock_irq(&slave_active_lock);
2035 + list_del_init(&slave->ack_list);
2036 + list_del_init(&slave->active_list);
2037 }
2038 + spin_unlock(&timer->lock);
2039 + spin_unlock_irq(&slave_active_lock);
2040 + /* release a card refcount for safe disconnection */
2041 + if (timer->card)
2042 + put_device(&timer->card->card_dev);
2043 mutex_unlock(&register_mutex);
2044 }
2045 out:
2046 @@ -441,9 +453,12 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
2047
2048 spin_lock_irqsave(&slave_active_lock, flags);
2049 timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
2050 - if (timeri->master)
2051 + if (timeri->master && timeri->timer) {
2052 + spin_lock(&timeri->timer->lock);
2053 list_add_tail(&timeri->active_list,
2054 &timeri->master->slave_active_head);
2055 + spin_unlock(&timeri->timer->lock);
2056 + }
2057 spin_unlock_irqrestore(&slave_active_lock, flags);
2058 return 1; /* delayed start */
2059 }
2060 @@ -467,6 +482,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
2061 timer = timeri->timer;
2062 if (timer == NULL)
2063 return -EINVAL;
2064 + if (timer->card && timer->card->shutdown)
2065 + return -ENODEV;
2066 spin_lock_irqsave(&timer->lock, flags);
2067 timeri->ticks = timeri->cticks = ticks;
2068 timeri->pticks = 0;
2069 @@ -489,6 +506,8 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
2070 if (!keep_flag) {
2071 spin_lock_irqsave(&slave_active_lock, flags);
2072 timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
2073 + list_del_init(&timeri->ack_list);
2074 + list_del_init(&timeri->active_list);
2075 spin_unlock_irqrestore(&slave_active_lock, flags);
2076 }
2077 goto __end;
2078 @@ -499,6 +518,10 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
2079 spin_lock_irqsave(&timer->lock, flags);
2080 list_del_init(&timeri->ack_list);
2081 list_del_init(&timeri->active_list);
2082 + if (timer->card && timer->card->shutdown) {
2083 + spin_unlock_irqrestore(&timer->lock, flags);
2084 + return 0;
2085 + }
2086 if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
2087 !(--timer->running)) {
2088 timer->hw.stop(timer);
2089 @@ -561,6 +584,8 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
2090 timer = timeri->timer;
2091 if (! timer)
2092 return -EINVAL;
2093 + if (timer->card && timer->card->shutdown)
2094 + return -ENODEV;
2095 spin_lock_irqsave(&timer->lock, flags);
2096 if (!timeri->cticks)
2097 timeri->cticks = 1;
2098 @@ -624,6 +649,9 @@ static void snd_timer_tasklet(unsigned long arg)
2099 unsigned long resolution, ticks;
2100 unsigned long flags;
2101
2102 + if (timer->card && timer->card->shutdown)
2103 + return;
2104 +
2105 spin_lock_irqsave(&timer->lock, flags);
2106 /* now process all callbacks */
2107 while (!list_empty(&timer->sack_list_head)) {
2108 @@ -664,6 +692,9 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
2109 if (timer == NULL)
2110 return;
2111
2112 + if (timer->card && timer->card->shutdown)
2113 + return;
2114 +
2115 spin_lock_irqsave(&timer->lock, flags);
2116
2117 /* remember the current resolution */
2118 @@ -694,7 +725,7 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
2119 } else {
2120 ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
2121 if (--timer->running)
2122 - list_del(&ti->active_list);
2123 + list_del_init(&ti->active_list);
2124 }
2125 if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
2126 (ti->flags & SNDRV_TIMER_IFLG_FAST))
2127 @@ -874,11 +905,28 @@ static int snd_timer_dev_register(struct snd_device *dev)
2128 return 0;
2129 }
2130
2131 +/* just for reference in snd_timer_dev_disconnect() below */
2132 +static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
2133 + int event, struct timespec *tstamp,
2134 + unsigned long resolution);
2135 +
2136 static int snd_timer_dev_disconnect(struct snd_device *device)
2137 {
2138 struct snd_timer *timer = device->device_data;
2139 + struct snd_timer_instance *ti;
2140 +
2141 mutex_lock(&register_mutex);
2142 list_del_init(&timer->device_list);
2143 + /* wake up pending sleepers */
2144 + list_for_each_entry(ti, &timer->open_list_head, open_list) {
2145 + /* FIXME: better to have a ti.disconnect() op */
2146 + if (ti->ccallback == snd_timer_user_ccallback) {
2147 + struct snd_timer_user *tu = ti->callback_data;
2148 +
2149 + tu->disconnected = true;
2150 + wake_up(&tu->qchange_sleep);
2151 + }
2152 + }
2153 mutex_unlock(&register_mutex);
2154 return 0;
2155 }
2156 @@ -889,6 +937,8 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam
2157 unsigned long resolution = 0;
2158 struct snd_timer_instance *ti, *ts;
2159
2160 + if (timer->card && timer->card->shutdown)
2161 + return;
2162 if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
2163 return;
2164 if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
2165 @@ -1047,6 +1097,8 @@ static void snd_timer_proc_read(struct snd_info_entry *entry,
2166
2167 mutex_lock(&register_mutex);
2168 list_for_each_entry(timer, &snd_timer_list, device_list) {
2169 + if (timer->card && timer->card->shutdown)
2170 + continue;
2171 switch (timer->tmr_class) {
2172 case SNDRV_TIMER_CLASS_GLOBAL:
2173 snd_iprintf(buffer, "G%i: ", timer->tmr_device);
2174 @@ -1253,7 +1305,7 @@ static int snd_timer_user_open(struct inode *inode, struct file *file)
2175 return -ENOMEM;
2176 spin_lock_init(&tu->qlock);
2177 init_waitqueue_head(&tu->qchange_sleep);
2178 - mutex_init(&tu->tread_sem);
2179 + mutex_init(&tu->ioctl_lock);
2180 tu->ticks = 1;
2181 tu->queue_size = 128;
2182 tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
2183 @@ -1273,8 +1325,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
2184 if (file->private_data) {
2185 tu = file->private_data;
2186 file->private_data = NULL;
2187 + mutex_lock(&tu->ioctl_lock);
2188 if (tu->timeri)
2189 snd_timer_close(tu->timeri);
2190 + mutex_unlock(&tu->ioctl_lock);
2191 kfree(tu->queue);
2192 kfree(tu->tqueue);
2193 kfree(tu);
2194 @@ -1512,7 +1566,6 @@ static int snd_timer_user_tselect(struct file *file,
2195 int err = 0;
2196
2197 tu = file->private_data;
2198 - mutex_lock(&tu->tread_sem);
2199 if (tu->timeri) {
2200 snd_timer_close(tu->timeri);
2201 tu->timeri = NULL;
2202 @@ -1556,7 +1609,6 @@ static int snd_timer_user_tselect(struct file *file,
2203 }
2204
2205 __err:
2206 - mutex_unlock(&tu->tread_sem);
2207 return err;
2208 }
2209
2210 @@ -1769,7 +1821,7 @@ enum {
2211 SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
2212 };
2213
2214 -static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
2215 +static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
2216 unsigned long arg)
2217 {
2218 struct snd_timer_user *tu;
2219 @@ -1786,17 +1838,11 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
2220 {
2221 int xarg;
2222
2223 - mutex_lock(&tu->tread_sem);
2224 - if (tu->timeri) { /* too late */
2225 - mutex_unlock(&tu->tread_sem);
2226 + if (tu->timeri) /* too late */
2227 return -EBUSY;
2228 - }
2229 - if (get_user(xarg, p)) {
2230 - mutex_unlock(&tu->tread_sem);
2231 + if (get_user(xarg, p))
2232 return -EFAULT;
2233 - }
2234 tu->tread = xarg ? 1 : 0;
2235 - mutex_unlock(&tu->tread_sem);
2236 return 0;
2237 }
2238 case SNDRV_TIMER_IOCTL_GINFO:
2239 @@ -1829,6 +1875,18 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
2240 return -ENOTTY;
2241 }
2242
2243 +static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
2244 + unsigned long arg)
2245 +{
2246 + struct snd_timer_user *tu = file->private_data;
2247 + long ret;
2248 +
2249 + mutex_lock(&tu->ioctl_lock);
2250 + ret = __snd_timer_user_ioctl(file, cmd, arg);
2251 + mutex_unlock(&tu->ioctl_lock);
2252 + return ret;
2253 +}
2254 +
2255 static int snd_timer_user_fasync(int fd, struct file * file, int on)
2256 {
2257 struct snd_timer_user *tu;
2258 @@ -1866,6 +1924,10 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
2259
2260 remove_wait_queue(&tu->qchange_sleep, &wait);
2261
2262 + if (tu->disconnected) {
2263 + err = -ENODEV;
2264 + break;
2265 + }
2266 if (signal_pending(current)) {
2267 err = -ERESTARTSYS;
2268 break;
2269 @@ -1915,6 +1977,8 @@ static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
2270 mask = 0;
2271 if (tu->qused)
2272 mask |= POLLIN | POLLRDNORM;
2273 + if (tu->disconnected)
2274 + mask |= POLLERR;
2275
2276 return mask;
2277 }
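
A large part of the timer.c hunk retires the narrow tread_sem in favor of a single ioctl_lock: snd_timer_user_ioctl() becomes a thin wrapper that takes the mutex, calls the renamed __snd_timer_user_ioctl(), and releases it, so every user-timer ioctl (and the release path, which now also takes the lock) is serialized against the others. A hedged stand-alone sketch of that wrapper shape using POSIX threads (the handler names below are invented):

/* Serialize every command against the others by wrapping the real handler
 * in one lock -- the shape of the snd_timer_user_ioctl() change. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ioctl_lock = PTHREAD_MUTEX_INITIALIZER;

static long __do_command(unsigned int cmd, unsigned long arg)
{
        /* The real work goes here, free to assume no concurrent caller. */
        printf("cmd %u arg %lu\n", cmd, arg);
        return 0;
}

static long do_command(unsigned int cmd, unsigned long arg)
{
        long ret;

        pthread_mutex_lock(&ioctl_lock);
        ret = __do_command(cmd, arg);
        pthread_mutex_unlock(&ioctl_lock);
        return ret;
}

int main(void)
{
        do_command(1, 100);
        do_command(2, 200);
        return 0;
}
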
2278 diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
2279 index 70671ad65d24..6efadbfb3fe3 100644
2280 --- a/sound/pci/hda/hda_bind.c
2281 +++ b/sound/pci/hda/hda_bind.c
2282 @@ -174,14 +174,40 @@ static inline bool codec_probed(struct hda_codec *codec)
2283 return device_attach(hda_codec_dev(codec)) > 0 && codec->preset;
2284 }
2285
2286 -/* try to auto-load and bind the codec module */
2287 -static void codec_bind_module(struct hda_codec *codec)
2288 +/* try to auto-load codec module */
2289 +static void request_codec_module(struct hda_codec *codec)
2290 {
2291 #ifdef MODULE
2292 char modalias[32];
2293 + const char *mod = NULL;
2294 +
2295 + switch (codec->probe_id) {
2296 + case HDA_CODEC_ID_GENERIC_HDMI:
2297 +#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
2298 + mod = "snd-hda-codec-hdmi";
2299 +#endif
2300 + break;
2301 + case HDA_CODEC_ID_GENERIC:
2302 +#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
2303 + mod = "snd-hda-codec-generic";
2304 +#endif
2305 + break;
2306 + default:
2307 + snd_hdac_codec_modalias(&codec->core, modalias, sizeof(modalias));
2308 + mod = modalias;
2309 + break;
2310 + }
2311 +
2312 + if (mod)
2313 + request_module(mod);
2314 +#endif /* MODULE */
2315 +}
2316
2317 - snd_hdac_codec_modalias(&codec->core, modalias, sizeof(modalias));
2318 - request_module(modalias);
2319 +/* try to auto-load and bind the codec module */
2320 +static void codec_bind_module(struct hda_codec *codec)
2321 +{
2322 +#ifdef MODULE
2323 + request_codec_module(codec);
2324 if (codec_probed(codec))
2325 return;
2326 #endif
2327 @@ -218,17 +244,13 @@ static int codec_bind_generic(struct hda_codec *codec)
2328
2329 if (is_likely_hdmi_codec(codec)) {
2330 codec->probe_id = HDA_CODEC_ID_GENERIC_HDMI;
2331 -#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
2332 - request_module("snd-hda-codec-hdmi");
2333 -#endif
2334 + request_codec_module(codec);
2335 if (codec_probed(codec))
2336 return 0;
2337 }
2338
2339 codec->probe_id = HDA_CODEC_ID_GENERIC;
2340 -#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
2341 - request_module("snd-hda-codec-generic");
2342 -#endif
2343 + request_codec_module(codec);
2344 if (codec_probed(codec))
2345 return 0;
2346 return -ENODEV;
2347 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2348 index 3b3658297070..614baff1f5d7 100644
2349 --- a/sound/pci/hda/hda_intel.c
2350 +++ b/sound/pci/hda/hda_intel.c
2351 @@ -2126,9 +2126,17 @@ i915_power_fail:
2352 static void azx_remove(struct pci_dev *pci)
2353 {
2354 struct snd_card *card = pci_get_drvdata(pci);
2355 + struct azx *chip;
2356 + struct hda_intel *hda;
2357 +
2358 + if (card) {
2359 + /* flush the pending probing work */
2360 + chip = card->private_data;
2361 + hda = container_of(chip, struct hda_intel, chip);
2362 + flush_work(&hda->probe_work);
2363
2364 - if (card)
2365 snd_card_free(card);
2366 + }
2367 }
2368
2369 static void azx_shutdown(struct pci_dev *pci)
2370 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2371 index 3a89d82f8057..33753244f48f 100644
2372 --- a/sound/pci/hda/patch_realtek.c
2373 +++ b/sound/pci/hda/patch_realtek.c
2374 @@ -4666,6 +4666,7 @@ enum {
2375 ALC290_FIXUP_SUBWOOFER,
2376 ALC290_FIXUP_SUBWOOFER_HSJACK,
2377 ALC269_FIXUP_THINKPAD_ACPI,
2378 + ALC269_FIXUP_DMIC_THINKPAD_ACPI,
2379 ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2380 ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
2381 ALC255_FIXUP_HEADSET_MODE,
2382 @@ -5103,6 +5104,12 @@ static const struct hda_fixup alc269_fixups[] = {
2383 .type = HDA_FIXUP_FUNC,
2384 .v.func = hda_fixup_thinkpad_acpi,
2385 },
2386 + [ALC269_FIXUP_DMIC_THINKPAD_ACPI] = {
2387 + .type = HDA_FIXUP_FUNC,
2388 + .v.func = alc_fixup_inv_dmic,
2389 + .chained = true,
2390 + .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
2391 + },
2392 [ALC255_FIXUP_DELL1_MIC_NO_PRESENCE] = {
2393 .type = HDA_FIXUP_PINS,
2394 .v.pins = (const struct hda_pintbl[]) {
2395 @@ -5324,6 +5331,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2396 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
2397 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
2398 SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
2399 + SND_PCI_QUIRK(0x1028, 0x05be, "Dell Latitude E6540", ALC292_FIXUP_DELL_E7X),
2400 SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
2401 SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
2402 SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
2403 @@ -5332,6 +5340,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2404 SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2405 SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
2406 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
2407 + SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X),
2408 SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X),
2409 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
2410 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
2411 @@ -5457,6 +5466,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2412 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
2413 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
2414 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
2415 + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
2416 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
2417 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
2418 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
2419 @@ -5617,6 +5627,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
2420 {0x21, 0x02211040}),
2421 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2422 {0x12, 0x90a60170},
2423 + {0x14, 0x90171130},
2424 + {0x21, 0x02211040}),
2425 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2426 + {0x12, 0x90a60170},
2427 {0x14, 0x90170140},
2428 {0x21, 0x02211050}),
2429 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2430 @@ -6552,6 +6566,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
2431 SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
2432 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
2433 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
2434 + SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
2435 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
2436 SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
2437 SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
2438 diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
2439 index c04c0bc6f58a..52b9ccf6d389 100644
2440 --- a/sound/soc/codecs/wm5110.c
2441 +++ b/sound/soc/codecs/wm5110.c
2442 @@ -360,15 +360,13 @@ static int wm5110_hp_ev(struct snd_soc_dapm_widget *w,
2443
2444 static int wm5110_clear_pga_volume(struct arizona *arizona, int output)
2445 {
2446 - struct reg_sequence clear_pga = {
2447 - ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4, 0x80
2448 - };
2449 + unsigned int reg = ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4;
2450 int ret;
2451
2452 - ret = regmap_multi_reg_write_bypassed(arizona->regmap, &clear_pga, 1);
2453 + ret = regmap_write(arizona->regmap, reg, 0x80);
2454 if (ret)
2455 dev_err(arizona->dev, "Failed to clear PGA (0x%x): %d\n",
2456 - clear_pga.reg, ret);
2457 + reg, ret);
2458
2459 return ret;
2460 }
2461 diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
2462 index 12a9820feac1..bb82bb966000 100644
2463 --- a/sound/soc/soc-compress.c
2464 +++ b/sound/soc/soc-compress.c
2465 @@ -630,6 +630,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
2466 struct snd_pcm *be_pcm;
2467 char new_name[64];
2468 int ret = 0, direction = 0;
2469 + int playback = 0, capture = 0;
2470
2471 if (rtd->num_codecs > 1) {
2472 dev_err(rtd->card->dev, "Multicodec not supported for compressed stream\n");
2473 @@ -641,11 +642,27 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
2474 rtd->dai_link->stream_name, codec_dai->name, num);
2475
2476 if (codec_dai->driver->playback.channels_min)
2477 + playback = 1;
2478 + if (codec_dai->driver->capture.channels_min)
2479 + capture = 1;
2480 +
2481 + capture = capture && cpu_dai->driver->capture.channels_min;
2482 + playback = playback && cpu_dai->driver->playback.channels_min;
2483 +
2484 + /*
2485 + * Compress devices are unidirectional so only one of the directions
2486 + * should be set, check for that (xor)
2487 + */
2488 + if (playback + capture != 1) {
2489 + dev_err(rtd->card->dev, "Invalid direction for compress P %d, C %d\n",
2490 + playback, capture);
2491 + return -EINVAL;
2492 + }
2493 +
2494 + if(playback)
2495 direction = SND_COMPRESS_PLAYBACK;
2496 - else if (codec_dai->driver->capture.channels_min)
2497 - direction = SND_COMPRESS_CAPTURE;
2498 else
2499 - return -EINVAL;
2500 + direction = SND_COMPRESS_CAPTURE;
2501
2502 compr = kzalloc(sizeof(*compr), GFP_KERNEL);
2503 if (compr == NULL) {
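
The soc-compress change computes playback and capture capability from both the codec and CPU DAIs and then requires exactly one direction to be set (the in-tree comment calls it an xor check) before choosing SND_COMPRESS_PLAYBACK or SND_COMPRESS_CAPTURE. A stand-alone version of that "exactly one of two" validation:

/* Compressed streams are unidirectional: exactly one of playback/capture
 * may be supported, which "playback + capture == 1" checks in one go. */
#include <stdio.h>

static int pick_direction(int playback, int capture)
{
        if (playback + capture != 1)
                return -1;              /* both or neither: invalid */
        return playback ? 0 /* PLAYBACK */ : 1 /* CAPTURE */;
}

int main(void)
{
        printf("%d\n", pick_direction(1, 0));   /* 0: playback */
        printf("%d\n", pick_direction(0, 1));   /* 1: capture  */
        printf("%d\n", pick_direction(1, 1));   /* -1: invalid */
        printf("%d\n", pick_direction(0, 0));   /* -1: invalid */
        return 0;
}
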
2504 diff --git a/sound/usb/card.c b/sound/usb/card.c
2505 index 18f56646ce86..1f09d9591276 100644
2506 --- a/sound/usb/card.c
2507 +++ b/sound/usb/card.c
2508 @@ -675,6 +675,8 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
2509
2510 void snd_usb_autosuspend(struct snd_usb_audio *chip)
2511 {
2512 + if (atomic_read(&chip->shutdown))
2513 + return;
2514 if (atomic_dec_and_test(&chip->active))
2515 usb_autopm_put_interface(chip->pm_intf);
2516 }
2517 diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
2518 index 0ce888dceed0..279025650568 100644
2519 --- a/sound/usb/mixer_quirks.c
2520 +++ b/sound/usb/mixer_quirks.c
2521 @@ -793,7 +793,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
2522 return 0;
2523
2524 kcontrol->private_value &= ~(0xff << 24);
2525 - kcontrol->private_value |= newval;
2526 + kcontrol->private_value |= (unsigned int)newval << 24;
2527 err = snd_ni_update_cur_val(list);
2528 return err < 0 ? err : 1;
2529 }
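
In the mixer_quirks hunk the control value lives in bits 24..31 of kcontrol->private_value; the old code cleared that field but then ORed in the unshifted byte, so the low bits were corrupted and the high byte stayed zero. The fix shifts newval back into position. A stand-alone demonstration of packing a byte into the top bits of a 32-bit word:

/* Pack a byte into bits 24..31: clear the field first, then OR in the new
 * value shifted into place. */
#include <stdint.h>
#include <stdio.h>

static uint32_t set_high_byte(uint32_t word, uint8_t newval)
{
        word &= ~(0xffu << 24);                 /* clear bits 24..31 */
        word |= (uint32_t)newval << 24;         /* correct: shift into place */
        return word;
}

int main(void)
{
        uint32_t private_value = 0x12abcdefu;

        printf("0x%08x\n", set_high_byte(private_value, 0x7f)); /* 0x7fabcdef */
        /* The pre-fix code effectively did "word |= newval;" after the clear,
         * which would print 0x00abcdff here: the high byte is wiped and the
         * low byte is corrupted instead of the intended field being set. */
        return 0;
}
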
2530 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
2531 index b6c0c8e3b450..23ea6d800c4c 100644
2532 --- a/sound/usb/quirks.c
2533 +++ b/sound/usb/quirks.c
2534 @@ -1269,6 +1269,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
2535 case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
2536 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
2537 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
2538 + case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
2539 if (fp->altsetting == 2)
2540 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
2541 break;