Magellan Linux

Contents of /trunk/kernel-alx/patches-4.1/0116-4.1.17-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2761 - (show annotations) (download)
Fri Feb 12 13:40:38 2016 UTC (8 years, 2 months ago) by niro
File size: 162185 byte(s)
-linux-4.1.17
1 diff --git a/Makefile b/Makefile
2 index 7609f1dcdcb9..d398dd440bc9 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 1
8 -SUBLEVEL = 16
9 +SUBLEVEL = 17
10 EXTRAVERSION =
11 NAME = Series 4800
12
13 diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
14 index 191dcfab9f60..da09ddcfcc00 100644
15 --- a/arch/arm/kvm/mmu.c
16 +++ b/arch/arm/kvm/mmu.c
17 @@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
18 __kvm_flush_dcache_pud(pud);
19 }
20
21 +static bool kvm_is_device_pfn(unsigned long pfn)
22 +{
23 + return !pfn_valid(pfn);
24 +}
25 +
26 /**
27 * stage2_dissolve_pmd() - clear and flush huge PMD entry
28 * @kvm: pointer to kvm structure.
29 @@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
30 kvm_tlb_flush_vmid_ipa(kvm, addr);
31
32 /* No need to invalidate the cache for device mappings */
33 - if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
34 + if (!kvm_is_device_pfn(pte_pfn(old_pte)))
35 kvm_flush_dcache_pte(old_pte);
36
37 put_page(virt_to_page(pte));
38 @@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
39
40 pte = pte_offset_kernel(pmd, addr);
41 do {
42 - if (!pte_none(*pte) &&
43 - (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
44 + if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
45 kvm_flush_dcache_pte(*pte);
46 } while (pte++, addr += PAGE_SIZE, addr != end);
47 }
48 @@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
49 return kvm_vcpu_dabt_iswrite(vcpu);
50 }
51
52 -static bool kvm_is_device_pfn(unsigned long pfn)
53 -{
54 - return !pfn_valid(pfn);
55 -}
56 -
57 /**
58 * stage2_wp_ptes - write protect PMD range
59 * @pmd: pointer to pmd entry
60 diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
61 index e0e23582c8b4..5fe949b084ac 100644
62 --- a/arch/arm/net/bpf_jit_32.c
63 +++ b/arch/arm/net/bpf_jit_32.c
64 @@ -162,19 +162,6 @@ static inline int mem_words_used(struct jit_ctx *ctx)
65 return fls(ctx->seen & SEEN_MEM);
66 }
67
68 -static inline bool is_load_to_a(u16 inst)
69 -{
70 - switch (inst) {
71 - case BPF_LD | BPF_W | BPF_LEN:
72 - case BPF_LD | BPF_W | BPF_ABS:
73 - case BPF_LD | BPF_H | BPF_ABS:
74 - case BPF_LD | BPF_B | BPF_ABS:
75 - return true;
76 - default:
77 - return false;
78 - }
79 -}
80 -
81 static void jit_fill_hole(void *area, unsigned int size)
82 {
83 u32 *ptr;
84 @@ -186,7 +173,6 @@ static void jit_fill_hole(void *area, unsigned int size)
85 static void build_prologue(struct jit_ctx *ctx)
86 {
87 u16 reg_set = saved_regs(ctx);
88 - u16 first_inst = ctx->skf->insns[0].code;
89 u16 off;
90
91 #ifdef CONFIG_FRAME_POINTER
92 @@ -216,7 +202,7 @@ static void build_prologue(struct jit_ctx *ctx)
93 emit(ARM_MOV_I(r_X, 0), ctx);
94
95 /* do not leak kernel data to userspace */
96 - if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
97 + if (bpf_needs_clear_a(&ctx->skf->insns[0]))
98 emit(ARM_MOV_I(r_A, 0), ctx);
99
100 /* stack space for the BPF_MEM words */
101 diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
102 index 17e92f05b1fe..3ca894ecf699 100644
103 --- a/arch/arm64/include/asm/kvm_emulate.h
104 +++ b/arch/arm64/include/asm/kvm_emulate.h
105 @@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
106 *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
107 }
108
109 +/*
110 + * vcpu_reg should always be passed a register number coming from a
111 + * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
112 + * with banked registers.
113 + */
114 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
115 {
116 - if (vcpu_mode_is_32bit(vcpu))
117 - return vcpu_reg32(vcpu, reg_num);
118 -
119 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
120 }
121
122 diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
123 index d882b833dbdb..608ac6aa497b 100644
124 --- a/arch/arm64/kernel/ptrace.c
125 +++ b/arch/arm64/kernel/ptrace.c
126 @@ -58,6 +58,12 @@
127 */
128 void ptrace_disable(struct task_struct *child)
129 {
130 + /*
131 + * This would be better off in core code, but PTRACE_DETACH has
132 + * grown its fair share of arch-specific warts and changing it
133 + * is likely to cause regressions on obscure architectures.
134 + */
135 + user_disable_single_step(child);
136 }
137
138 #ifdef CONFIG_HAVE_HW_BREAKPOINT
139 diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
140 index 74753132c3ac..bbdb53b87e13 100644
141 --- a/arch/arm64/kernel/setup.c
142 +++ b/arch/arm64/kernel/setup.c
143 @@ -523,6 +523,10 @@ static int c_show(struct seq_file *m, void *v)
144 seq_printf(m, "processor\t: %d\n", i);
145 #endif
146
147 + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
148 + loops_per_jiffy / (500000UL/HZ),
149 + loops_per_jiffy / (5000UL/HZ) % 100);
150 +
151 /*
152 * Dump out the common processor features in a single line.
153 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
154 diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
155 index 53f1f8dccf6c..357418137db7 100644
156 --- a/arch/arm64/kernel/suspend.c
157 +++ b/arch/arm64/kernel/suspend.c
158 @@ -1,3 +1,4 @@
159 +#include <linux/ftrace.h>
160 #include <linux/percpu.h>
161 #include <linux/slab.h>
162 #include <asm/cacheflush.h>
163 @@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
164 local_dbg_save(flags);
165
166 /*
167 + * Function graph tracer state gets inconsistent when the kernel
168 + * calls functions that never return (aka suspend finishers) hence
169 + * disable graph tracing during their execution.
170 + */
171 + pause_graph_tracing();
172 +
173 + /*
174 * mm context saved on the stack, it will be restored when
175 * the cpu comes out of reset through the identity mapped
176 * page tables, so that the thread address space is properly
177 @@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
178 hw_breakpoint_restore(NULL);
179 }
180
181 + unpause_graph_tracing();
182 +
183 /*
184 * Restore pstate flags. OS lock and mdscr have been already
185 * restored, so from this point onwards, debugging is fully
186 diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
187 index 85c57158dcd9..648112e90ed5 100644
188 --- a/arch/arm64/kvm/inject_fault.c
189 +++ b/arch/arm64/kvm/inject_fault.c
190 @@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
191
192 /* Note: These now point to the banked copies */
193 *vcpu_spsr(vcpu) = new_spsr_value;
194 - *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
195 + *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
196
197 /* Branch to exception vector */
198 if (sctlr & (1 << 13))
199 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
200 index 5b8b664422d3..cb34eb8bbb9d 100644
201 --- a/arch/arm64/mm/mmu.c
202 +++ b/arch/arm64/mm/mmu.c
203 @@ -450,6 +450,9 @@ void __init paging_init(void)
204
205 empty_zero_page = virt_to_page(zero_page);
206
207 + /* Ensure the zero page is visible to the page table walker */
208 + dsb(ishst);
209 +
210 /*
211 * TTBR0 is only used for the identity mapping at this stage. Make it
212 * point to zero page to avoid speculatively fetching new entries.
213 diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
214 index 98a26ce82d26..aee5637ea436 100644
215 --- a/arch/arm64/net/bpf_jit.h
216 +++ b/arch/arm64/net/bpf_jit.h
217 @@ -1,7 +1,7 @@
218 /*
219 * BPF JIT compiler for ARM64
220 *
221 - * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
222 + * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
223 *
224 * This program is free software; you can redistribute it and/or modify
225 * it under the terms of the GNU General Public License version 2 as
226 @@ -35,6 +35,7 @@
227 aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
228 AARCH64_INSN_BRANCH_COMP_##type)
229 #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
230 +#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
231
232 /* Conditional branch (immediate) */
233 #define A64_COND_BRANCH(cond, offset) \
234 diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
235 index c047598b09e0..6217f80702d2 100644
236 --- a/arch/arm64/net/bpf_jit_comp.c
237 +++ b/arch/arm64/net/bpf_jit_comp.c
238 @@ -1,7 +1,7 @@
239 /*
240 * BPF JIT compiler for ARM64
241 *
242 - * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
243 + * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
244 *
245 * This program is free software; you can redistribute it and/or modify
246 * it under the terms of the GNU General Public License version 2 as
247 @@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
248 u8 jmp_cond;
249 s32 jmp_offset;
250
251 +#define check_imm(bits, imm) do { \
252 + if ((((imm) > 0) && ((imm) >> (bits))) || \
253 + (((imm) < 0) && (~(imm) >> (bits)))) { \
254 + pr_info("[%2d] imm=%d(0x%x) out of range\n", \
255 + i, imm, imm); \
256 + return -EINVAL; \
257 + } \
258 +} while (0)
259 +#define check_imm19(imm) check_imm(19, imm)
260 +#define check_imm26(imm) check_imm(26, imm)
261 +
262 switch (code) {
263 /* dst = src */
264 case BPF_ALU | BPF_MOV | BPF_X:
265 @@ -258,15 +269,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
266 break;
267 case BPF_ALU | BPF_DIV | BPF_X:
268 case BPF_ALU64 | BPF_DIV | BPF_X:
269 - emit(A64_UDIV(is64, dst, dst, src), ctx);
270 - break;
271 case BPF_ALU | BPF_MOD | BPF_X:
272 case BPF_ALU64 | BPF_MOD | BPF_X:
273 - ctx->tmp_used = 1;
274 - emit(A64_UDIV(is64, tmp, dst, src), ctx);
275 - emit(A64_MUL(is64, tmp, tmp, src), ctx);
276 - emit(A64_SUB(is64, dst, dst, tmp), ctx);
277 + {
278 + const u8 r0 = bpf2a64[BPF_REG_0];
279 +
280 + /* if (src == 0) return 0 */
281 + jmp_offset = 3; /* skip ahead to else path */
282 + check_imm19(jmp_offset);
283 + emit(A64_CBNZ(is64, src, jmp_offset), ctx);
284 + emit(A64_MOVZ(1, r0, 0, 0), ctx);
285 + jmp_offset = epilogue_offset(ctx);
286 + check_imm26(jmp_offset);
287 + emit(A64_B(jmp_offset), ctx);
288 + /* else */
289 + switch (BPF_OP(code)) {
290 + case BPF_DIV:
291 + emit(A64_UDIV(is64, dst, dst, src), ctx);
292 + break;
293 + case BPF_MOD:
294 + ctx->tmp_used = 1;
295 + emit(A64_UDIV(is64, tmp, dst, src), ctx);
296 + emit(A64_MUL(is64, tmp, tmp, src), ctx);
297 + emit(A64_SUB(is64, dst, dst, tmp), ctx);
298 + break;
299 + }
300 break;
301 + }
302 case BPF_ALU | BPF_LSH | BPF_X:
303 case BPF_ALU64 | BPF_LSH | BPF_X:
304 emit(A64_LSLV(is64, dst, dst, src), ctx);
305 @@ -393,17 +422,6 @@ emit_bswap_uxt:
306 emit(A64_ASR(is64, dst, dst, imm), ctx);
307 break;
308
309 -#define check_imm(bits, imm) do { \
310 - if ((((imm) > 0) && ((imm) >> (bits))) || \
311 - (((imm) < 0) && (~(imm) >> (bits)))) { \
312 - pr_info("[%2d] imm=%d(0x%x) out of range\n", \
313 - i, imm, imm); \
314 - return -EINVAL; \
315 - } \
316 -} while (0)
317 -#define check_imm19(imm) check_imm(19, imm)
318 -#define check_imm26(imm) check_imm(26, imm)
319 -
320 /* JUMP off */
321 case BPF_JMP | BPF_JA:
322 jmp_offset = bpf2a64_offset(i + off, i, ctx);
323 diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
324 index e23fdf2a9c80..d6d27d51d131 100644
325 --- a/arch/mips/net/bpf_jit.c
326 +++ b/arch/mips/net/bpf_jit.c
327 @@ -556,19 +556,6 @@ static inline u16 align_sp(unsigned int num)
328 return num;
329 }
330
331 -static bool is_load_to_a(u16 inst)
332 -{
333 - switch (inst) {
334 - case BPF_LD | BPF_W | BPF_LEN:
335 - case BPF_LD | BPF_W | BPF_ABS:
336 - case BPF_LD | BPF_H | BPF_ABS:
337 - case BPF_LD | BPF_B | BPF_ABS:
338 - return true;
339 - default:
340 - return false;
341 - }
342 -}
343 -
344 static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
345 {
346 int i = 0, real_off = 0;
347 @@ -686,7 +673,6 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx)
348
349 static void build_prologue(struct jit_ctx *ctx)
350 {
351 - u16 first_inst = ctx->skf->insns[0].code;
352 int sp_off;
353
354 /* Calculate the total offset for the stack pointer */
355 @@ -700,7 +686,7 @@ static void build_prologue(struct jit_ctx *ctx)
356 emit_jit_reg_move(r_X, r_zero, ctx);
357
358 /* Do not leak kernel data to userspace */
359 - if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
360 + if (bpf_needs_clear_a(&ctx->skf->insns[0]))
361 emit_jit_reg_move(r_A, r_zero, ctx);
362 }
363
364 diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
365 index 4434b54e1d87..78ae5552fdb8 100644
366 --- a/arch/mn10300/Kconfig
367 +++ b/arch/mn10300/Kconfig
368 @@ -1,6 +1,7 @@
369 config MN10300
370 def_bool y
371 select HAVE_OPROFILE
372 + select HAVE_UID16
373 select GENERIC_IRQ_SHOW
374 select ARCH_WANT_IPC_PARSE_VERSION
375 select HAVE_ARCH_TRACEHOOK
376 @@ -37,9 +38,6 @@ config HIGHMEM
377 config NUMA
378 def_bool n
379
380 -config UID16
381 - def_bool y
382 -
383 config RWSEM_GENERIC_SPINLOCK
384 def_bool y
385
386 diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
387 index d463c68fe7f0..99897f6645c1 100644
388 --- a/arch/powerpc/include/asm/cmpxchg.h
389 +++ b/arch/powerpc/include/asm/cmpxchg.h
390 @@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
391 unsigned long prev;
392
393 __asm__ __volatile__(
394 - PPC_RELEASE_BARRIER
395 + PPC_ATOMIC_ENTRY_BARRIER
396 "1: lwarx %0,0,%2 \n"
397 PPC405_ERR77(0,%2)
398 " stwcx. %3,0,%2 \n\
399 bne- 1b"
400 - PPC_ACQUIRE_BARRIER
401 + PPC_ATOMIC_EXIT_BARRIER
402 : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
403 : "r" (p), "r" (val)
404 : "cc", "memory");
405 @@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
406 unsigned long prev;
407
408 __asm__ __volatile__(
409 - PPC_RELEASE_BARRIER
410 + PPC_ATOMIC_ENTRY_BARRIER
411 "1: ldarx %0,0,%2 \n"
412 PPC405_ERR77(0,%2)
413 " stdcx. %3,0,%2 \n\
414 bne- 1b"
415 - PPC_ACQUIRE_BARRIER
416 + PPC_ATOMIC_EXIT_BARRIER
417 : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
418 : "r" (p), "r" (val)
419 : "cc", "memory");
420 @@ -152,14 +152,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
421 unsigned int prev;
422
423 __asm__ __volatile__ (
424 - PPC_RELEASE_BARRIER
425 + PPC_ATOMIC_ENTRY_BARRIER
426 "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
427 cmpw 0,%0,%3\n\
428 bne- 2f\n"
429 PPC405_ERR77(0,%2)
430 " stwcx. %4,0,%2\n\
431 bne- 1b"
432 - PPC_ACQUIRE_BARRIER
433 + PPC_ATOMIC_EXIT_BARRIER
434 "\n\
435 2:"
436 : "=&r" (prev), "+m" (*p)
437 @@ -198,13 +198,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
438 unsigned long prev;
439
440 __asm__ __volatile__ (
441 - PPC_RELEASE_BARRIER
442 + PPC_ATOMIC_ENTRY_BARRIER
443 "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
444 cmpd 0,%0,%3\n\
445 bne- 2f\n\
446 stdcx. %4,0,%2\n\
447 bne- 1b"
448 - PPC_ACQUIRE_BARRIER
449 + PPC_ATOMIC_EXIT_BARRIER
450 "\n\
451 2:"
452 : "=&r" (prev), "+m" (*p)
453 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
454 index af56b5c6c81a..f4f99f01b746 100644
455 --- a/arch/powerpc/include/asm/reg.h
456 +++ b/arch/powerpc/include/asm/reg.h
457 @@ -108,6 +108,7 @@
458 #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
459 #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
460 #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
461 +#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
462 #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
463 #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
464
465 diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
466 index e682a7143edb..c50868681f9e 100644
467 --- a/arch/powerpc/include/asm/synch.h
468 +++ b/arch/powerpc/include/asm/synch.h
469 @@ -44,7 +44,7 @@ static inline void isync(void)
470 MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
471 #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
472 #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
473 -#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
474 +#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
475 #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
476 #else
477 #define PPC_ACQUIRE_BARRIER
478 diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
479 index 59dad113897b..c2d21d11c2d2 100644
480 --- a/arch/powerpc/include/uapi/asm/elf.h
481 +++ b/arch/powerpc/include/uapi/asm/elf.h
482 @@ -295,6 +295,8 @@ do { \
483 #define R_PPC64_TLSLD 108
484 #define R_PPC64_TOCSAVE 109
485
486 +#define R_PPC64_ENTRY 118
487 +
488 #define R_PPC64_REL16 249
489 #define R_PPC64_REL16_LO 250
490 #define R_PPC64_REL16_HI 251
491 diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
492 index 68384514506b..59663af9315f 100644
493 --- a/arch/powerpc/kernel/module_64.c
494 +++ b/arch/powerpc/kernel/module_64.c
495 @@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
496 */
497 break;
498
499 + case R_PPC64_ENTRY:
500 + /*
501 + * Optimize ELFv2 large code model entry point if
502 + * the TOC is within 2GB range of current location.
503 + */
504 + value = my_r2(sechdrs, me) - (unsigned long)location;
505 + if (value + 0x80008000 > 0xffffffff)
506 + break;
507 + /*
508 + * Check for the large code model prolog sequence:
509 + * ld r2, ...(r12)
510 + * add r2, r2, r12
511 + */
512 + if ((((uint32_t *)location)[0] & ~0xfffc)
513 + != 0xe84c0000)
514 + break;
515 + if (((uint32_t *)location)[1] != 0x7c426214)
516 + break;
517 + /*
518 + * If found, replace it with:
519 + * addis r2, r12, (.TOC.-func)@ha
520 + * addi r2, r12, (.TOC.-func)@l
521 + */
522 + ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
523 + ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
524 + break;
525 +
526 case R_PPC64_REL16_HA:
527 /* Subtract location pointer */
528 value -= (unsigned long)location;
529 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
530 index 0596373cd1c3..c8c8275765e7 100644
531 --- a/arch/powerpc/kernel/process.c
532 +++ b/arch/powerpc/kernel/process.c
533 @@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
534 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
535 }
536
537 + /*
538 + * Use the current MSR TM suspended bit to track if we have
539 + * checkpointed state outstanding.
540 + * On signal delivery, we'd normally reclaim the checkpointed
541 + * state to obtain stack pointer (see:get_tm_stackpointer()).
542 + * This will then directly return to userspace without going
543 + * through __switch_to(). However, if the stack frame is bad,
544 + * we need to exit this thread which calls __switch_to() which
545 + * will again attempt to reclaim the already saved tm state.
546 + * Hence we need to check that we've not already reclaimed
547 + * this state.
548 + * We do this using the current MSR, rather tracking it in
549 + * some specific thread_struct bit, as it has the additional
550 + * benefit of checking for a potential TM bad thing exception.
551 + */
552 + if (!MSR_TM_SUSPENDED(mfmsr()))
553 + return;
554 +
555 tm_reclaim(thr, thr->regs->msr, cause);
556
557 /* Having done the reclaim, we now have the checkpointed
558 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
559 index da50e0c9c57e..7356c33dc897 100644
560 --- a/arch/powerpc/kernel/signal_32.c
561 +++ b/arch/powerpc/kernel/signal_32.c
562 @@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
563 return 1;
564 #endif /* CONFIG_SPE */
565
566 + /* Get the top half of the MSR from the user context */
567 + if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
568 + return 1;
569 + msr_hi <<= 32;
570 + /* If TM bits are set to the reserved value, it's an invalid context */
571 + if (MSR_TM_RESV(msr_hi))
572 + return 1;
573 + /* Pull in the MSR TM bits from the user context */
574 + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
575 /* Now, recheckpoint. This loads up all of the checkpointed (older)
576 * registers, including FP and V[S]Rs. After recheckpointing, the
577 * transactional versions should be loaded.
578 @@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
579 current->thread.tm_texasr |= TEXASR_FS;
580 /* This loads the checkpointed FP/VEC state, if used */
581 tm_recheckpoint(&current->thread, msr);
582 - /* Get the top half of the MSR */
583 - if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
584 - return 1;
585 - /* Pull in MSR TM from user context */
586 - regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
587
588 /* This loads the speculative FP/VEC state, if used */
589 if (msr & MSR_FP) {
590 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
591 index c7c24d2e2bdb..164fd6474843 100644
592 --- a/arch/powerpc/kernel/signal_64.c
593 +++ b/arch/powerpc/kernel/signal_64.c
594 @@ -427,6 +427,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
595
596 /* get MSR separately, transfer the LE bit if doing signal return */
597 err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
598 + /* Don't allow reserved mode. */
599 + if (MSR_TM_RESV(msr))
600 + return -EINVAL;
601 +
602 /* pull in MSR TM from user context */
603 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
604
605 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
606 index f1e0e5522e3a..f5b3de7f7fa2 100644
607 --- a/arch/powerpc/kvm/book3s_hv.c
608 +++ b/arch/powerpc/kvm/book3s_hv.c
609 @@ -210,6 +210,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
610
611 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
612 {
613 + /*
614 + * Check for illegal transactional state bit combination
615 + * and if we find it, force the TS field to a safe state.
616 + */
617 + if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
618 + msr &= ~MSR_TS_MASK;
619 vcpu->arch.shregs.msr = msr;
620 kvmppc_end_cede(vcpu);
621 }
622 diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
623 index 17cea18a09d3..264c473c1b3c 100644
624 --- a/arch/powerpc/net/bpf_jit_comp.c
625 +++ b/arch/powerpc/net/bpf_jit_comp.c
626 @@ -78,18 +78,9 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
627 PPC_LI(r_X, 0);
628 }
629
630 - switch (filter[0].code) {
631 - case BPF_RET | BPF_K:
632 - case BPF_LD | BPF_W | BPF_LEN:
633 - case BPF_LD | BPF_W | BPF_ABS:
634 - case BPF_LD | BPF_H | BPF_ABS:
635 - case BPF_LD | BPF_B | BPF_ABS:
636 - /* first instruction sets A register (or is RET 'constant') */
637 - break;
638 - default:
639 - /* make sure we dont leak kernel information to user */
640 + /* make sure we dont leak kernel information to user */
641 + if (bpf_needs_clear_a(&filter[0]))
642 PPC_LI(r_A, 0);
643 - }
644 }
645
646 static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
647 diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
648 index 2241565b0739..b831a2ee32e9 100644
649 --- a/arch/powerpc/platforms/powernv/opal.c
650 +++ b/arch/powerpc/platforms/powernv/opal.c
651 @@ -358,7 +358,7 @@ static void opal_handle_message(void)
652
653 /* Sanity check */
654 if (type >= OPAL_MSG_TYPE_MAX) {
655 - pr_warning("%s: Unknown message type: %u\n", __func__, type);
656 + pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
657 return;
658 }
659 opal_message_do_notify(type, (void *)&msg);
660 diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
661 index 7931eeeb649a..8109e92cd619 100644
662 --- a/arch/sparc/net/bpf_jit_comp.c
663 +++ b/arch/sparc/net/bpf_jit_comp.c
664 @@ -420,22 +420,9 @@ void bpf_jit_compile(struct bpf_prog *fp)
665 }
666 emit_reg_move(O7, r_saved_O7);
667
668 - switch (filter[0].code) {
669 - case BPF_RET | BPF_K:
670 - case BPF_LD | BPF_W | BPF_LEN:
671 - case BPF_LD | BPF_W | BPF_ABS:
672 - case BPF_LD | BPF_H | BPF_ABS:
673 - case BPF_LD | BPF_B | BPF_ABS:
674 - /* The first instruction sets the A register (or is
675 - * a "RET 'constant'")
676 - */
677 - break;
678 - default:
679 - /* Make sure we dont leak kernel information to the
680 - * user.
681 - */
682 + /* Make sure we dont leak kernel information to the user. */
683 + if (bpf_needs_clear_a(&filter[0]))
684 emit_clear(r_A); /* A = 0 */
685 - }
686
687 for (i = 0; i < flen; i++) {
688 unsigned int K = filter[i].k;
689 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
690 index 4fa687a47a62..6b8d6e8cd449 100644
691 --- a/arch/x86/include/asm/boot.h
692 +++ b/arch/x86/include/asm/boot.h
693 @@ -27,7 +27,7 @@
694 #define BOOT_HEAP_SIZE 0x400000
695 #else /* !CONFIG_KERNEL_BZIP2 */
696
697 -#define BOOT_HEAP_SIZE 0x8000
698 +#define BOOT_HEAP_SIZE 0x10000
699
700 #endif /* !CONFIG_KERNEL_BZIP2 */
701
702 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
703 index 80d67dd80351..73e38f14ddeb 100644
704 --- a/arch/x86/include/asm/mmu_context.h
705 +++ b/arch/x86/include/asm/mmu_context.h
706 @@ -104,8 +104,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
707 #endif
708 cpumask_set_cpu(cpu, mm_cpumask(next));
709
710 - /* Re-load page tables */
711 + /*
712 + * Re-load page tables.
713 + *
714 + * This logic has an ordering constraint:
715 + *
716 + * CPU 0: Write to a PTE for 'next'
717 + * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
718 + * CPU 1: set bit 1 in next's mm_cpumask
719 + * CPU 1: load from the PTE that CPU 0 writes (implicit)
720 + *
721 + * We need to prevent an outcome in which CPU 1 observes
722 + * the new PTE value and CPU 0 observes bit 1 clear in
723 + * mm_cpumask. (If that occurs, then the IPI will never
724 + * be sent, and CPU 0's TLB will contain a stale entry.)
725 + *
726 + * The bad outcome can occur if either CPU's load is
727 + * reordered before that CPU's store, so both CPUs must
728 + * execute full barriers to prevent this from happening.
729 + *
730 + * Thus, switch_mm needs a full barrier between the
731 + * store to mm_cpumask and any operation that could load
732 + * from next->pgd. TLB fills are special and can happen
733 + * due to instruction fetches or for no reason at all,
734 + * and neither LOCK nor MFENCE orders them.
735 + * Fortunately, load_cr3() is serializing and gives the
736 + * ordering guarantee we need.
737 + *
738 + */
739 load_cr3(next->pgd);
740 +
741 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
742
743 /* Stop flush ipis for the previous mm */
744 @@ -142,10 +170,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
745 * schedule, protecting us from simultaneous changes.
746 */
747 cpumask_set_cpu(cpu, mm_cpumask(next));
748 +
749 /*
750 * We were in lazy tlb mode and leave_mm disabled
751 * tlb flush IPI delivery. We must reload CR3
752 * to make sure to use no freed page tables.
753 + *
754 + * As above, load_cr3() is serializing and orders TLB
755 + * fills with respect to the mm_cpumask write.
756 */
757 load_cr3(next->pgd);
758 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
759 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
760 index 86db4bcd7ce5..0549ae3cb332 100644
761 --- a/arch/x86/kernel/reboot.c
762 +++ b/arch/x86/kernel/reboot.c
763 @@ -182,6 +182,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
764 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
765 },
766 },
767 + { /* Handle problems with rebooting on the iMac10,1. */
768 + .callback = set_pci_reboot,
769 + .ident = "Apple iMac10,1",
770 + .matches = {
771 + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
772 + DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
773 + },
774 + },
775
776 /* ASRock */
777 { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
778 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
779 index e0fd5f47fbb9..5d2e2e9af1c4 100644
780 --- a/arch/x86/kernel/signal.c
781 +++ b/arch/x86/kernel/signal.c
782 @@ -667,12 +667,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
783 signal_setup_done(failed, ksig, stepping);
784 }
785
786 -#ifdef CONFIG_X86_32
787 -#define NR_restart_syscall __NR_restart_syscall
788 -#else /* !CONFIG_X86_32 */
789 -#define NR_restart_syscall \
790 - test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
791 -#endif /* CONFIG_X86_32 */
792 +static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
793 +{
794 +#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
795 + return __NR_restart_syscall;
796 +#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
797 + return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
798 + __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
799 +#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
800 +}
801
802 /*
803 * Note that 'init' is a special process: it doesn't get signals it doesn't
804 @@ -701,7 +704,7 @@ static void do_signal(struct pt_regs *regs)
805 break;
806
807 case -ERESTART_RESTARTBLOCK:
808 - regs->ax = NR_restart_syscall;
809 + regs->ax = get_nr_restart_syscall(regs);
810 regs->ip -= 2;
811 break;
812 }
813 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
814 index 454ccb082e18..0d039cd268a8 100644
815 --- a/arch/x86/kvm/svm.c
816 +++ b/arch/x86/kvm/svm.c
817 @@ -1106,6 +1106,7 @@ static void init_vmcb(struct vcpu_svm *svm)
818 set_exception_intercept(svm, UD_VECTOR);
819 set_exception_intercept(svm, MC_VECTOR);
820 set_exception_intercept(svm, AC_VECTOR);
821 + set_exception_intercept(svm, DB_VECTOR);
822
823 set_intercept(svm, INTERCEPT_INTR);
824 set_intercept(svm, INTERCEPT_NMI);
825 @@ -1638,20 +1639,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
826 mark_dirty(svm->vmcb, VMCB_SEG);
827 }
828
829 -static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
830 +static void update_bp_intercept(struct kvm_vcpu *vcpu)
831 {
832 struct vcpu_svm *svm = to_svm(vcpu);
833
834 - clr_exception_intercept(svm, DB_VECTOR);
835 clr_exception_intercept(svm, BP_VECTOR);
836
837 - if (svm->nmi_singlestep)
838 - set_exception_intercept(svm, DB_VECTOR);
839 -
840 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
841 - if (vcpu->guest_debug &
842 - (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
843 - set_exception_intercept(svm, DB_VECTOR);
844 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
845 set_exception_intercept(svm, BP_VECTOR);
846 } else
847 @@ -1757,7 +1751,6 @@ static int db_interception(struct vcpu_svm *svm)
848 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
849 svm->vmcb->save.rflags &=
850 ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
851 - update_db_bp_intercept(&svm->vcpu);
852 }
853
854 if (svm->vcpu.guest_debug &
855 @@ -3751,7 +3744,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
856 */
857 svm->nmi_singlestep = true;
858 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
859 - update_db_bp_intercept(vcpu);
860 }
861
862 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
863 @@ -4367,7 +4359,7 @@ static struct kvm_x86_ops svm_x86_ops = {
864 .vcpu_load = svm_vcpu_load,
865 .vcpu_put = svm_vcpu_put,
866
867 - .update_db_bp_intercept = update_db_bp_intercept,
868 + .update_db_bp_intercept = update_bp_intercept,
869 .get_msr = svm_get_msr,
870 .set_msr = svm_set_msr,
871 .get_segment_base = svm_get_segment_base,
872 diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
873 index 7c7bc8bef21f..21dda139eb3a 100644
874 --- a/arch/x86/kvm/trace.h
875 +++ b/arch/x86/kvm/trace.h
876 @@ -250,7 +250,7 @@ TRACE_EVENT(kvm_inj_virq,
877 #define kvm_trace_sym_exc \
878 EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
879 EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
880 - EXS(MF), EXS(MC)
881 + EXS(MF), EXS(AC), EXS(MC)
882
883 /*
884 * Tracepoint for kvm interrupt injection:
885 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
886 index a243854c35d5..945f9e13f1aa 100644
887 --- a/arch/x86/kvm/vmx.c
888 +++ b/arch/x86/kvm/vmx.c
889 @@ -3652,20 +3652,21 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
890 if (!is_paging(vcpu)) {
891 hw_cr4 &= ~X86_CR4_PAE;
892 hw_cr4 |= X86_CR4_PSE;
893 - /*
894 - * SMEP/SMAP is disabled if CPU is in non-paging mode
895 - * in hardware. However KVM always uses paging mode to
896 - * emulate guest non-paging mode with TDP.
897 - * To emulate this behavior, SMEP/SMAP needs to be
898 - * manually disabled when guest switches to non-paging
899 - * mode.
900 - */
901 - hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
902 } else if (!(cr4 & X86_CR4_PAE)) {
903 hw_cr4 &= ~X86_CR4_PAE;
904 }
905 }
906
907 + if (!enable_unrestricted_guest && !is_paging(vcpu))
908 + /*
909 + * SMEP/SMAP is disabled if CPU is in non-paging mode in
910 + * hardware. However KVM always uses paging mode without
911 + * unrestricted guest.
912 + * To emulate this behavior, SMEP/SMAP needs to be manually
913 + * disabled when guest switches to non-paging mode.
914 + */
915 + hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
916 +
917 vmcs_writel(CR4_READ_SHADOW, cr4);
918 vmcs_writel(GUEST_CR4, hw_cr4);
919 return 0;
920 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
921 index 47a32f743a91..fed4c84eac44 100644
922 --- a/arch/x86/kvm/x86.c
923 +++ b/arch/x86/kvm/x86.c
924 @@ -940,7 +940,7 @@ static u32 msrs_to_save[] = {
925 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
926 #endif
927 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
928 - MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
929 + MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
930 };
931
932 static unsigned num_msrs_to_save;
933 @@ -4117,16 +4117,17 @@ static void kvm_init_msr_list(void)
934
935 /*
936 * Even MSRs that are valid in the host may not be exposed
937 - * to the guests in some cases. We could work around this
938 - * in VMX with the generic MSR save/load machinery, but it
939 - * is not really worthwhile since it will really only
940 - * happen with nested virtualization.
941 + * to the guests in some cases.
942 */
943 switch (msrs_to_save[i]) {
944 case MSR_IA32_BNDCFGS:
945 if (!kvm_x86_ops->mpx_supported())
946 continue;
947 break;
948 + case MSR_TSC_AUX:
949 + if (!kvm_x86_ops->rdtscp_supported())
950 + continue;
951 + break;
952 default:
953 break;
954 }
955 diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
956 index 4d1c11c07fe1..f738c61bc891 100644
957 --- a/arch/x86/mm/mpx.c
958 +++ b/arch/x86/mm/mpx.c
959 @@ -120,19 +120,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
960 switch (type) {
961 case REG_TYPE_RM:
962 regno = X86_MODRM_RM(insn->modrm.value);
963 - if (X86_REX_B(insn->rex_prefix.value) == 1)
964 + if (X86_REX_B(insn->rex_prefix.value))
965 regno += 8;
966 break;
967
968 case REG_TYPE_INDEX:
969 regno = X86_SIB_INDEX(insn->sib.value);
970 - if (X86_REX_X(insn->rex_prefix.value) == 1)
971 + if (X86_REX_X(insn->rex_prefix.value))
972 regno += 8;
973 break;
974
975 case REG_TYPE_BASE:
976 regno = X86_SIB_BASE(insn->sib.value);
977 - if (X86_REX_B(insn->rex_prefix.value) == 1)
978 + if (X86_REX_B(insn->rex_prefix.value))
979 regno += 8;
980 break;
981
982 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
983 index 90b924acd982..061e0114005e 100644
984 --- a/arch/x86/mm/tlb.c
985 +++ b/arch/x86/mm/tlb.c
986 @@ -160,7 +160,10 @@ void flush_tlb_current_task(void)
987 preempt_disable();
988
989 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
990 +
991 + /* This is an implicit full barrier that synchronizes with switch_mm. */
992 local_flush_tlb();
993 +
994 trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
995 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
996 flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
997 @@ -187,17 +190,29 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
998 unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
999
1000 preempt_disable();
1001 - if (current->active_mm != mm)
1002 + if (current->active_mm != mm) {
1003 + /* Synchronize with switch_mm. */
1004 + smp_mb();
1005 +
1006 goto out;
1007 + }
1008
1009 if (!current->mm) {
1010 leave_mm(smp_processor_id());
1011 +
1012 + /* Synchronize with switch_mm. */
1013 + smp_mb();
1014 +
1015 goto out;
1016 }
1017
1018 if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
1019 base_pages_to_flush = (end - start) >> PAGE_SHIFT;
1020
1021 + /*
1022 + * Both branches below are implicit full barriers (MOV to CR or
1023 + * INVLPG) that synchronize with switch_mm.
1024 + */
1025 if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
1026 base_pages_to_flush = TLB_FLUSH_ALL;
1027 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
1028 @@ -227,10 +242,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
1029 preempt_disable();
1030
1031 if (current->active_mm == mm) {
1032 - if (current->mm)
1033 + if (current->mm) {
1034 + /*
1035 + * Implicit full barrier (INVLPG) that synchronizes
1036 + * with switch_mm.
1037 + */
1038 __flush_tlb_one(start);
1039 - else
1040 + } else {
1041 leave_mm(smp_processor_id());
1042 +
1043 + /* Synchronize with switch_mm. */
1044 + smp_mb();
1045 + }
1046 }
1047
1048 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
1049 diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
1050 index 53b4c0811f4f..6d3415144dab 100644
1051 --- a/arch/x86/xen/suspend.c
1052 +++ b/arch/x86/xen/suspend.c
1053 @@ -32,7 +32,8 @@ static void xen_hvm_post_suspend(int suspend_cancelled)
1054 {
1055 #ifdef CONFIG_XEN_PVHVM
1056 int cpu;
1057 - xen_hvm_init_shared_info();
1058 + if (!suspend_cancelled)
1059 + xen_hvm_init_shared_info();
1060 xen_callback_vector();
1061 xen_unplug_emulated_devices();
1062 if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
1063 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1064 index 8a45e92ff60c..05222706dc66 100644
1065 --- a/drivers/char/ipmi/ipmi_si_intf.c
1066 +++ b/drivers/char/ipmi/ipmi_si_intf.c
1067 @@ -404,18 +404,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
1068 return rv;
1069 }
1070
1071 -static void start_check_enables(struct smi_info *smi_info)
1072 +static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
1073 +{
1074 + smi_info->last_timeout_jiffies = jiffies;
1075 + mod_timer(&smi_info->si_timer, new_val);
1076 + smi_info->timer_running = true;
1077 +}
1078 +
1079 +/*
1080 + * Start a new message and (re)start the timer and thread.
1081 + */
1082 +static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
1083 + unsigned int size)
1084 +{
1085 + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
1086 +
1087 + if (smi_info->thread)
1088 + wake_up_process(smi_info->thread);
1089 +
1090 + smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
1091 +}
1092 +
1093 +static void start_check_enables(struct smi_info *smi_info, bool start_timer)
1094 {
1095 unsigned char msg[2];
1096
1097 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
1098 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1099
1100 - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1101 + if (start_timer)
1102 + start_new_msg(smi_info, msg, 2);
1103 + else
1104 + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1105 smi_info->si_state = SI_CHECKING_ENABLES;
1106 }
1107
1108 -static void start_clear_flags(struct smi_info *smi_info)
1109 +static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
1110 {
1111 unsigned char msg[3];
1112
1113 @@ -424,7 +448,10 @@ static void start_clear_flags(struct smi_info *smi_info)
1114 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
1115 msg[2] = WDT_PRE_TIMEOUT_INT;
1116
1117 - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1118 + if (start_timer)
1119 + start_new_msg(smi_info, msg, 3);
1120 + else
1121 + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1122 smi_info->si_state = SI_CLEARING_FLAGS;
1123 }
1124
1125 @@ -434,10 +461,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
1126 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
1127 smi_info->curr_msg->data_size = 2;
1128
1129 - smi_info->handlers->start_transaction(
1130 - smi_info->si_sm,
1131 - smi_info->curr_msg->data,
1132 - smi_info->curr_msg->data_size);
1133 + start_new_msg(smi_info, smi_info->curr_msg->data,
1134 + smi_info->curr_msg->data_size);
1135 smi_info->si_state = SI_GETTING_MESSAGES;
1136 }
1137
1138 @@ -447,20 +472,11 @@ static void start_getting_events(struct smi_info *smi_info)
1139 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
1140 smi_info->curr_msg->data_size = 2;
1141
1142 - smi_info->handlers->start_transaction(
1143 - smi_info->si_sm,
1144 - smi_info->curr_msg->data,
1145 - smi_info->curr_msg->data_size);
1146 + start_new_msg(smi_info, smi_info->curr_msg->data,
1147 + smi_info->curr_msg->data_size);
1148 smi_info->si_state = SI_GETTING_EVENTS;
1149 }
1150
1151 -static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
1152 -{
1153 - smi_info->last_timeout_jiffies = jiffies;
1154 - mod_timer(&smi_info->si_timer, new_val);
1155 - smi_info->timer_running = true;
1156 -}
1157 -
1158 /*
1159 * When we have a situtaion where we run out of memory and cannot
1160 * allocate messages, we just leave them in the BMC and run the system
1161 @@ -470,11 +486,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
1162 * Note that we cannot just use disable_irq(), since the interrupt may
1163 * be shared.
1164 */
1165 -static inline bool disable_si_irq(struct smi_info *smi_info)
1166 +static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
1167 {
1168 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1169 smi_info->interrupt_disabled = true;
1170 - start_check_enables(smi_info);
1171 + start_check_enables(smi_info, start_timer);
1172 return true;
1173 }
1174 return false;
1175 @@ -484,7 +500,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
1176 {
1177 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
1178 smi_info->interrupt_disabled = false;
1179 - start_check_enables(smi_info);
1180 + start_check_enables(smi_info, true);
1181 return true;
1182 }
1183 return false;
1184 @@ -502,7 +518,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
1185
1186 msg = ipmi_alloc_smi_msg();
1187 if (!msg) {
1188 - if (!disable_si_irq(smi_info))
1189 + if (!disable_si_irq(smi_info, true))
1190 smi_info->si_state = SI_NORMAL;
1191 } else if (enable_si_irq(smi_info)) {
1192 ipmi_free_smi_msg(msg);
1193 @@ -518,7 +534,7 @@ static void handle_flags(struct smi_info *smi_info)
1194 /* Watchdog pre-timeout */
1195 smi_inc_stat(smi_info, watchdog_pretimeouts);
1196
1197 - start_clear_flags(smi_info);
1198 + start_clear_flags(smi_info, true);
1199 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
1200 if (smi_info->intf)
1201 ipmi_smi_watchdog_pretimeout(smi_info->intf);
1202 @@ -870,8 +886,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
1203 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
1204 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
1205
1206 - smi_info->handlers->start_transaction(
1207 - smi_info->si_sm, msg, 2);
1208 + start_new_msg(smi_info, msg, 2);
1209 smi_info->si_state = SI_GETTING_FLAGS;
1210 goto restart;
1211 }
1212 @@ -901,7 +916,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
1213 * disable and messages disabled.
1214 */
1215 if (smi_info->supports_event_msg_buff || smi_info->irq) {
1216 - start_check_enables(smi_info);
1217 + start_check_enables(smi_info, true);
1218 } else {
1219 smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
1220 if (!smi_info->curr_msg)
1221 @@ -1203,14 +1218,14 @@ static int smi_start_processing(void *send_info,
1222
1223 new_smi->intf = intf;
1224
1225 - /* Try to claim any interrupts. */
1226 - if (new_smi->irq_setup)
1227 - new_smi->irq_setup(new_smi);
1228 -
1229 /* Set up the timer that drives the interface. */
1230 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1231 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1232
1233 + /* Try to claim any interrupts. */
1234 + if (new_smi->irq_setup)
1235 + new_smi->irq_setup(new_smi);
1236 +
1237 /*
1238 * Check if the user forcefully enabled the daemon.
1239 */
1240 @@ -3515,7 +3530,7 @@ static int try_smi_init(struct smi_info *new_smi)
1241 * Start clearing the flags before we enable interrupts or the
1242 * timer to avoid racing with the timer.
1243 */
1244 - start_clear_flags(new_smi);
1245 + start_clear_flags(new_smi, false);
1246
1247 /*
1248 * IRQ is defined to be set when non-zero. req_events will
1249 @@ -3817,7 +3832,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
1250 poll(to_clean);
1251 schedule_timeout_uninterruptible(1);
1252 }
1253 - disable_si_irq(to_clean);
1254 + disable_si_irq(to_clean, false);
1255 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
1256 poll(to_clean);
1257 schedule_timeout_uninterruptible(1);
1258 diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
1259 index 30f522848c73..c19e7fc717c3 100644
1260 --- a/drivers/connector/connector.c
1261 +++ b/drivers/connector/connector.c
1262 @@ -178,26 +178,21 @@ static int cn_call_callback(struct sk_buff *skb)
1263 *
1264 * It checks skb, netlink header and msg sizes, and calls callback helper.
1265 */
1266 -static void cn_rx_skb(struct sk_buff *__skb)
1267 +static void cn_rx_skb(struct sk_buff *skb)
1268 {
1269 struct nlmsghdr *nlh;
1270 - struct sk_buff *skb;
1271 int len, err;
1272
1273 - skb = skb_get(__skb);
1274 -
1275 if (skb->len >= NLMSG_HDRLEN) {
1276 nlh = nlmsg_hdr(skb);
1277 len = nlmsg_len(nlh);
1278
1279 if (len < (int)sizeof(struct cn_msg) ||
1280 skb->len < nlh->nlmsg_len ||
1281 - len > CONNECTOR_MAX_MSG_SIZE) {
1282 - kfree_skb(skb);
1283 + len > CONNECTOR_MAX_MSG_SIZE)
1284 return;
1285 - }
1286
1287 - err = cn_call_callback(skb);
1288 + err = cn_call_callback(skb_get(skb));
1289 if (err < 0)
1290 kfree_skb(skb);
1291 }
1292 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1293 index 722a925795a2..9ce9dfeb1258 100644
1294 --- a/drivers/hid/hid-core.c
1295 +++ b/drivers/hid/hid-core.c
1296 @@ -1589,7 +1589,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
1297 "Multi-Axis Controller"
1298 };
1299 const char *type, *bus;
1300 - char buf[64];
1301 + char buf[64] = "";
1302 unsigned int i;
1303 int len;
1304 int ret;
1305 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1306 index 8b0178db6a04..b85a8614c128 100644
1307 --- a/drivers/iommu/intel-iommu.c
1308 +++ b/drivers/iommu/intel-iommu.c
1309 @@ -3928,14 +3928,17 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
1310 dev = pci_physfn(dev);
1311 for (bus = dev->bus; bus; bus = bus->parent) {
1312 bridge = bus->self;
1313 - if (!bridge || !pci_is_pcie(bridge) ||
1314 + /* If it's an integrated device, allow ATS */
1315 + if (!bridge)
1316 + return 1;
1317 + /* Connected via non-PCIe: no ATS */
1318 + if (!pci_is_pcie(bridge) ||
1319 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
1320 return 0;
1321 + /* If we found the root port, look it up in the ATSR */
1322 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
1323 break;
1324 }
1325 - if (!bridge)
1326 - return 0;
1327
1328 rcu_read_lock();
1329 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
1330 diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
1331 index c4198fa490bf..9c1e8adaf4fc 100644
1332 --- a/drivers/isdn/i4l/isdn_ppp.c
1333 +++ b/drivers/isdn/i4l/isdn_ppp.c
1334 @@ -301,6 +301,8 @@ isdn_ppp_open(int min, struct file *file)
1335 is->compflags = 0;
1336
1337 is->reset = isdn_ppp_ccp_reset_alloc(is);
1338 + if (!is->reset)
1339 + return -ENOMEM;
1340
1341 is->lp = NULL;
1342 is->mp_seqno = 0; /* MP sequence number */
1343 @@ -320,6 +322,10 @@ isdn_ppp_open(int min, struct file *file)
1344 * VJ header compression init
1345 */
1346 is->slcomp = slhc_init(16, 16); /* not necessary for 2. link in bundle */
1347 + if (IS_ERR(is->slcomp)) {
1348 + isdn_ppp_ccp_reset_free(is);
1349 + return PTR_ERR(is->slcomp);
1350 + }
1351 #endif
1352 #ifdef CONFIG_IPPP_FILTER
1353 is->pass_filter = NULL;
1354 @@ -567,10 +573,8 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
1355 is->maxcid = val;
1356 #ifdef CONFIG_ISDN_PPP_VJ
1357 sltmp = slhc_init(16, val);
1358 - if (!sltmp) {
1359 - printk(KERN_ERR "ippp, can't realloc slhc struct\n");
1360 - return -ENOMEM;
1361 - }
1362 + if (IS_ERR(sltmp))
1363 + return PTR_ERR(sltmp);
1364 if (is->slcomp)
1365 slhc_free(is->slcomp);
1366 is->slcomp = sltmp;
1367 diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
1368 index 084d346fb4c4..e15eef6a94e5 100644
1369 --- a/drivers/media/platform/vivid/vivid-osd.c
1370 +++ b/drivers/media/platform/vivid/vivid-osd.c
1371 @@ -85,6 +85,7 @@ static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg)
1372 case FBIOGET_VBLANK: {
1373 struct fb_vblank vblank;
1374
1375 + memset(&vblank, 0, sizeof(vblank));
1376 vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
1377 FB_VBLANK_HAVE_VSYNC;
1378 vblank.count = 0;
1379 diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
1380 index 4069234abed5..a50750ce511d 100644
1381 --- a/drivers/media/usb/airspy/airspy.c
1382 +++ b/drivers/media/usb/airspy/airspy.c
1383 @@ -132,7 +132,7 @@ struct airspy {
1384 int urbs_submitted;
1385
1386 /* USB control message buffer */
1387 - #define BUF_SIZE 24
1388 + #define BUF_SIZE 128
1389 u8 buf[BUF_SIZE];
1390
1391 /* Current configuration */
1392 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1393 index 16d87bf8ac3c..72ba774df7a7 100644
1394 --- a/drivers/net/bonding/bond_main.c
1395 +++ b/drivers/net/bonding/bond_main.c
1396 @@ -1194,7 +1194,6 @@ static int bond_master_upper_dev_link(struct net_device *bond_dev,
1397 err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
1398 if (err)
1399 return err;
1400 - slave_dev->flags |= IFF_SLAVE;
1401 rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
1402 return 0;
1403 }
1404 @@ -1452,6 +1451,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1405 }
1406 }
1407
1408 + /* set slave flag before open to prevent IPv6 addrconf */
1409 + slave_dev->flags |= IFF_SLAVE;
1410 +
1411 /* open the slave since the application closed it */
1412 res = dev_open(slave_dev);
1413 if (res) {
1414 @@ -1712,6 +1714,7 @@ err_close:
1415 dev_close(slave_dev);
1416
1417 err_restore_mac:
1418 + slave_dev->flags &= ~IFF_SLAVE;
1419 if (!bond->params.fail_over_mac ||
1420 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1421 /* XXX TODO - fom follow mode needs to change master's
1422 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
1423 index 9d15566521a7..cfe49a07c7c1 100644
1424 --- a/drivers/net/ppp/ppp_generic.c
1425 +++ b/drivers/net/ppp/ppp_generic.c
1426 @@ -715,10 +715,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1427 val &= 0xffff;
1428 }
1429 vj = slhc_init(val2+1, val+1);
1430 - if (!vj) {
1431 - netdev_err(ppp->dev,
1432 - "PPP: no memory (VJ compressor)\n");
1433 - err = -ENOMEM;
1434 + if (IS_ERR(vj)) {
1435 + err = PTR_ERR(vj);
1436 break;
1437 }
1438 ppp_lock(ppp);
1439 diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
1440 index 079f7adfcde5..27ed25252aac 100644
1441 --- a/drivers/net/slip/slhc.c
1442 +++ b/drivers/net/slip/slhc.c
1443 @@ -84,8 +84,9 @@ static long decode(unsigned char **cpp);
1444 static unsigned char * put16(unsigned char *cp, unsigned short x);
1445 static unsigned short pull16(unsigned char **cpp);
1446
1447 -/* Initialize compression data structure
1448 +/* Allocate compression data structure
1449 * slots must be in range 0 to 255 (zero meaning no compression)
1450 + * Returns pointer to structure or ERR_PTR() on error.
1451 */
1452 struct slcompress *
1453 slhc_init(int rslots, int tslots)
1454 @@ -94,11 +95,14 @@ slhc_init(int rslots, int tslots)
1455 register struct cstate *ts;
1456 struct slcompress *comp;
1457
1458 + if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
1459 + return ERR_PTR(-EINVAL);
1460 +
1461 comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
1462 if (! comp)
1463 goto out_fail;
1464
1465 - if ( rslots > 0 && rslots < 256 ) {
1466 + if (rslots > 0) {
1467 size_t rsize = rslots * sizeof(struct cstate);
1468 comp->rstate = kzalloc(rsize, GFP_KERNEL);
1469 if (! comp->rstate)
1470 @@ -106,7 +110,7 @@ slhc_init(int rslots, int tslots)
1471 comp->rslot_limit = rslots - 1;
1472 }
1473
1474 - if ( tslots > 0 && tslots < 256 ) {
1475 + if (tslots > 0) {
1476 size_t tsize = tslots * sizeof(struct cstate);
1477 comp->tstate = kzalloc(tsize, GFP_KERNEL);
1478 if (! comp->tstate)
1479 @@ -141,7 +145,7 @@ out_free2:
1480 out_free:
1481 kfree(comp);
1482 out_fail:
1483 - return NULL;
1484 + return ERR_PTR(-ENOMEM);
1485 }
1486
1487
1488 diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
1489 index 05387b1e2e95..a17d86a57734 100644
1490 --- a/drivers/net/slip/slip.c
1491 +++ b/drivers/net/slip/slip.c
1492 @@ -164,7 +164,7 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
1493 if (cbuff == NULL)
1494 goto err_exit;
1495 slcomp = slhc_init(16, 16);
1496 - if (slcomp == NULL)
1497 + if (IS_ERR(slcomp))
1498 goto err_exit;
1499 #endif
1500 spin_lock_bh(&sl->lock);
1501 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1502 index 6928448f6b7f..2b45d0168c3c 100644
1503 --- a/drivers/net/team/team.c
1504 +++ b/drivers/net/team/team.c
1505 @@ -1845,10 +1845,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1506 struct team *team = netdev_priv(dev);
1507 struct team_port *port;
1508
1509 - rcu_read_lock();
1510 - list_for_each_entry_rcu(port, &team->port_list, list)
1511 + mutex_lock(&team->lock);
1512 + list_for_each_entry(port, &team->port_list, list)
1513 vlan_vid_del(port->dev, proto, vid);
1514 - rcu_read_unlock();
1515 + mutex_unlock(&team->lock);
1516
1517 return 0;
1518 }
1519 diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
1520 index e4b7a47a825c..5efaa9ab5af5 100644
1521 --- a/drivers/net/usb/cdc_mbim.c
1522 +++ b/drivers/net/usb/cdc_mbim.c
1523 @@ -100,7 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
1524 .ndo_stop = usbnet_stop,
1525 .ndo_start_xmit = usbnet_start_xmit,
1526 .ndo_tx_timeout = usbnet_tx_timeout,
1527 - .ndo_change_mtu = usbnet_change_mtu,
1528 + .ndo_change_mtu = cdc_ncm_change_mtu,
1529 .ndo_set_mac_address = eth_mac_addr,
1530 .ndo_validate_addr = eth_validate_addr,
1531 .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
1532 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
1533 index 8067b8fbb0ee..0b481c30979b 100644
1534 --- a/drivers/net/usb/cdc_ncm.c
1535 +++ b/drivers/net/usb/cdc_ncm.c
1536 @@ -41,6 +41,7 @@
1537 #include <linux/module.h>
1538 #include <linux/netdevice.h>
1539 #include <linux/ctype.h>
1540 +#include <linux/etherdevice.h>
1541 #include <linux/ethtool.h>
1542 #include <linux/workqueue.h>
1543 #include <linux/mii.h>
1544 @@ -687,6 +688,33 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
1545 kfree(ctx);
1546 }
1547
1548 +/* we need to override the usbnet change_mtu ndo for two reasons:
1549 + * - respect the negotiated maximum datagram size
1550 + * - avoid unwanted changes to rx and tx buffers
1551 + */
1552 +int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
1553 +{
1554 + struct usbnet *dev = netdev_priv(net);
1555 + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
1556 + int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
1557 +
1558 + if (new_mtu <= 0 || new_mtu > maxmtu)
1559 + return -EINVAL;
1560 + net->mtu = new_mtu;
1561 + return 0;
1562 +}
1563 +EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
1564 +
1565 +static const struct net_device_ops cdc_ncm_netdev_ops = {
1566 + .ndo_open = usbnet_open,
1567 + .ndo_stop = usbnet_stop,
1568 + .ndo_start_xmit = usbnet_start_xmit,
1569 + .ndo_tx_timeout = usbnet_tx_timeout,
1570 + .ndo_change_mtu = cdc_ncm_change_mtu,
1571 + .ndo_set_mac_address = eth_mac_addr,
1572 + .ndo_validate_addr = eth_validate_addr,
1573 +};
1574 +
1575 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
1576 {
1577 const struct usb_cdc_union_desc *union_desc = NULL;
1578 @@ -861,6 +889,9 @@ advance:
1579 /* add our sysfs attrs */
1580 dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
1581
1582 + /* must handle MTU changes */
1583 + dev->net->netdev_ops = &cdc_ncm_netdev_ops;
1584 +
1585 return 0;
1586
1587 error2:
1588 diff --git a/drivers/net/veth.c b/drivers/net/veth.c
1589 index c8186ffda1a3..2e61a799f32a 100644
1590 --- a/drivers/net/veth.c
1591 +++ b/drivers/net/veth.c
1592 @@ -117,12 +117,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
1593 kfree_skb(skb);
1594 goto drop;
1595 }
1596 - /* don't change ip_summed == CHECKSUM_PARTIAL, as that
1597 - * will cause bad checksum on forwarded packets
1598 - */
1599 - if (skb->ip_summed == CHECKSUM_NONE &&
1600 - rcv->features & NETIF_F_RXCSUM)
1601 - skb->ip_summed = CHECKSUM_UNNECESSARY;
1602
1603 if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
1604 struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
1605 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1606 index 0085b8df83e2..940f78e41993 100644
1607 --- a/drivers/net/vxlan.c
1608 +++ b/drivers/net/vxlan.c
1609 @@ -2581,7 +2581,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
1610 struct nlattr *tb[], struct nlattr *data[])
1611 {
1612 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
1613 - struct vxlan_dev *vxlan = netdev_priv(dev);
1614 + struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
1615 struct vxlan_rdst *dst = &vxlan->default_dst;
1616 __u32 vni;
1617 int err;
1618 @@ -2714,9 +2714,13 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
1619 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
1620 vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
1621
1622 - if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
1623 - vxlan->dst_port, vxlan->flags)) {
1624 - pr_info("duplicate VNI %u\n", vni);
1625 + list_for_each_entry(tmp, &vn->vxlan_list, next) {
1626 + if (tmp->default_dst.remote_vni == vni &&
1627 + (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
1628 + tmp->saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
1629 + tmp->dst_port == vxlan->dst_port &&
1630 + (tmp->flags & VXLAN_F_RCV_FLAGS) ==
1631 + (vxlan->flags & VXLAN_F_RCV_FLAGS))
1632 return -EEXIST;
1633 }
1634
1635 diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
1636 index 0866c5dfdf87..5e5b6184e720 100644
1637 --- a/drivers/net/xen-netback/netback.c
1638 +++ b/drivers/net/xen-netback/netback.c
1639 @@ -2007,8 +2007,11 @@ static int __init netback_init(void)
1640 if (!xen_domain())
1641 return -ENODEV;
1642
1643 - /* Allow as many queues as there are CPUs, by default */
1644 - xenvif_max_queues = num_online_cpus();
1645 + /* Allow as many queues as there are CPUs if user has not
1646 + * specified a value.
1647 + */
1648 + if (xenvif_max_queues == 0)
1649 + xenvif_max_queues = num_online_cpus();
1650
1651 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1652 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1653 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1654 index 52f081f4dfd5..fd51626e859e 100644
1655 --- a/drivers/net/xen-netfront.c
1656 +++ b/drivers/net/xen-netfront.c
1657 @@ -1710,19 +1710,19 @@ static void xennet_destroy_queues(struct netfront_info *info)
1658 }
1659
1660 static int xennet_create_queues(struct netfront_info *info,
1661 - unsigned int num_queues)
1662 + unsigned int *num_queues)
1663 {
1664 unsigned int i;
1665 int ret;
1666
1667 - info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
1668 + info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1669 GFP_KERNEL);
1670 if (!info->queues)
1671 return -ENOMEM;
1672
1673 rtnl_lock();
1674
1675 - for (i = 0; i < num_queues; i++) {
1676 + for (i = 0; i < *num_queues; i++) {
1677 struct netfront_queue *queue = &info->queues[i];
1678
1679 queue->id = i;
1680 @@ -1732,7 +1732,7 @@ static int xennet_create_queues(struct netfront_info *info,
1681 if (ret < 0) {
1682 dev_warn(&info->netdev->dev,
1683 "only created %d queues\n", i);
1684 - num_queues = i;
1685 + *num_queues = i;
1686 break;
1687 }
1688
1689 @@ -1742,11 +1742,11 @@ static int xennet_create_queues(struct netfront_info *info,
1690 napi_enable(&queue->napi);
1691 }
1692
1693 - netif_set_real_num_tx_queues(info->netdev, num_queues);
1694 + netif_set_real_num_tx_queues(info->netdev, *num_queues);
1695
1696 rtnl_unlock();
1697
1698 - if (num_queues == 0) {
1699 + if (*num_queues == 0) {
1700 dev_err(&info->netdev->dev, "no queues\n");
1701 return -EINVAL;
1702 }
1703 @@ -1792,7 +1792,7 @@ static int talk_to_netback(struct xenbus_device *dev,
1704 if (info->queues)
1705 xennet_destroy_queues(info);
1706
1707 - err = xennet_create_queues(info, num_queues);
1708 + err = xennet_create_queues(info, &num_queues);
1709 if (err < 0)
1710 goto destroy_ring;
1711
1712 @@ -2140,8 +2140,11 @@ static int __init netif_init(void)
1713
1714 pr_info("Initialising Xen virtual ethernet driver\n");
1715
1716 - /* Allow as many queues as there are CPUs, by default */
1717 - xennet_max_queues = num_online_cpus();
1718 + /* Allow as many queues as there are CPUs if user has not
1719 + * specified a value.
1720 + */
1721 + if (xennet_max_queues == 0)
1722 + xennet_max_queues = num_online_cpus();
1723
1724 return xenbus_register_frontend(&netfront_driver);
1725 }
1726 diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
1727 index 761e77bfce5d..e56f1569f6c3 100644
1728 --- a/drivers/parisc/iommu-helpers.h
1729 +++ b/drivers/parisc/iommu-helpers.h
1730 @@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
1731 struct scatterlist *contig_sg; /* contig chunk head */
1732 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1733 unsigned int n_mappings = 0;
1734 - unsigned int max_seg_size = dma_get_max_seg_size(dev);
1735 + unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
1736 + (unsigned)DMA_CHUNK_SIZE);
1737 + unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
1738 + if (max_seg_boundary) /* check if the addition above didn't overflow */
1739 + max_seg_size = min(max_seg_size, max_seg_boundary);
1740
1741 while (nents > 0) {
1742
1743 @@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
1744
1745 /*
1746 ** First make sure current dma stream won't
1747 - ** exceed DMA_CHUNK_SIZE if we coalesce the
1748 + ** exceed max_seg_size if we coalesce the
1749 ** next entry.
1750 */
1751 - if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
1752 - IOVP_SIZE) > DMA_CHUNK_SIZE))
1753 - break;
1754 -
1755 - if (startsg->length + dma_len > max_seg_size)
1756 + if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
1757 + max_seg_size))
1758 break;
1759
1760 /*
1761 diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
1762 index d542e06d6cd3..10e520d6bb75 100644
1763 --- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
1764 +++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
1765 @@ -1268,6 +1268,7 @@ static int
1766 echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1767 {
1768 struct lov_stripe_md *ulsm = _ulsm;
1769 + struct lov_oinfo **p;
1770 int nob, i;
1771
1772 nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
1773 @@ -1277,9 +1278,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1774 if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
1775 return -EFAULT;
1776
1777 - for (i = 0; i < lsm->lsm_stripe_count; i++) {
1778 - if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
1779 - sizeof(lsm->lsm_oinfo[0])))
1780 + for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
1781 + struct lov_oinfo __user *up;
1782 + if (get_user(up, ulsm->lsm_oinfo + i) ||
1783 + copy_to_user(up, *p, sizeof(struct lov_oinfo)))
1784 return -EFAULT;
1785 }
1786 return 0;
1787 @@ -1287,9 +1289,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1788
1789 static int
1790 echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
1791 - void *ulsm, int ulsm_nob)
1792 + struct lov_stripe_md __user *ulsm, int ulsm_nob)
1793 {
1794 struct echo_client_obd *ec = ed->ed_ec;
1795 + struct lov_oinfo **p;
1796 int i;
1797
1798 if (ulsm_nob < sizeof(*lsm))
1799 @@ -1305,11 +1308,10 @@ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
1800 return -EINVAL;
1801
1802
1803 - for (i = 0; i < lsm->lsm_stripe_count; i++) {
1804 - if (copy_from_user(lsm->lsm_oinfo[i],
1805 - ((struct lov_stripe_md *)ulsm)-> \
1806 - lsm_oinfo[i],
1807 - sizeof(lsm->lsm_oinfo[0])))
1808 + for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
1809 + struct lov_oinfo __user *up;
1810 + if (get_user(up, ulsm->lsm_oinfo + i) ||
1811 + copy_from_user(*p, up, sizeof(struct lov_oinfo)))
1812 return -EFAULT;
1813 }
1814 return 0;
1815 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1816 index d68c4a4db682..ee11b301f3da 100644
1817 --- a/drivers/usb/core/hub.c
1818 +++ b/drivers/usb/core/hub.c
1819 @@ -1034,10 +1034,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1820 unsigned delay;
1821
1822 /* Continue a partial initialization */
1823 - if (type == HUB_INIT2)
1824 - goto init2;
1825 - if (type == HUB_INIT3)
1826 + if (type == HUB_INIT2 || type == HUB_INIT3) {
1827 + device_lock(hub->intfdev);
1828 +
1829 + /* Was the hub disconnected while we were waiting? */
1830 + if (hub->disconnected) {
1831 + device_unlock(hub->intfdev);
1832 + kref_put(&hub->kref, hub_release);
1833 + return;
1834 + }
1835 + if (type == HUB_INIT2)
1836 + goto init2;
1837 goto init3;
1838 + }
1839 + kref_get(&hub->kref);
1840
1841 /* The superspeed hub except for root hub has to use Hub Depth
1842 * value as an offset into the route string to locate the bits
1843 @@ -1235,6 +1245,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1844 queue_delayed_work(system_power_efficient_wq,
1845 &hub->init_work,
1846 msecs_to_jiffies(delay));
1847 + device_unlock(hub->intfdev);
1848 return; /* Continues at init3: below */
1849 } else {
1850 msleep(delay);
1851 @@ -1256,6 +1267,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1852 /* Allow autosuspend if it was suppressed */
1853 if (type <= HUB_INIT3)
1854 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
1855 +
1856 + if (type == HUB_INIT2 || type == HUB_INIT3)
1857 + device_unlock(hub->intfdev);
1858 +
1859 + kref_put(&hub->kref, hub_release);
1860 }
1861
1862 /* Implement the continuations for the delays above */
1863 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1864 index 1e6d7579709e..f6bb118e4501 100644
1865 --- a/drivers/usb/host/xhci.c
1866 +++ b/drivers/usb/host/xhci.c
1867 @@ -4794,8 +4794,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
1868 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
1869 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
1870 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
1871 + /*
1872 + * refer to section 6.2.2: MTT should be 0 for full speed hub,
1873 + * but it may be already set to 1 when setup an xHCI virtual
1874 + * device, so clear it anyway.
1875 + */
1876 if (tt->multi)
1877 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1878 + else if (hdev->speed == USB_SPEED_FULL)
1879 + slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
1880 +
1881 if (xhci->hci_version > 0x95) {
1882 xhci_dbg(xhci, "xHCI version %x needs hub "
1883 "TT think time and number of ports\n",
1884 @@ -5046,6 +5054,10 @@ static int __init xhci_hcd_init(void)
1885 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
1886 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
1887 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
1888 +
1889 + if (usb_disabled())
1890 + return -ENODEV;
1891 +
1892 return 0;
1893 }
1894
1895 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1896 index 7d4f51a32e66..59b2126b21a3 100644
1897 --- a/drivers/usb/serial/cp210x.c
1898 +++ b/drivers/usb/serial/cp210x.c
1899 @@ -160,6 +160,7 @@ static const struct usb_device_id id_table[] = {
1900 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
1901 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
1902 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
1903 + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
1904 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
1905 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
1906 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
1907 diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
1908 index f51a5d52c0ed..ec1b8f2c1183 100644
1909 --- a/drivers/usb/serial/ipaq.c
1910 +++ b/drivers/usb/serial/ipaq.c
1911 @@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty,
1912 * through. Since this has a reasonably high failure rate, we retry
1913 * several times.
1914 */
1915 - while (retries--) {
1916 + while (retries) {
1917 + retries--;
1918 result = usb_control_msg(serial->dev,
1919 usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
1920 0x1, 0, NULL, 0, 100);
1921 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
1922 index 4bd23bba816f..ee71baddbb10 100644
1923 --- a/drivers/xen/gntdev.c
1924 +++ b/drivers/xen/gntdev.c
1925 @@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1926
1927 vma->vm_ops = &gntdev_vmops;
1928
1929 - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1930 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
1931
1932 if (use_ptemod)
1933 vma->vm_flags |= VM_DONTCOPY;
1934 diff --git a/fs/direct-io.c b/fs/direct-io.c
1935 index 745d2342651a..d83a021a659f 100644
1936 --- a/fs/direct-io.c
1937 +++ b/fs/direct-io.c
1938 @@ -1159,6 +1159,16 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1939 }
1940 }
1941
1942 + /* Once we sampled i_size check for reads beyond EOF */
1943 + dio->i_size = i_size_read(inode);
1944 + if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
1945 + if (dio->flags & DIO_LOCKING)
1946 + mutex_unlock(&inode->i_mutex);
1947 + kmem_cache_free(dio_cache, dio);
1948 + retval = 0;
1949 + goto out;
1950 + }
1951 +
1952 /*
1953 * For file extending writes updating i_size before data writeouts
1954 * complete can expose uninitialized blocks in dumb filesystems.
1955 @@ -1212,7 +1222,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1956 sdio.next_block_for_io = -1;
1957
1958 dio->iocb = iocb;
1959 - dio->i_size = i_size_read(inode);
1960
1961 spin_lock_init(&dio->bio_lock);
1962 dio->refcount = 1;
1963 diff --git a/include/linux/filter.h b/include/linux/filter.h
1964 index fa11b3a367be..1ce6e1049a3b 100644
1965 --- a/include/linux/filter.h
1966 +++ b/include/linux/filter.h
1967 @@ -428,6 +428,25 @@ static inline void bpf_jit_free(struct bpf_prog *fp)
1968
1969 #define BPF_ANC BIT(15)
1970
1971 +static inline bool bpf_needs_clear_a(const struct sock_filter *first)
1972 +{
1973 + switch (first->code) {
1974 + case BPF_RET | BPF_K:
1975 + case BPF_LD | BPF_W | BPF_LEN:
1976 + return false;
1977 +
1978 + case BPF_LD | BPF_W | BPF_ABS:
1979 + case BPF_LD | BPF_H | BPF_ABS:
1980 + case BPF_LD | BPF_B | BPF_ABS:
1981 + if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
1982 + return true;
1983 + return false;
1984 +
1985 + default:
1986 + return true;
1987 + }
1988 +}
1989 +
1990 static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
1991 {
1992 BUG_ON(ftest->code & BPF_ANC);
1993 diff --git a/include/linux/sched.h b/include/linux/sched.h
1994 index 61f4f2d5c882..9128b4e9f541 100644
1995 --- a/include/linux/sched.h
1996 +++ b/include/linux/sched.h
1997 @@ -802,6 +802,7 @@ struct user_struct {
1998 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
1999 #endif
2000 unsigned long locked_shm; /* How many pages of mlocked shm ? */
2001 + unsigned long unix_inflight; /* How many files in flight in unix sockets */
2002
2003 #ifdef CONFIG_KEYS
2004 struct key *uid_keyring; /* UID specific keyring */
2005 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2006 index 4307e20a4a4a..1f17abe23725 100644
2007 --- a/include/linux/skbuff.h
2008 +++ b/include/linux/skbuff.h
2009 @@ -3320,7 +3320,8 @@ struct skb_gso_cb {
2010 int encap_level;
2011 __u16 csum_start;
2012 };
2013 -#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
2014 +#define SKB_SGO_CB_OFFSET 32
2015 +#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
2016
2017 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
2018 {
2019 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
2020 index 76d1e38aabe1..0c53fd51bf9b 100644
2021 --- a/include/linux/syscalls.h
2022 +++ b/include/linux/syscalls.h
2023 @@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
2024 asmlinkage long sys_lchown(const char __user *filename,
2025 uid_t user, gid_t group);
2026 asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
2027 -#ifdef CONFIG_UID16
2028 +#ifdef CONFIG_HAVE_UID16
2029 asmlinkage long sys_chown16(const char __user *filename,
2030 old_uid_t user, old_gid_t group);
2031 asmlinkage long sys_lchown16(const char __user *filename,
2032 diff --git a/include/linux/types.h b/include/linux/types.h
2033 index 8715287c3b1f..69c44d981da3 100644
2034 --- a/include/linux/types.h
2035 +++ b/include/linux/types.h
2036 @@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t;
2037
2038 typedef unsigned long uintptr_t;
2039
2040 -#ifdef CONFIG_UID16
2041 +#ifdef CONFIG_HAVE_UID16
2042 /* This is defined by include/asm-{arch}/posix_types.h */
2043 typedef __kernel_old_uid_t old_uid_t;
2044 typedef __kernel_old_gid_t old_gid_t;
2045 diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
2046 index 7c9b484735c5..e7827ae2462c 100644
2047 --- a/include/linux/usb/cdc_ncm.h
2048 +++ b/include/linux/usb/cdc_ncm.h
2049 @@ -133,6 +133,7 @@ struct cdc_ncm_ctx {
2050 };
2051
2052 u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
2053 +int cdc_ncm_change_mtu(struct net_device *net, int new_mtu);
2054 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
2055 void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
2056 struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
2057 diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
2058 index 84b20835b736..0dc0a51da38f 100644
2059 --- a/include/net/inet_ecn.h
2060 +++ b/include/net/inet_ecn.h
2061 @@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
2062
2063 struct ipv6hdr;
2064
2065 -static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
2066 +/* Note:
2067 + * IP_ECN_set_ce() has to tweak IPV4 checksum when setting CE,
2068 + * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE
2069 + * In IPv6 case, no checksum compensates the change in IPv6 header,
2070 + * so we have to update skb->csum.
2071 + */
2072 +static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
2073 {
2074 + __be32 from, to;
2075 +
2076 if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
2077 return 0;
2078 - *(__be32*)iph |= htonl(INET_ECN_CE << 20);
2079 +
2080 + from = *(__be32 *)iph;
2081 + to = from | htonl(INET_ECN_CE << 20);
2082 + *(__be32 *)iph = to;
2083 + if (skb->ip_summed == CHECKSUM_COMPLETE)
2084 + skb->csum = csum_add(csum_sub(skb->csum, from), to);
2085 return 1;
2086 }
2087
2088 @@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
2089 case cpu_to_be16(ETH_P_IPV6):
2090 if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
2091 skb_tail_pointer(skb))
2092 - return IP6_ECN_set_ce(ipv6_hdr(skb));
2093 + return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
2094 break;
2095 }
2096
2097 diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
2098 index 360c4802288d..7682cb2ae237 100644
2099 --- a/include/net/inet_timewait_sock.h
2100 +++ b/include/net/inet_timewait_sock.h
2101 @@ -112,7 +112,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
2102 void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
2103 struct inet_hashinfo *hashinfo);
2104
2105 -void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
2106 +void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
2107 + bool rearm);
2108 +
2109 +static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
2110 +{
2111 + __inet_twsk_schedule(tw, timeo, false);
2112 +}
2113 +
2114 +static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
2115 +{
2116 + __inet_twsk_schedule(tw, timeo, true);
2117 +}
2118 +
2119 void inet_twsk_deschedule(struct inet_timewait_sock *tw);
2120
2121 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
2122 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2123 index 47dcd3aa6e23..141d562064a7 100644
2124 --- a/kernel/bpf/verifier.c
2125 +++ b/kernel/bpf/verifier.c
2126 @@ -1019,6 +1019,16 @@ static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
2127 return -EINVAL;
2128 }
2129
2130 + if ((opcode == BPF_LSH || opcode == BPF_RSH ||
2131 + opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
2132 + int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
2133 +
2134 + if (insn->imm < 0 || insn->imm >= size) {
2135 + verbose("invalid shift %d\n", insn->imm);
2136 + return -EINVAL;
2137 + }
2138 + }
2139 +
2140 /* pattern match 'bpf_add Rx, imm' instruction */
2141 if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
2142 regs[insn->dst_reg].type == FRAME_PTR &&
2143 diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
2144 index ac4b96eccade..bd3357e69c5c 100644
2145 --- a/net/batman-adv/bridge_loop_avoidance.c
2146 +++ b/net/batman-adv/bridge_loop_avoidance.c
2147 @@ -112,21 +112,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
2148 }
2149
2150 /* finally deinitialize the claim */
2151 -static void batadv_claim_free_rcu(struct rcu_head *rcu)
2152 +static void batadv_claim_release(struct batadv_bla_claim *claim)
2153 {
2154 - struct batadv_bla_claim *claim;
2155 -
2156 - claim = container_of(rcu, struct batadv_bla_claim, rcu);
2157 -
2158 batadv_backbone_gw_free_ref(claim->backbone_gw);
2159 - kfree(claim);
2160 + kfree_rcu(claim, rcu);
2161 }
2162
2163 /* free a claim, call claim_free_rcu if its the last reference */
2164 static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
2165 {
2166 if (atomic_dec_and_test(&claim->refcount))
2167 - call_rcu(&claim->rcu, batadv_claim_free_rcu);
2168 + batadv_claim_release(claim);
2169 }
2170
2171 /**
2172 diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
2173 index 1918cd50b62e..b6bff9c1877a 100644
2174 --- a/net/batman-adv/hard-interface.h
2175 +++ b/net/batman-adv/hard-interface.h
2176 @@ -64,18 +64,6 @@ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
2177 call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
2178 }
2179
2180 -/**
2181 - * batadv_hardif_free_ref_now - decrement the hard interface refcounter and
2182 - * possibly free it (without rcu callback)
2183 - * @hard_iface: the hard interface to free
2184 - */
2185 -static inline void
2186 -batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
2187 -{
2188 - if (atomic_dec_and_test(&hard_iface->refcount))
2189 - batadv_hardif_free_rcu(&hard_iface->rcu);
2190 -}
2191 -
2192 static inline struct batadv_hard_iface *
2193 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
2194 {
2195 diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
2196 index a449195c5b2b..2fbd3a6bde9a 100644
2197 --- a/net/batman-adv/network-coding.c
2198 +++ b/net/batman-adv/network-coding.c
2199 @@ -175,28 +175,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
2200 }
2201
2202 /**
2203 - * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
2204 - * its refcount on the orig_node
2205 - * @rcu: rcu pointer of the nc node
2206 + * batadv_nc_node_release - release nc_node from lists and queue for free after
2207 + * rcu grace period
2208 + * @nc_node: the nc node to free
2209 */
2210 -static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
2211 +static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
2212 {
2213 - struct batadv_nc_node *nc_node;
2214 -
2215 - nc_node = container_of(rcu, struct batadv_nc_node, rcu);
2216 batadv_orig_node_free_ref(nc_node->orig_node);
2217 - kfree(nc_node);
2218 + kfree_rcu(nc_node, rcu);
2219 }
2220
2221 /**
2222 - * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
2223 - * frees it
2224 + * batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
2225 + * release it
2226 * @nc_node: the nc node to free
2227 */
2228 static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
2229 {
2230 if (atomic_dec_and_test(&nc_node->refcount))
2231 - call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
2232 + batadv_nc_node_release(nc_node);
2233 }
2234
2235 /**
2236 diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
2237 index dfae97408628..77ea1d4de2ba 100644
2238 --- a/net/batman-adv/originator.c
2239 +++ b/net/batman-adv/originator.c
2240 @@ -150,86 +150,58 @@ err:
2241 }
2242
2243 /**
2244 - * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
2245 - * @rcu: rcu pointer of the neigh_ifinfo object
2246 - */
2247 -static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
2248 -{
2249 - struct batadv_neigh_ifinfo *neigh_ifinfo;
2250 -
2251 - neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
2252 -
2253 - if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
2254 - batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
2255 -
2256 - kfree(neigh_ifinfo);
2257 -}
2258 -
2259 -/**
2260 - * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
2261 - * the neigh_ifinfo (without rcu callback)
2262 + * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
2263 + * free after rcu grace period
2264 * @neigh_ifinfo: the neigh_ifinfo object to release
2265 */
2266 static void
2267 -batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
2268 +batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
2269 {
2270 - if (atomic_dec_and_test(&neigh_ifinfo->refcount))
2271 - batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
2272 + if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
2273 + batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
2274 +
2275 + kfree_rcu(neigh_ifinfo, rcu);
2276 }
2277
2278 /**
2279 - * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
2280 + * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
2281 * the neigh_ifinfo
2282 * @neigh_ifinfo: the neigh_ifinfo object to release
2283 */
2284 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
2285 {
2286 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
2287 - call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
2288 + batadv_neigh_ifinfo_release(neigh_ifinfo);
2289 }
2290
2291 /**
2292 - * batadv_neigh_node_free_rcu - free the neigh_node
2293 - * @rcu: rcu pointer of the neigh_node
2294 + * batadv_neigh_node_release - release neigh_node from lists and queue for
2295 + * free after rcu grace period
2296 + * @neigh_node: neigh neighbor to free
2297 */
2298 -static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
2299 +static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
2300 {
2301 struct hlist_node *node_tmp;
2302 - struct batadv_neigh_node *neigh_node;
2303 struct batadv_neigh_ifinfo *neigh_ifinfo;
2304
2305 - neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
2306 -
2307 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
2308 &neigh_node->ifinfo_list, list) {
2309 - batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
2310 + batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
2311 }
2312 - batadv_hardif_free_ref_now(neigh_node->if_incoming);
2313 + batadv_hardif_free_ref(neigh_node->if_incoming);
2314
2315 - kfree(neigh_node);
2316 -}
2317 -
2318 -/**
2319 - * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
2320 - * and possibly free it (without rcu callback)
2321 - * @neigh_node: neigh neighbor to free
2322 - */
2323 -static void
2324 -batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
2325 -{
2326 - if (atomic_dec_and_test(&neigh_node->refcount))
2327 - batadv_neigh_node_free_rcu(&neigh_node->rcu);
2328 + kfree_rcu(neigh_node, rcu);
2329 }
2330
2331 /**
2332 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
2333 - * and possibly free it
2334 + * and possibly release it
2335 * @neigh_node: neigh neighbor to free
2336 */
2337 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
2338 {
2339 if (atomic_dec_and_test(&neigh_node->refcount))
2340 - call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
2341 + batadv_neigh_node_release(neigh_node);
2342 }
2343
2344 /**
2345 @@ -495,108 +467,99 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
2346 }
2347
2348 /**
2349 - * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
2350 - * @rcu: rcu pointer of the orig_ifinfo object
2351 + * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
2352 + * free after rcu grace period
2353 + * @orig_ifinfo: the orig_ifinfo object to release
2354 */
2355 -static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
2356 +static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
2357 {
2358 - struct batadv_orig_ifinfo *orig_ifinfo;
2359 struct batadv_neigh_node *router;
2360
2361 - orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
2362 -
2363 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
2364 - batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
2365 + batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
2366
2367 /* this is the last reference to this object */
2368 router = rcu_dereference_protected(orig_ifinfo->router, true);
2369 if (router)
2370 - batadv_neigh_node_free_ref_now(router);
2371 - kfree(orig_ifinfo);
2372 + batadv_neigh_node_free_ref(router);
2373 +
2374 + kfree_rcu(orig_ifinfo, rcu);
2375 }
2376
2377 /**
2378 - * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
2379 - * the orig_ifinfo (without rcu callback)
2380 + * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
2381 + * the orig_ifinfo
2382 * @orig_ifinfo: the orig_ifinfo object to release
2383 */
2384 -static void
2385 -batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
2386 +void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
2387 {
2388 if (atomic_dec_and_test(&orig_ifinfo->refcount))
2389 - batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
2390 + batadv_orig_ifinfo_release(orig_ifinfo);
2391 }
2392
2393 /**
2394 - * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
2395 - * the orig_ifinfo
2396 - * @orig_ifinfo: the orig_ifinfo object to release
2397 + * batadv_orig_node_free_rcu - free the orig_node
2398 + * @rcu: rcu pointer of the orig_node
2399 */
2400 -void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
2401 +static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
2402 {
2403 - if (atomic_dec_and_test(&orig_ifinfo->refcount))
2404 - call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
2405 + struct batadv_orig_node *orig_node;
2406 +
2407 + orig_node = container_of(rcu, struct batadv_orig_node, rcu);
2408 +
2409 + batadv_mcast_purge_orig(orig_node);
2410 +
2411 + batadv_frag_purge_orig(orig_node, NULL);
2412 +
2413 + if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
2414 + orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
2415 +
2416 + kfree(orig_node->tt_buff);
2417 + kfree(orig_node);
2418 }
2419
2420 -static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
2421 +/**
2422 + * batadv_orig_node_release - release orig_node from lists and queue for
2423 + * free after rcu grace period
2424 + * @orig_node: the orig node to free
2425 + */
2426 +static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
2427 {
2428 struct hlist_node *node_tmp;
2429 struct batadv_neigh_node *neigh_node;
2430 - struct batadv_orig_node *orig_node;
2431 struct batadv_orig_ifinfo *orig_ifinfo;
2432
2433 - orig_node = container_of(rcu, struct batadv_orig_node, rcu);
2434 -
2435 spin_lock_bh(&orig_node->neigh_list_lock);
2436
2437 /* for all neighbors towards this originator ... */
2438 hlist_for_each_entry_safe(neigh_node, node_tmp,
2439 &orig_node->neigh_list, list) {
2440 hlist_del_rcu(&neigh_node->list);
2441 - batadv_neigh_node_free_ref_now(neigh_node);
2442 + batadv_neigh_node_free_ref(neigh_node);
2443 }
2444
2445 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
2446 &orig_node->ifinfo_list, list) {
2447 hlist_del_rcu(&orig_ifinfo->list);
2448 - batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
2449 + batadv_orig_ifinfo_free_ref(orig_ifinfo);
2450 }
2451 spin_unlock_bh(&orig_node->neigh_list_lock);
2452
2453 - batadv_mcast_purge_orig(orig_node);
2454 -
2455 /* Free nc_nodes */
2456 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
2457
2458 - batadv_frag_purge_orig(orig_node, NULL);
2459 -
2460 - if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
2461 - orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
2462 -
2463 - kfree(orig_node->tt_buff);
2464 - kfree(orig_node);
2465 + call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
2466 }
2467
2468 /**
2469 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
2470 - * schedule an rcu callback for freeing it
2471 + * release it
2472 * @orig_node: the orig node to free
2473 */
2474 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
2475 {
2476 if (atomic_dec_and_test(&orig_node->refcount))
2477 - call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
2478 -}
2479 -
2480 -/**
2481 - * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
2482 - * possibly free it (without rcu callback)
2483 - * @orig_node: the orig node to free
2484 - */
2485 -void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
2486 -{
2487 - if (atomic_dec_and_test(&orig_node->refcount))
2488 - batadv_orig_node_free_rcu(&orig_node->rcu);
2489 + batadv_orig_node_release(orig_node);
2490 }
2491
2492 void batadv_originator_free(struct batadv_priv *bat_priv)
2493 diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
2494 index aa4a43696295..28b751ad549c 100644
2495 --- a/net/batman-adv/originator.h
2496 +++ b/net/batman-adv/originator.h
2497 @@ -25,7 +25,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
2498 void batadv_originator_free(struct batadv_priv *bat_priv);
2499 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
2500 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
2501 -void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
2502 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
2503 const uint8_t *addr);
2504 struct batadv_neigh_node *
2505 diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
2506 index 4f2a9d2c56db..ddd62c9af5b4 100644
2507 --- a/net/batman-adv/translation-table.c
2508 +++ b/net/batman-adv/translation-table.c
2509 @@ -219,20 +219,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
2510 return count;
2511 }
2512
2513 -static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
2514 -{
2515 - struct batadv_tt_orig_list_entry *orig_entry;
2516 -
2517 - orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
2518 -
2519 - /* We are in an rcu callback here, therefore we cannot use
2520 - * batadv_orig_node_free_ref() and its call_rcu():
2521 - * An rcu_barrier() wouldn't wait for that to finish
2522 - */
2523 - batadv_orig_node_free_ref_now(orig_entry->orig_node);
2524 - kfree(orig_entry);
2525 -}
2526 -
2527 /**
2528 * batadv_tt_local_size_mod - change the size by v of the local table identified
2529 * by vid
2530 @@ -328,13 +314,25 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
2531 batadv_tt_global_size_mod(orig_node, vid, -1);
2532 }
2533
2534 +/**
2535 + * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
2536 + * queue for free after rcu grace period
2537 + * @orig_entry: tt orig entry to be free'd
2538 + */
2539 +static void
2540 +batadv_tt_orig_list_entry_release(struct batadv_tt_orig_list_entry *orig_entry)
2541 +{
2542 + batadv_orig_node_free_ref(orig_entry->orig_node);
2543 + kfree_rcu(orig_entry, rcu);
2544 +}
2545 +
2546 static void
2547 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
2548 {
2549 if (!atomic_dec_and_test(&orig_entry->refcount))
2550 return;
2551
2552 - call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
2553 + batadv_tt_orig_list_entry_release(orig_entry);
2554 }
2555
2556 /**
2557 diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
2558 index 4ff77a16956c..3d6c8e222391 100644
2559 --- a/net/bridge/br_device.c
2560 +++ b/net/bridge/br_device.c
2561 @@ -28,6 +28,8 @@
2562 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
2563 EXPORT_SYMBOL_GPL(nf_br_ops);
2564
2565 +static struct lock_class_key bridge_netdev_addr_lock_key;
2566 +
2567 /* net device transmit always called with BH disabled */
2568 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
2569 {
2570 @@ -87,6 +89,11 @@ out:
2571 return NETDEV_TX_OK;
2572 }
2573
2574 +static void br_set_lockdep_class(struct net_device *dev)
2575 +{
2576 + lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
2577 +}
2578 +
2579 static int br_dev_init(struct net_device *dev)
2580 {
2581 struct net_bridge *br = netdev_priv(dev);
2582 @@ -99,6 +106,7 @@ static int br_dev_init(struct net_device *dev)
2583 err = br_vlan_init(br);
2584 if (err)
2585 free_percpu(br->stats);
2586 + br_set_lockdep_class(dev);
2587
2588 return err;
2589 }
2590 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
2591 index 7832d07f48f6..ce658abdc2c8 100644
2592 --- a/net/bridge/br_stp_if.c
2593 +++ b/net/bridge/br_stp_if.c
2594 @@ -128,7 +128,10 @@ static void br_stp_start(struct net_bridge *br)
2595 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
2596 char *envp[] = { NULL };
2597
2598 - r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
2599 + if (net_eq(dev_net(br->dev), &init_net))
2600 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
2601 + else
2602 + r = -ENOENT;
2603
2604 spin_lock_bh(&br->lock);
2605
2606 diff --git a/net/core/dev.c b/net/core/dev.c
2607 index a42b232805a5..185a3398c651 100644
2608 --- a/net/core/dev.c
2609 +++ b/net/core/dev.c
2610 @@ -2479,6 +2479,8 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2611 *
2612 * It may return NULL if the skb requires no segmentation. This is
2613 * only possible when GSO is used for verifying header integrity.
2614 + *
2615 + * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
2616 */
2617 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2618 netdev_features_t features, bool tx_path)
2619 @@ -2493,6 +2495,9 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2620 return ERR_PTR(err);
2621 }
2622
2623 + BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2624 + sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2625 +
2626 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2627 SKB_GSO_CB(skb)->encap_level = 0;
2628
2629 diff --git a/net/core/dst.c b/net/core/dst.c
2630 index f8db4032d45a..540066cb33ef 100644
2631 --- a/net/core/dst.c
2632 +++ b/net/core/dst.c
2633 @@ -282,10 +282,11 @@ void dst_release(struct dst_entry *dst)
2634 {
2635 if (dst) {
2636 int newrefcnt;
2637 + unsigned short nocache = dst->flags & DST_NOCACHE;
2638
2639 newrefcnt = atomic_dec_return(&dst->__refcnt);
2640 WARN_ON(newrefcnt < 0);
2641 - if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
2642 + if (!newrefcnt && unlikely(nocache))
2643 call_rcu(&dst->rcu_head, dst_destroy_rcu);
2644 }
2645 }
2646 diff --git a/net/core/filter.c b/net/core/filter.c
2647 index 0fa2613b5e35..238bb3f9c51d 100644
2648 --- a/net/core/filter.c
2649 +++ b/net/core/filter.c
2650 @@ -775,6 +775,11 @@ int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
2651 if (ftest->k == 0)
2652 return -EINVAL;
2653 break;
2654 + case BPF_ALU | BPF_LSH | BPF_K:
2655 + case BPF_ALU | BPF_RSH | BPF_K:
2656 + if (ftest->k >= 32)
2657 + return -EINVAL;
2658 + break;
2659 case BPF_LD | BPF_MEM:
2660 case BPF_LDX | BPF_MEM:
2661 case BPF_ST:
2662 diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
2663 index 30addee2dd03..838f524cf11a 100644
2664 --- a/net/dccp/minisocks.c
2665 +++ b/net/dccp/minisocks.c
2666 @@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
2667 tw->tw_ipv6only = sk->sk_ipv6only;
2668 }
2669 #endif
2670 - /* Linkage updates. */
2671 - __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
2672
2673 /* Get the TIME_WAIT timeout firing. */
2674 if (timeo < rto)
2675 @@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
2676 timeo = DCCP_TIMEWAIT_LEN;
2677
2678 inet_twsk_schedule(tw, timeo);
2679 + /* Linkage updates. */
2680 + __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
2681 inet_twsk_put(tw);
2682 } else {
2683 /* Sorry, if we're out of memory, just CLOSE this
2684 diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
2685 index 00ec8d5d7e7e..bb96c1c4edd6 100644
2686 --- a/net/ipv4/inet_timewait_sock.c
2687 +++ b/net/ipv4/inet_timewait_sock.c
2688 @@ -153,13 +153,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
2689 /*
2690 * Step 2: Hash TW into tcp ehash chain.
2691 * Notes :
2692 - * - tw_refcnt is set to 3 because :
2693 + * - tw_refcnt is set to 4 because :
2694 * - We have one reference from bhash chain.
2695 * - We have one reference from ehash chain.
2696 + * - We have one reference from timer.
2697 + * - One reference for ourself (our caller will release it).
2698 * We can use atomic_set() because prior spin_lock()/spin_unlock()
2699 * committed into memory all tw fields.
2700 */
2701 - atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
2702 + atomic_set(&tw->tw_refcnt, 4);
2703 inet_twsk_add_node_rcu(tw, &ehead->chain);
2704
2705 /* Step 3: Remove SK from hash chain */
2706 @@ -243,7 +245,7 @@ void inet_twsk_deschedule(struct inet_timewait_sock *tw)
2707 }
2708 EXPORT_SYMBOL(inet_twsk_deschedule);
2709
2710 -void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
2711 +void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
2712 {
2713 /* timeout := RTO * 3.5
2714 *
2715 @@ -271,12 +273,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
2716 */
2717
2718 tw->tw_kill = timeo <= 4*HZ;
2719 - if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
2720 - atomic_inc(&tw->tw_refcnt);
2721 + if (!rearm) {
2722 + BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
2723 atomic_inc(&tw->tw_dr->tw_count);
2724 + } else {
2725 + mod_timer_pending(&tw->tw_timer, jiffies + timeo);
2726 }
2727 }
2728 -EXPORT_SYMBOL_GPL(inet_twsk_schedule);
2729 +EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
2730
2731 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
2732 struct inet_timewait_death_row *twdr, int family)
2733 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
2734 index c65b93a7b711..51573f8a39bc 100644
2735 --- a/net/ipv4/ip_output.c
2736 +++ b/net/ipv4/ip_output.c
2737 @@ -235,6 +235,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
2738 * from host network stack.
2739 */
2740 features = netif_skb_features(skb);
2741 + BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
2742 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
2743 if (IS_ERR_OR_NULL(segs)) {
2744 kfree_skb(skb);
2745 @@ -893,7 +894,7 @@ static int __ip_append_data(struct sock *sk,
2746 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
2747 (sk->sk_protocol == IPPROTO_UDP) &&
2748 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
2749 - (sk->sk_type == SOCK_DGRAM)) {
2750 + (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
2751 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
2752 hh_len, fragheaderlen, transhdrlen,
2753 maxfraglen, flags);
2754 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
2755 index 17e7339ee5ca..fec2907b85e8 100644
2756 --- a/net/ipv4/tcp_minisocks.c
2757 +++ b/net/ipv4/tcp_minisocks.c
2758 @@ -163,9 +163,9 @@ kill_with_rst:
2759 if (tcp_death_row.sysctl_tw_recycle &&
2760 tcptw->tw_ts_recent_stamp &&
2761 tcp_tw_remember_stamp(tw))
2762 - inet_twsk_schedule(tw, tw->tw_timeout);
2763 + inet_twsk_reschedule(tw, tw->tw_timeout);
2764 else
2765 - inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
2766 + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
2767 return TCP_TW_ACK;
2768 }
2769
2770 @@ -203,7 +203,7 @@ kill:
2771 return TCP_TW_SUCCESS;
2772 }
2773 }
2774 - inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
2775 + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
2776
2777 if (tmp_opt.saw_tstamp) {
2778 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
2779 @@ -253,7 +253,7 @@ kill:
2780 * Do not reschedule in the last case.
2781 */
2782 if (paws_reject || th->ack)
2783 - inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
2784 + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
2785
2786 return tcp_timewait_check_oow_rate_limit(
2787 tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
2788 @@ -324,9 +324,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
2789 } while (0);
2790 #endif
2791
2792 - /* Linkage updates. */
2793 - __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
2794 -
2795 /* Get the TIME_WAIT timeout firing. */
2796 if (timeo < rto)
2797 timeo = rto;
2798 @@ -340,6 +337,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
2799 }
2800
2801 inet_twsk_schedule(tw, timeo);
2802 + /* Linkage updates. */
2803 + __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
2804 inet_twsk_put(tw);
2805 } else {
2806 /* Sorry, if we're out of memory, just CLOSE this
2807 diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
2808 index 17d35662930d..3e6a472e6b88 100644
2809 --- a/net/ipv4/tcp_yeah.c
2810 +++ b/net/ipv4/tcp_yeah.c
2811 @@ -219,7 +219,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
2812 yeah->fast_count = 0;
2813 yeah->reno_count = max(yeah->reno_count>>1, 2U);
2814
2815 - return tp->snd_cwnd - reduction;
2816 + return max_t(int, tp->snd_cwnd - reduction, 2);
2817 }
2818
2819 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
2820 diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
2821 index bff69746e05f..78526087126d 100644
2822 --- a/net/ipv4/xfrm4_policy.c
2823 +++ b/net/ipv4/xfrm4_policy.c
2824 @@ -230,7 +230,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
2825 xfrm_dst_ifdown(dst, dev);
2826 }
2827
2828 -static struct dst_ops xfrm4_dst_ops = {
2829 +static struct dst_ops xfrm4_dst_ops_template = {
2830 .family = AF_INET,
2831 .gc = xfrm4_garbage_collect,
2832 .update_pmtu = xfrm4_update_pmtu,
2833 @@ -244,7 +244,7 @@ static struct dst_ops xfrm4_dst_ops = {
2834
2835 static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
2836 .family = AF_INET,
2837 - .dst_ops = &xfrm4_dst_ops,
2838 + .dst_ops = &xfrm4_dst_ops_template,
2839 .dst_lookup = xfrm4_dst_lookup,
2840 .get_saddr = xfrm4_get_saddr,
2841 .decode_session = _decode_session4,
2842 @@ -266,7 +266,7 @@ static struct ctl_table xfrm4_policy_table[] = {
2843 { }
2844 };
2845
2846 -static int __net_init xfrm4_net_init(struct net *net)
2847 +static int __net_init xfrm4_net_sysctl_init(struct net *net)
2848 {
2849 struct ctl_table *table;
2850 struct ctl_table_header *hdr;
2851 @@ -294,7 +294,7 @@ err_alloc:
2852 return -ENOMEM;
2853 }
2854
2855 -static void __net_exit xfrm4_net_exit(struct net *net)
2856 +static void __net_exit xfrm4_net_sysctl_exit(struct net *net)
2857 {
2858 struct ctl_table *table;
2859
2860 @@ -306,12 +306,44 @@ static void __net_exit xfrm4_net_exit(struct net *net)
2861 if (!net_eq(net, &init_net))
2862 kfree(table);
2863 }
2864 +#else /* CONFIG_SYSCTL */
2865 +static int inline xfrm4_net_sysctl_init(struct net *net)
2866 +{
2867 + return 0;
2868 +}
2869 +
2870 +static void inline xfrm4_net_sysctl_exit(struct net *net)
2871 +{
2872 +}
2873 +#endif
2874 +
2875 +static int __net_init xfrm4_net_init(struct net *net)
2876 +{
2877 + int ret;
2878 +
2879 + memcpy(&net->xfrm.xfrm4_dst_ops, &xfrm4_dst_ops_template,
2880 + sizeof(xfrm4_dst_ops_template));
2881 + ret = dst_entries_init(&net->xfrm.xfrm4_dst_ops);
2882 + if (ret)
2883 + return ret;
2884 +
2885 + ret = xfrm4_net_sysctl_init(net);
2886 + if (ret)
2887 + dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
2888 +
2889 + return ret;
2890 +}
2891 +
2892 +static void __net_exit xfrm4_net_exit(struct net *net)
2893 +{
2894 + xfrm4_net_sysctl_exit(net);
2895 + dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
2896 +}
2897
2898 static struct pernet_operations __net_initdata xfrm4_net_ops = {
2899 .init = xfrm4_net_init,
2900 .exit = xfrm4_net_exit,
2901 };
2902 -#endif
2903
2904 static void __init xfrm4_policy_init(void)
2905 {
2906 @@ -320,13 +352,9 @@ static void __init xfrm4_policy_init(void)
2907
2908 void __init xfrm4_init(void)
2909 {
2910 - dst_entries_init(&xfrm4_dst_ops);
2911 -
2912 xfrm4_state_init();
2913 xfrm4_policy_init();
2914 xfrm4_protocol_init();
2915 -#ifdef CONFIG_SYSCTL
2916 register_pernet_subsys(&xfrm4_net_ops);
2917 -#endif
2918 }
2919
2920 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2921 index a2d685030a34..f4795b0d6e6e 100644
2922 --- a/net/ipv6/addrconf.c
2923 +++ b/net/ipv6/addrconf.c
2924 @@ -5267,13 +5267,10 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
2925 goto out;
2926 }
2927
2928 - if (!write) {
2929 - err = snprintf(str, sizeof(str), "%pI6",
2930 - &secret->secret);
2931 - if (err >= sizeof(str)) {
2932 - err = -EIO;
2933 - goto out;
2934 - }
2935 + err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
2936 + if (err >= sizeof(str)) {
2937 + err = -EIO;
2938 + goto out;
2939 }
2940
2941 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
2942 diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
2943 index 882124ebb438..a8f6986dcbe5 100644
2944 --- a/net/ipv6/addrlabel.c
2945 +++ b/net/ipv6/addrlabel.c
2946 @@ -552,7 +552,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2947
2948 rcu_read_lock();
2949 p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
2950 - if (p && ip6addrlbl_hold(p))
2951 + if (p && !ip6addrlbl_hold(p))
2952 p = NULL;
2953 lseq = ip6addrlbl_table.seq;
2954 rcu_read_unlock();
2955 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2956 index bc09cb97b840..f50228b0abe5 100644
2957 --- a/net/ipv6/ip6_output.c
2958 +++ b/net/ipv6/ip6_output.c
2959 @@ -1329,7 +1329,7 @@ emsgsize:
2960 (skb && skb_is_gso(skb))) &&
2961 (sk->sk_protocol == IPPROTO_UDP) &&
2962 (rt->dst.dev->features & NETIF_F_UFO) &&
2963 - (sk->sk_type == SOCK_DGRAM)) {
2964 + (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
2965 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
2966 hh_len, fragheaderlen,
2967 transhdrlen, mtu, flags, rt);
2968 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
2969 index c1938ad39f8c..c1147acbc8c4 100644
2970 --- a/net/ipv6/tcp_ipv6.c
2971 +++ b/net/ipv6/tcp_ipv6.c
2972 @@ -465,8 +465,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
2973 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
2974
2975 skb_set_queue_mapping(skb, queue_mapping);
2976 + rcu_read_lock();
2977 err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
2978 np->tclass);
2979 + rcu_read_unlock();
2980 err = net_xmit_eval(err);
2981 }
2982
2983 diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
2984 index 901ef6f8addc..5266ad2d6419 100644
2985 --- a/net/ipv6/xfrm6_mode_tunnel.c
2986 +++ b/net/ipv6/xfrm6_mode_tunnel.c
2987 @@ -24,7 +24,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
2988 struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
2989
2990 if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
2991 - IP6_ECN_set_ce(inner_iph);
2992 + IP6_ECN_set_ce(skb, inner_iph);
2993 }
2994
2995 /* Add encapsulation header.
2996 diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
2997 index f337a908a76a..4fb94f6ee15b 100644
2998 --- a/net/ipv6/xfrm6_policy.c
2999 +++ b/net/ipv6/xfrm6_policy.c
3000 @@ -289,7 +289,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
3001 xfrm_dst_ifdown(dst, dev);
3002 }
3003
3004 -static struct dst_ops xfrm6_dst_ops = {
3005 +static struct dst_ops xfrm6_dst_ops_template = {
3006 .family = AF_INET6,
3007 .gc = xfrm6_garbage_collect,
3008 .update_pmtu = xfrm6_update_pmtu,
3009 @@ -303,7 +303,7 @@ static struct dst_ops xfrm6_dst_ops = {
3010
3011 static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
3012 .family = AF_INET6,
3013 - .dst_ops = &xfrm6_dst_ops,
3014 + .dst_ops = &xfrm6_dst_ops_template,
3015 .dst_lookup = xfrm6_dst_lookup,
3016 .get_saddr = xfrm6_get_saddr,
3017 .decode_session = _decode_session6,
3018 @@ -336,7 +336,7 @@ static struct ctl_table xfrm6_policy_table[] = {
3019 { }
3020 };
3021
3022 -static int __net_init xfrm6_net_init(struct net *net)
3023 +static int __net_init xfrm6_net_sysctl_init(struct net *net)
3024 {
3025 struct ctl_table *table;
3026 struct ctl_table_header *hdr;
3027 @@ -364,7 +364,7 @@ err_alloc:
3028 return -ENOMEM;
3029 }
3030
3031 -static void __net_exit xfrm6_net_exit(struct net *net)
3032 +static void __net_exit xfrm6_net_sysctl_exit(struct net *net)
3033 {
3034 struct ctl_table *table;
3035
3036 @@ -376,24 +376,52 @@ static void __net_exit xfrm6_net_exit(struct net *net)
3037 if (!net_eq(net, &init_net))
3038 kfree(table);
3039 }
3040 +#else /* CONFIG_SYSCTL */
3041 +static int inline xfrm6_net_sysctl_init(struct net *net)
3042 +{
3043 + return 0;
3044 +}
3045 +
3046 +static void inline xfrm6_net_sysctl_exit(struct net *net)
3047 +{
3048 +}
3049 +#endif
3050 +
3051 +static int __net_init xfrm6_net_init(struct net *net)
3052 +{
3053 + int ret;
3054 +
3055 + memcpy(&net->xfrm.xfrm6_dst_ops, &xfrm6_dst_ops_template,
3056 + sizeof(xfrm6_dst_ops_template));
3057 + ret = dst_entries_init(&net->xfrm.xfrm6_dst_ops);
3058 + if (ret)
3059 + return ret;
3060 +
3061 + ret = xfrm6_net_sysctl_init(net);
3062 + if (ret)
3063 + dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);
3064 +
3065 + return ret;
3066 +}
3067 +
3068 +static void __net_exit xfrm6_net_exit(struct net *net)
3069 +{
3070 + xfrm6_net_sysctl_exit(net);
3071 + dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);
3072 +}
3073
3074 static struct pernet_operations xfrm6_net_ops = {
3075 .init = xfrm6_net_init,
3076 .exit = xfrm6_net_exit,
3077 };
3078 -#endif
3079
3080 int __init xfrm6_init(void)
3081 {
3082 int ret;
3083
3084 - dst_entries_init(&xfrm6_dst_ops);
3085 -
3086 ret = xfrm6_policy_init();
3087 - if (ret) {
3088 - dst_entries_destroy(&xfrm6_dst_ops);
3089 + if (ret)
3090 goto out;
3091 - }
3092 ret = xfrm6_state_init();
3093 if (ret)
3094 goto out_policy;
3095 @@ -402,9 +430,7 @@ int __init xfrm6_init(void)
3096 if (ret)
3097 goto out_state;
3098
3099 -#ifdef CONFIG_SYSCTL
3100 register_pernet_subsys(&xfrm6_net_ops);
3101 -#endif
3102 out:
3103 return ret;
3104 out_state:
3105 @@ -416,11 +442,8 @@ out_policy:
3106
3107 void xfrm6_fini(void)
3108 {
3109 -#ifdef CONFIG_SYSCTL
3110 unregister_pernet_subsys(&xfrm6_net_ops);
3111 -#endif
3112 xfrm6_protocol_fini();
3113 xfrm6_policy_fini();
3114 xfrm6_state_fini();
3115 - dst_entries_destroy(&xfrm6_dst_ops);
3116 }
3117 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
3118 index 27e14962b504..b3fe02a2339e 100644
3119 --- a/net/openvswitch/datapath.c
3120 +++ b/net/openvswitch/datapath.c
3121 @@ -337,12 +337,10 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
3122 unsigned short gso_type = skb_shinfo(skb)->gso_type;
3123 struct sw_flow_key later_key;
3124 struct sk_buff *segs, *nskb;
3125 - struct ovs_skb_cb ovs_cb;
3126 int err;
3127
3128 - ovs_cb = *OVS_CB(skb);
3129 + BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
3130 segs = __skb_gso_segment(skb, NETIF_F_SG, false);
3131 - *OVS_CB(skb) = ovs_cb;
3132 if (IS_ERR(segs))
3133 return PTR_ERR(segs);
3134 if (segs == NULL)
3135 @@ -360,7 +358,6 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
3136 /* Queue all of the segments. */
3137 skb = segs;
3138 do {
3139 - *OVS_CB(skb) = ovs_cb;
3140 if (gso_type & SKB_GSO_UDP && skb != segs)
3141 key = &later_key;
3142
3143 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
3144 index 32ab87d34828..11d0b29ce4b8 100644
3145 --- a/net/phonet/af_phonet.c
3146 +++ b/net/phonet/af_phonet.c
3147 @@ -377,6 +377,10 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
3148 struct sockaddr_pn sa;
3149 u16 len;
3150
3151 + skb = skb_share_check(skb, GFP_ATOMIC);
3152 + if (!skb)
3153 + return NET_RX_DROP;
3154 +
3155 /* check we have at least a full Phonet header */
3156 if (!pskb_pull(skb, sizeof(struct phonethdr)))
3157 goto out;
3158 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
3159 index b453270be3fd..3c6f6b774ba6 100644
3160 --- a/net/sched/sch_generic.c
3161 +++ b/net/sched/sch_generic.c
3162 @@ -666,8 +666,10 @@ static void qdisc_rcu_free(struct rcu_head *head)
3163 {
3164 struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
3165
3166 - if (qdisc_is_percpu_stats(qdisc))
3167 + if (qdisc_is_percpu_stats(qdisc)) {
3168 free_percpu(qdisc->cpu_bstats);
3169 + free_percpu(qdisc->cpu_qstats);
3170 + }
3171
3172 kfree((char *) qdisc - qdisc->padded);
3173 }
3174 diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
3175 index fef2acdf4a2e..ecae5561b912 100644
3176 --- a/net/sctp/sm_sideeffect.c
3177 +++ b/net/sctp/sm_sideeffect.c
3178 @@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
3179 int error;
3180 struct sctp_transport *transport = (struct sctp_transport *) peer;
3181 struct sctp_association *asoc = transport->asoc;
3182 - struct net *net = sock_net(asoc->base.sk);
3183 + struct sock *sk = asoc->base.sk;
3184 + struct net *net = sock_net(sk);
3185
3186 /* Check whether a task is in the sock. */
3187
3188 - bh_lock_sock(asoc->base.sk);
3189 - if (sock_owned_by_user(asoc->base.sk)) {
3190 + bh_lock_sock(sk);
3191 + if (sock_owned_by_user(sk)) {
3192 pr_debug("%s: sock is busy\n", __func__);
3193
3194 /* Try again later. */
3195 @@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
3196 transport, GFP_ATOMIC);
3197
3198 if (error)
3199 - asoc->base.sk->sk_err = -error;
3200 + sk->sk_err = -error;
3201
3202 out_unlock:
3203 - bh_unlock_sock(asoc->base.sk);
3204 + bh_unlock_sock(sk);
3205 sctp_transport_put(transport);
3206 }
3207
3208 @@ -285,11 +286,12 @@ out_unlock:
3209 static void sctp_generate_timeout_event(struct sctp_association *asoc,
3210 sctp_event_timeout_t timeout_type)
3211 {
3212 - struct net *net = sock_net(asoc->base.sk);
3213 + struct sock *sk = asoc->base.sk;
3214 + struct net *net = sock_net(sk);
3215 int error = 0;
3216
3217 - bh_lock_sock(asoc->base.sk);
3218 - if (sock_owned_by_user(asoc->base.sk)) {
3219 + bh_lock_sock(sk);
3220 + if (sock_owned_by_user(sk)) {
3221 pr_debug("%s: sock is busy: timer %d\n", __func__,
3222 timeout_type);
3223
3224 @@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
3225 (void *)timeout_type, GFP_ATOMIC);
3226
3227 if (error)
3228 - asoc->base.sk->sk_err = -error;
3229 + sk->sk_err = -error;
3230
3231 out_unlock:
3232 - bh_unlock_sock(asoc->base.sk);
3233 + bh_unlock_sock(sk);
3234 sctp_association_put(asoc);
3235 }
3236
3237 @@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
3238 int error = 0;
3239 struct sctp_transport *transport = (struct sctp_transport *) data;
3240 struct sctp_association *asoc = transport->asoc;
3241 - struct net *net = sock_net(asoc->base.sk);
3242 + struct sock *sk = asoc->base.sk;
3243 + struct net *net = sock_net(sk);
3244
3245 - bh_lock_sock(asoc->base.sk);
3246 - if (sock_owned_by_user(asoc->base.sk)) {
3247 + bh_lock_sock(sk);
3248 + if (sock_owned_by_user(sk)) {
3249 pr_debug("%s: sock is busy\n", __func__);
3250
3251 /* Try again later. */
3252 @@ -389,10 +392,10 @@ void sctp_generate_heartbeat_event(unsigned long data)
3253 transport, GFP_ATOMIC);
3254
3255 if (error)
3256 - asoc->base.sk->sk_err = -error;
3257 + sk->sk_err = -error;
3258
3259 out_unlock:
3260 - bh_unlock_sock(asoc->base.sk);
3261 + bh_unlock_sock(sk);
3262 sctp_transport_put(transport);
3263 }
3264
3265 @@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
3266 {
3267 struct sctp_transport *transport = (struct sctp_transport *) data;
3268 struct sctp_association *asoc = transport->asoc;
3269 - struct net *net = sock_net(asoc->base.sk);
3270 + struct sock *sk = asoc->base.sk;
3271 + struct net *net = sock_net(sk);
3272
3273 - bh_lock_sock(asoc->base.sk);
3274 - if (sock_owned_by_user(asoc->base.sk)) {
3275 + bh_lock_sock(sk);
3276 + if (sock_owned_by_user(sk)) {
3277 pr_debug("%s: sock is busy\n", __func__);
3278
3279 /* Try again later. */
3280 @@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
3281 asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
3282
3283 out_unlock:
3284 - bh_unlock_sock(asoc->base.sk);
3285 + bh_unlock_sock(sk);
3286 sctp_association_put(asoc);
3287 }
3288
3289 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
3290 index 3ee27b7704ff..e6bb98e583fb 100644
3291 --- a/net/sctp/sm_statefuns.c
3292 +++ b/net/sctp/sm_statefuns.c
3293 @@ -4829,7 +4829,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
3294
3295 retval = SCTP_DISPOSITION_CONSUME;
3296
3297 - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
3298 + if (abort)
3299 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
3300
3301 /* Even if we can't send the ABORT due to low memory delete the
3302 * TCB. This is a departure from our typical NOMEM handling.
3303 @@ -4966,7 +4967,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
3304 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
3305 retval = SCTP_DISPOSITION_CONSUME;
3306
3307 - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
3308 + if (abort)
3309 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
3310
3311 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
3312 SCTP_STATE(SCTP_STATE_CLOSED));
3313 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3314 index a63c2c87a0c6..76e6ec62cf92 100644
3315 --- a/net/sctp/socket.c
3316 +++ b/net/sctp/socket.c
3317 @@ -1513,8 +1513,7 @@ static void sctp_close(struct sock *sk, long timeout)
3318 struct sctp_chunk *chunk;
3319
3320 chunk = sctp_make_abort_user(asoc, NULL, 0);
3321 - if (chunk)
3322 - sctp_primitive_ABORT(net, asoc, chunk);
3323 + sctp_primitive_ABORT(net, asoc, chunk);
3324 } else
3325 sctp_primitive_SHUTDOWN(net, asoc, NULL);
3326 }
3327 diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
3328 index 26d50c565f54..3e0fc5127225 100644
3329 --- a/net/sctp/sysctl.c
3330 +++ b/net/sctp/sysctl.c
3331 @@ -320,7 +320,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
3332 struct ctl_table tbl;
3333 bool changed = false;
3334 char *none = "none";
3335 - char tmp[8];
3336 + char tmp[8] = {0};
3337 int ret;
3338
3339 memset(&tbl, 0, sizeof(struct ctl_table));
3340 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
3341 index a398f624c28d..cb3a01a9ed38 100644
3342 --- a/net/unix/af_unix.c
3343 +++ b/net/unix/af_unix.c
3344 @@ -1481,6 +1481,21 @@ static void unix_destruct_scm(struct sk_buff *skb)
3345 sock_wfree(skb);
3346 }
3347
3348 +/*
3349 + * The "user->unix_inflight" variable is protected by the garbage
3350 + * collection lock, and we just read it locklessly here. If you go
3351 + * over the limit, there might be a tiny race in actually noticing
3352 + * it across threads. Tough.
3353 + */
3354 +static inline bool too_many_unix_fds(struct task_struct *p)
3355 +{
3356 + struct user_struct *user = current_user();
3357 +
3358 + if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
3359 + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
3360 + return false;
3361 +}
3362 +
3363 #define MAX_RECURSION_LEVEL 4
3364
3365 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
3366 @@ -1489,6 +1504,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
3367 unsigned char max_level = 0;
3368 int unix_sock_count = 0;
3369
3370 + if (too_many_unix_fds(current))
3371 + return -ETOOMANYREFS;
3372 +
3373 for (i = scm->fp->count - 1; i >= 0; i--) {
3374 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
3375
3376 @@ -1510,10 +1528,8 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
3377 if (!UNIXCB(skb).fp)
3378 return -ENOMEM;
3379
3380 - if (unix_sock_count) {
3381 - for (i = scm->fp->count - 1; i >= 0; i--)
3382 - unix_inflight(scm->fp->fp[i]);
3383 - }
3384 + for (i = scm->fp->count - 1; i >= 0; i--)
3385 + unix_inflight(scm->fp->fp[i]);
3386 return max_level;
3387 }
3388
3389 diff --git a/net/unix/garbage.c b/net/unix/garbage.c
3390 index a73a226f2d33..8fcdc2283af5 100644
3391 --- a/net/unix/garbage.c
3392 +++ b/net/unix/garbage.c
3393 @@ -120,11 +120,11 @@ void unix_inflight(struct file *fp)
3394 {
3395 struct sock *s = unix_get_socket(fp);
3396
3397 + spin_lock(&unix_gc_lock);
3398 +
3399 if (s) {
3400 struct unix_sock *u = unix_sk(s);
3401
3402 - spin_lock(&unix_gc_lock);
3403 -
3404 if (atomic_long_inc_return(&u->inflight) == 1) {
3405 BUG_ON(!list_empty(&u->link));
3406 list_add_tail(&u->link, &gc_inflight_list);
3407 @@ -132,25 +132,28 @@ void unix_inflight(struct file *fp)
3408 BUG_ON(list_empty(&u->link));
3409 }
3410 unix_tot_inflight++;
3411 - spin_unlock(&unix_gc_lock);
3412 }
3413 + fp->f_cred->user->unix_inflight++;
3414 + spin_unlock(&unix_gc_lock);
3415 }
3416
3417 void unix_notinflight(struct file *fp)
3418 {
3419 struct sock *s = unix_get_socket(fp);
3420
3421 + spin_lock(&unix_gc_lock);
3422 +
3423 if (s) {
3424 struct unix_sock *u = unix_sk(s);
3425
3426 - spin_lock(&unix_gc_lock);
3427 BUG_ON(list_empty(&u->link));
3428
3429 if (atomic_long_dec_and_test(&u->inflight))
3430 list_del_init(&u->link);
3431 unix_tot_inflight--;
3432 - spin_unlock(&unix_gc_lock);
3433 }
3434 + fp->f_cred->user->unix_inflight--;
3435 + spin_unlock(&unix_gc_lock);
3436 }
3437
3438 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
3439 diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
3440 index fbcedbe33190..5097dce5b916 100644
3441 --- a/net/xfrm/xfrm_output.c
3442 +++ b/net/xfrm/xfrm_output.c
3443 @@ -153,6 +153,8 @@ static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb)
3444 {
3445 struct sk_buff *segs;
3446
3447 + BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
3448 + BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
3449 segs = skb_gso_segment(skb, 0);
3450 kfree_skb(skb);
3451 if (IS_ERR(segs))
3452 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3453 index 638af0655aaf..4cd2076ff84b 100644
3454 --- a/net/xfrm/xfrm_policy.c
3455 +++ b/net/xfrm/xfrm_policy.c
3456 @@ -2806,7 +2806,6 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3457
3458 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
3459 {
3460 - struct net *net;
3461 int err = 0;
3462 if (unlikely(afinfo == NULL))
3463 return -EINVAL;
3464 @@ -2837,26 +2836,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
3465 }
3466 spin_unlock(&xfrm_policy_afinfo_lock);
3467
3468 - rtnl_lock();
3469 - for_each_net(net) {
3470 - struct dst_ops *xfrm_dst_ops;
3471 -
3472 - switch (afinfo->family) {
3473 - case AF_INET:
3474 - xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
3475 - break;
3476 -#if IS_ENABLED(CONFIG_IPV6)
3477 - case AF_INET6:
3478 - xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
3479 - break;
3480 -#endif
3481 - default:
3482 - BUG();
3483 - }
3484 - *xfrm_dst_ops = *afinfo->dst_ops;
3485 - }
3486 - rtnl_unlock();
3487 -
3488 return err;
3489 }
3490 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3491 @@ -2892,22 +2871,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
3492 }
3493 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3494
3495 -static void __net_init xfrm_dst_ops_init(struct net *net)
3496 -{
3497 - struct xfrm_policy_afinfo *afinfo;
3498 -
3499 - rcu_read_lock();
3500 - afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
3501 - if (afinfo)
3502 - net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
3503 -#if IS_ENABLED(CONFIG_IPV6)
3504 - afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
3505 - if (afinfo)
3506 - net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
3507 -#endif
3508 - rcu_read_unlock();
3509 -}
3510 -
3511 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
3512 {
3513 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3514 @@ -3056,7 +3019,6 @@ static int __net_init xfrm_net_init(struct net *net)
3515 rv = xfrm_policy_init(net);
3516 if (rv < 0)
3517 goto out_policy;
3518 - xfrm_dst_ops_init(net);
3519 rv = xfrm_sysctl_init(net);
3520 if (rv < 0)
3521 goto out_sysctl;
3522 diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
3523 index 3d1984e59a30..e00bcd129336 100644
3524 --- a/scripts/recordmcount.c
3525 +++ b/scripts/recordmcount.c
3526 @@ -42,6 +42,7 @@
3527
3528 #ifndef EM_AARCH64
3529 #define EM_AARCH64 183
3530 +#define R_AARCH64_NONE 0
3531 #define R_AARCH64_ABS64 257
3532 #endif
3533
3534 @@ -160,6 +161,22 @@ static int make_nop_x86(void *map, size_t const offset)
3535 return 0;
3536 }
3537
3538 +static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
3539 +static int make_nop_arm64(void *map, size_t const offset)
3540 +{
3541 + uint32_t *ptr;
3542 +
3543 + ptr = map + offset;
3544 + /* bl <_mcount> is 0x94000000 before relocation */
3545 + if (*ptr != 0x94000000)
3546 + return -1;
3547 +
3548 + /* Convert to nop */
3549 + ulseek(fd_map, offset, SEEK_SET);
3550 + uwrite(fd_map, ideal_nop, 4);
3551 + return 0;
3552 +}
3553 +
3554 /*
3555 * Get the whole file as a programming convenience in order to avoid
3556 * malloc+lseek+read+free of many pieces. If successful, then mmap
3557 @@ -353,7 +370,12 @@ do_file(char const *const fname)
3558 altmcount = "__gnu_mcount_nc";
3559 break;
3560 case EM_AARCH64:
3561 - reltype = R_AARCH64_ABS64; gpfx = '_'; break;
3562 + reltype = R_AARCH64_ABS64;
3563 + make_nop = make_nop_arm64;
3564 + rel_type_nop = R_AARCH64_NONE;
3565 + ideal_nop = ideal_nop4_arm64;
3566 + gpfx = '_';
3567 + break;
3568 case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break;
3569 case EM_METAG: reltype = R_METAG_ADDR32;
3570 altmcount = "_mcount_wrapper";
3571 diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
3572 index 49b582a225b0..b9897e2be404 100644
3573 --- a/scripts/recordmcount.h
3574 +++ b/scripts/recordmcount.h
3575 @@ -377,7 +377,7 @@ static void nop_mcount(Elf_Shdr const *const relhdr,
3576
3577 if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
3578 if (make_nop)
3579 - ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset);
3580 + ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
3581 if (warn_on_notrace_sect && !once) {
3582 printf("Section %s has mcount callers being ignored\n",
3583 txtname);
3584 diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
3585 index 826470d7f000..96e2486a6fc4 100755
3586 --- a/scripts/recordmcount.pl
3587 +++ b/scripts/recordmcount.pl
3588 @@ -263,7 +263,8 @@ if ($arch eq "x86_64") {
3589
3590 } elsif ($arch eq "powerpc") {
3591 $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
3592 - $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
3593 + # See comment in the sparc64 section for why we use '\w'.
3594 + $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
3595 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
3596
3597 if ($bits == 64) {
3598 diff --git a/sound/core/control.c b/sound/core/control.c
3599 index 196a6fe100ca..a85d45595d02 100644
3600 --- a/sound/core/control.c
3601 +++ b/sound/core/control.c
3602 @@ -1405,6 +1405,8 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
3603 return -EFAULT;
3604 if (tlv.length < sizeof(unsigned int) * 2)
3605 return -EINVAL;
3606 + if (!tlv.numid)
3607 + return -EINVAL;
3608 down_read(&card->controls_rwsem);
3609 kctl = snd_ctl_find_numid(card, tlv.numid);
3610 if (kctl == NULL) {
3611 diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
3612 index 886be7da989d..38514ed6e55c 100644
3613 --- a/sound/core/hrtimer.c
3614 +++ b/sound/core/hrtimer.c
3615 @@ -90,7 +90,7 @@ static int snd_hrtimer_start(struct snd_timer *t)
3616 struct snd_hrtimer *stime = t->private_data;
3617
3618 atomic_set(&stime->running, 0);
3619 - hrtimer_cancel(&stime->hrt);
3620 + hrtimer_try_to_cancel(&stime->hrt);
3621 hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
3622 HRTIMER_MODE_REL);
3623 atomic_set(&stime->running, 1);
3624 @@ -101,6 +101,7 @@ static int snd_hrtimer_stop(struct snd_timer *t)
3625 {
3626 struct snd_hrtimer *stime = t->private_data;
3627 atomic_set(&stime->running, 0);
3628 + hrtimer_try_to_cancel(&stime->hrt);
3629 return 0;
3630 }
3631
3632 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
3633 index b48b434444ed..9630e9f72b7b 100644
3634 --- a/sound/core/pcm_compat.c
3635 +++ b/sound/core/pcm_compat.c
3636 @@ -255,10 +255,15 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
3637 if (! (runtime = substream->runtime))
3638 return -ENOTTY;
3639
3640 - /* only fifo_size is different, so just copy all */
3641 - data = memdup_user(data32, sizeof(*data32));
3642 - if (IS_ERR(data))
3643 - return PTR_ERR(data);
3644 + data = kmalloc(sizeof(*data), GFP_KERNEL);
3645 + if (!data)
3646 + return -ENOMEM;
3647 +
3648 + /* only fifo_size (RO from userspace) is different, so just copy all */
3649 + if (copy_from_user(data, data32, sizeof(*data32))) {
3650 + err = -EFAULT;
3651 + goto error;
3652 + }
3653
3654 if (refine)
3655 err = snd_pcm_hw_refine(substream, data);
3656 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
3657 index edbdab85fc02..bd4741442909 100644
3658 --- a/sound/core/seq/seq_clientmgr.c
3659 +++ b/sound/core/seq/seq_clientmgr.c
3660 @@ -1962,7 +1962,7 @@ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
3661 * No restrictions so for a user client we can clear
3662 * the whole fifo
3663 */
3664 - if (client->type == USER_CLIENT)
3665 + if (client->type == USER_CLIENT && client->data.user.fifo)
3666 snd_seq_fifo_clear(client->data.user.fifo);
3667 }
3668
3669 diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
3670 index 81f7c109dc46..65175902a68a 100644
3671 --- a/sound/core/seq/seq_compat.c
3672 +++ b/sound/core/seq/seq_compat.c
3673 @@ -49,11 +49,12 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
3674 struct snd_seq_port_info *data;
3675 mm_segment_t fs;
3676
3677 - data = memdup_user(data32, sizeof(*data32));
3678 - if (IS_ERR(data))
3679 - return PTR_ERR(data);
3680 + data = kmalloc(sizeof(*data), GFP_KERNEL);
3681 + if (!data)
3682 + return -ENOMEM;
3683
3684 - if (get_user(data->flags, &data32->flags) ||
3685 + if (copy_from_user(data, data32, sizeof(*data32)) ||
3686 + get_user(data->flags, &data32->flags) ||
3687 get_user(data->time_queue, &data32->time_queue))
3688 goto error;
3689 data->kernel = NULL;
3690 diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
3691 index a0cda38205b9..77ec21420355 100644
3692 --- a/sound/core/seq/seq_queue.c
3693 +++ b/sound/core/seq/seq_queue.c
3694 @@ -142,8 +142,10 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
3695 static void queue_delete(struct snd_seq_queue *q)
3696 {
3697 /* stop and release the timer */
3698 + mutex_lock(&q->timer_mutex);
3699 snd_seq_timer_stop(q->timer);
3700 snd_seq_timer_close(q);
3701 + mutex_unlock(&q->timer_mutex);
3702 /* wait until access free */
3703 snd_use_lock_sync(&q->use_lock);
3704 /* release resources... */
3705 diff --git a/sound/core/timer.c b/sound/core/timer.c
3706 index a9a1a047c521..a419878901c4 100644
3707 --- a/sound/core/timer.c
3708 +++ b/sound/core/timer.c
3709 @@ -65,6 +65,7 @@ struct snd_timer_user {
3710 int qtail;
3711 int qused;
3712 int queue_size;
3713 + bool disconnected;
3714 struct snd_timer_read *queue;
3715 struct snd_timer_tread *tqueue;
3716 spinlock_t qlock;
3717 @@ -73,7 +74,7 @@ struct snd_timer_user {
3718 struct timespec tstamp; /* trigger tstamp */
3719 wait_queue_head_t qchange_sleep;
3720 struct fasync_struct *fasync;
3721 - struct mutex tread_sem;
3722 + struct mutex ioctl_lock;
3723 };
3724
3725 /* list of timers */
3726 @@ -215,11 +216,13 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
3727 slave->slave_id == master->slave_id) {
3728 list_move_tail(&slave->open_list, &master->slave_list_head);
3729 spin_lock_irq(&slave_active_lock);
3730 + spin_lock(&master->timer->lock);
3731 slave->master = master;
3732 slave->timer = master->timer;
3733 if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
3734 list_add_tail(&slave->active_list,
3735 &master->slave_active_head);
3736 + spin_unlock(&master->timer->lock);
3737 spin_unlock_irq(&slave_active_lock);
3738 }
3739 }
3740 @@ -288,6 +291,9 @@ int snd_timer_open(struct snd_timer_instance **ti,
3741 mutex_unlock(&register_mutex);
3742 return -ENOMEM;
3743 }
3744 + /* take a card refcount for safe disconnection */
3745 + if (timer->card)
3746 + get_device(&timer->card->card_dev);
3747 timeri->slave_class = tid->dev_sclass;
3748 timeri->slave_id = slave_id;
3749 if (list_empty(&timer->open_list_head) && timer->hw.open)
3750 @@ -346,15 +352,21 @@ int snd_timer_close(struct snd_timer_instance *timeri)
3751 timer->hw.close)
3752 timer->hw.close(timer);
3753 /* remove slave links */
3754 + spin_lock_irq(&slave_active_lock);
3755 + spin_lock(&timer->lock);
3756 list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
3757 open_list) {
3758 - spin_lock_irq(&slave_active_lock);
3759 - _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
3760 list_move_tail(&slave->open_list, &snd_timer_slave_list);
3761 slave->master = NULL;
3762 slave->timer = NULL;
3763 - spin_unlock_irq(&slave_active_lock);
3764 + list_del_init(&slave->ack_list);
3765 + list_del_init(&slave->active_list);
3766 }
3767 + spin_unlock(&timer->lock);
3768 + spin_unlock_irq(&slave_active_lock);
3769 + /* release a card refcount for safe disconnection */
3770 + if (timer->card)
3771 + put_device(&timer->card->card_dev);
3772 mutex_unlock(&register_mutex);
3773 }
3774 out:
3775 @@ -441,9 +453,12 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
3776
3777 spin_lock_irqsave(&slave_active_lock, flags);
3778 timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
3779 - if (timeri->master)
3780 + if (timeri->master && timeri->timer) {
3781 + spin_lock(&timeri->timer->lock);
3782 list_add_tail(&timeri->active_list,
3783 &timeri->master->slave_active_head);
3784 + spin_unlock(&timeri->timer->lock);
3785 + }
3786 spin_unlock_irqrestore(&slave_active_lock, flags);
3787 return 1; /* delayed start */
3788 }
3789 @@ -467,6 +482,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
3790 timer = timeri->timer;
3791 if (timer == NULL)
3792 return -EINVAL;
3793 + if (timer->card && timer->card->shutdown)
3794 + return -ENODEV;
3795 spin_lock_irqsave(&timer->lock, flags);
3796 timeri->ticks = timeri->cticks = ticks;
3797 timeri->pticks = 0;
3798 @@ -489,6 +506,8 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
3799 if (!keep_flag) {
3800 spin_lock_irqsave(&slave_active_lock, flags);
3801 timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
3802 + list_del_init(&timeri->ack_list);
3803 + list_del_init(&timeri->active_list);
3804 spin_unlock_irqrestore(&slave_active_lock, flags);
3805 }
3806 goto __end;
3807 @@ -499,6 +518,10 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
3808 spin_lock_irqsave(&timer->lock, flags);
3809 list_del_init(&timeri->ack_list);
3810 list_del_init(&timeri->active_list);
3811 + if (timer->card && timer->card->shutdown) {
3812 + spin_unlock_irqrestore(&timer->lock, flags);
3813 + return 0;
3814 + }
3815 if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
3816 !(--timer->running)) {
3817 timer->hw.stop(timer);
3818 @@ -561,6 +584,8 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
3819 timer = timeri->timer;
3820 if (! timer)
3821 return -EINVAL;
3822 + if (timer->card && timer->card->shutdown)
3823 + return -ENODEV;
3824 spin_lock_irqsave(&timer->lock, flags);
3825 if (!timeri->cticks)
3826 timeri->cticks = 1;
3827 @@ -624,6 +649,9 @@ static void snd_timer_tasklet(unsigned long arg)
3828 unsigned long resolution, ticks;
3829 unsigned long flags;
3830
3831 + if (timer->card && timer->card->shutdown)
3832 + return;
3833 +
3834 spin_lock_irqsave(&timer->lock, flags);
3835 /* now process all callbacks */
3836 while (!list_empty(&timer->sack_list_head)) {
3837 @@ -664,6 +692,9 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
3838 if (timer == NULL)
3839 return;
3840
3841 + if (timer->card && timer->card->shutdown)
3842 + return;
3843 +
3844 spin_lock_irqsave(&timer->lock, flags);
3845
3846 /* remember the current resolution */
3847 @@ -694,7 +725,7 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
3848 } else {
3849 ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
3850 if (--timer->running)
3851 - list_del(&ti->active_list);
3852 + list_del_init(&ti->active_list);
3853 }
3854 if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
3855 (ti->flags & SNDRV_TIMER_IFLG_FAST))
3856 @@ -874,11 +905,28 @@ static int snd_timer_dev_register(struct snd_device *dev)
3857 return 0;
3858 }
3859
3860 +/* just for reference in snd_timer_dev_disconnect() below */
3861 +static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
3862 + int event, struct timespec *tstamp,
3863 + unsigned long resolution);
3864 +
3865 static int snd_timer_dev_disconnect(struct snd_device *device)
3866 {
3867 struct snd_timer *timer = device->device_data;
3868 + struct snd_timer_instance *ti;
3869 +
3870 mutex_lock(&register_mutex);
3871 list_del_init(&timer->device_list);
3872 + /* wake up pending sleepers */
3873 + list_for_each_entry(ti, &timer->open_list_head, open_list) {
3874 + /* FIXME: better to have a ti.disconnect() op */
3875 + if (ti->ccallback == snd_timer_user_ccallback) {
3876 + struct snd_timer_user *tu = ti->callback_data;
3877 +
3878 + tu->disconnected = true;
3879 + wake_up(&tu->qchange_sleep);
3880 + }
3881 + }
3882 mutex_unlock(&register_mutex);
3883 return 0;
3884 }
3885 @@ -889,6 +937,8 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam
3886 unsigned long resolution = 0;
3887 struct snd_timer_instance *ti, *ts;
3888
3889 + if (timer->card && timer->card->shutdown)
3890 + return;
3891 if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
3892 return;
3893 if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
3894 @@ -1047,6 +1097,8 @@ static void snd_timer_proc_read(struct snd_info_entry *entry,
3895
3896 mutex_lock(&register_mutex);
3897 list_for_each_entry(timer, &snd_timer_list, device_list) {
3898 + if (timer->card && timer->card->shutdown)
3899 + continue;
3900 switch (timer->tmr_class) {
3901 case SNDRV_TIMER_CLASS_GLOBAL:
3902 snd_iprintf(buffer, "G%i: ", timer->tmr_device);
3903 @@ -1253,7 +1305,7 @@ static int snd_timer_user_open(struct inode *inode, struct file *file)
3904 return -ENOMEM;
3905 spin_lock_init(&tu->qlock);
3906 init_waitqueue_head(&tu->qchange_sleep);
3907 - mutex_init(&tu->tread_sem);
3908 + mutex_init(&tu->ioctl_lock);
3909 tu->ticks = 1;
3910 tu->queue_size = 128;
3911 tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
3912 @@ -1273,8 +1325,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
3913 if (file->private_data) {
3914 tu = file->private_data;
3915 file->private_data = NULL;
3916 + mutex_lock(&tu->ioctl_lock);
3917 if (tu->timeri)
3918 snd_timer_close(tu->timeri);
3919 + mutex_unlock(&tu->ioctl_lock);
3920 kfree(tu->queue);
3921 kfree(tu->tqueue);
3922 kfree(tu);
3923 @@ -1512,7 +1566,6 @@ static int snd_timer_user_tselect(struct file *file,
3924 int err = 0;
3925
3926 tu = file->private_data;
3927 - mutex_lock(&tu->tread_sem);
3928 if (tu->timeri) {
3929 snd_timer_close(tu->timeri);
3930 tu->timeri = NULL;
3931 @@ -1556,7 +1609,6 @@ static int snd_timer_user_tselect(struct file *file,
3932 }
3933
3934 __err:
3935 - mutex_unlock(&tu->tread_sem);
3936 return err;
3937 }
3938
3939 @@ -1769,7 +1821,7 @@ enum {
3940 SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
3941 };
3942
3943 -static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
3944 +static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
3945 unsigned long arg)
3946 {
3947 struct snd_timer_user *tu;
3948 @@ -1786,17 +1838,11 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
3949 {
3950 int xarg;
3951
3952 - mutex_lock(&tu->tread_sem);
3953 - if (tu->timeri) { /* too late */
3954 - mutex_unlock(&tu->tread_sem);
3955 + if (tu->timeri) /* too late */
3956 return -EBUSY;
3957 - }
3958 - if (get_user(xarg, p)) {
3959 - mutex_unlock(&tu->tread_sem);
3960 + if (get_user(xarg, p))
3961 return -EFAULT;
3962 - }
3963 tu->tread = xarg ? 1 : 0;
3964 - mutex_unlock(&tu->tread_sem);
3965 return 0;
3966 }
3967 case SNDRV_TIMER_IOCTL_GINFO:
3968 @@ -1829,6 +1875,18 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
3969 return -ENOTTY;
3970 }
3971
3972 +static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
3973 + unsigned long arg)
3974 +{
3975 + struct snd_timer_user *tu = file->private_data;
3976 + long ret;
3977 +
3978 + mutex_lock(&tu->ioctl_lock);
3979 + ret = __snd_timer_user_ioctl(file, cmd, arg);
3980 + mutex_unlock(&tu->ioctl_lock);
3981 + return ret;
3982 +}
3983 +
3984 static int snd_timer_user_fasync(int fd, struct file * file, int on)
3985 {
3986 struct snd_timer_user *tu;
3987 @@ -1866,6 +1924,10 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
3988
3989 remove_wait_queue(&tu->qchange_sleep, &wait);
3990
3991 + if (tu->disconnected) {
3992 + err = -ENODEV;
3993 + break;
3994 + }
3995 if (signal_pending(current)) {
3996 err = -ERESTARTSYS;
3997 break;
3998 @@ -1915,6 +1977,8 @@ static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
3999 mask = 0;
4000 if (tu->qused)
4001 mask |= POLLIN | POLLRDNORM;
4002 + if (tu->disconnected)
4003 + mask |= POLLERR;
4004
4005 return mask;
4006 }
4007 diff --git a/sound/firewire/bebob/Makefile b/sound/firewire/bebob/Makefile
4008 index 6cf470c80d1f..af7ed6643266 100644
4009 --- a/sound/firewire/bebob/Makefile
4010 +++ b/sound/firewire/bebob/Makefile
4011 @@ -1,4 +1,4 @@
4012 snd-bebob-objs := bebob_command.o bebob_stream.o bebob_proc.o bebob_midi.o \
4013 bebob_pcm.o bebob_hwdep.o bebob_terratec.o bebob_yamaha.o \
4014 bebob_focusrite.o bebob_maudio.o bebob.o
4015 -obj-m += snd-bebob.o
4016 +obj-$(CONFIG_SND_BEBOB) += snd-bebob.o
4017 diff --git a/sound/firewire/dice/Makefile b/sound/firewire/dice/Makefile
4018 index 9ef228ef7baf..55b4be9b0034 100644
4019 --- a/sound/firewire/dice/Makefile
4020 +++ b/sound/firewire/dice/Makefile
4021 @@ -1,3 +1,3 @@
4022 snd-dice-objs := dice-transaction.o dice-stream.o dice-proc.o dice-midi.o \
4023 dice-pcm.o dice-hwdep.o dice.o
4024 -obj-m += snd-dice.o
4025 +obj-$(CONFIG_SND_DICE) += snd-dice.o
4026 diff --git a/sound/firewire/fireworks/Makefile b/sound/firewire/fireworks/Makefile
4027 index 0c7440826db8..15ef7f75a8ef 100644
4028 --- a/sound/firewire/fireworks/Makefile
4029 +++ b/sound/firewire/fireworks/Makefile
4030 @@ -1,4 +1,4 @@
4031 snd-fireworks-objs := fireworks_transaction.o fireworks_command.o \
4032 fireworks_stream.o fireworks_proc.o fireworks_midi.o \
4033 fireworks_pcm.o fireworks_hwdep.o fireworks.o
4034 -obj-m += snd-fireworks.o
4035 +obj-$(CONFIG_SND_FIREWORKS) += snd-fireworks.o
4036 diff --git a/sound/firewire/oxfw/Makefile b/sound/firewire/oxfw/Makefile
4037 index a926850864f6..06ff50f4e6c0 100644
4038 --- a/sound/firewire/oxfw/Makefile
4039 +++ b/sound/firewire/oxfw/Makefile
4040 @@ -1,3 +1,3 @@
4041 snd-oxfw-objs := oxfw-command.o oxfw-stream.o oxfw-control.o oxfw-pcm.o \
4042 oxfw-proc.o oxfw-midi.o oxfw-hwdep.o oxfw.o
4043 -obj-m += snd-oxfw.o
4044 +obj-$(CONFIG_SND_OXFW) += snd-oxfw.o
4045 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4046 index 44dfc7b92bc3..09920ba55ba1 100644
4047 --- a/sound/pci/hda/hda_intel.c
4048 +++ b/sound/pci/hda/hda_intel.c
4049 @@ -329,6 +329,7 @@ enum {
4050
4051 #define AZX_DCAPS_PRESET_CTHDA \
4052 (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB |\
4053 + AZX_DCAPS_NO_64BIT |\
4054 AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF)
4055
4056 /*
4057 @@ -839,6 +840,36 @@ static int azx_resume(struct device *dev)
4058 }
4059 #endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
4060
4061 +#ifdef CONFIG_PM_SLEEP
4062 +/* put codec down to D3 at hibernation for Intel SKL+;
4063 + * otherwise BIOS may still access the codec and screw up the driver
4064 + */
4065 +#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
4066 +#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
4067 +#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
4068 +#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
4069 +
4070 +static int azx_freeze_noirq(struct device *dev)
4071 +{
4072 + struct pci_dev *pci = to_pci_dev(dev);
4073 +
4074 + if (IS_SKL_PLUS(pci))
4075 + pci_set_power_state(pci, PCI_D3hot);
4076 +
4077 + return 0;
4078 +}
4079 +
4080 +static int azx_thaw_noirq(struct device *dev)
4081 +{
4082 + struct pci_dev *pci = to_pci_dev(dev);
4083 +
4084 + if (IS_SKL_PLUS(pci))
4085 + pci_set_power_state(pci, PCI_D0);
4086 +
4087 + return 0;
4088 +}
4089 +#endif /* CONFIG_PM_SLEEP */
4090 +
4091 #ifdef CONFIG_PM
4092 static int azx_runtime_suspend(struct device *dev)
4093 {
4094 @@ -939,6 +970,10 @@ static int azx_runtime_idle(struct device *dev)
4095
4096 static const struct dev_pm_ops azx_pm = {
4097 SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
4098 +#ifdef CONFIG_PM_SLEEP
4099 + .freeze_noirq = azx_freeze_noirq,
4100 + .thaw_noirq = azx_thaw_noirq,
4101 +#endif
4102 SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
4103 };
4104
4105 @@ -1937,9 +1972,17 @@ out_free:
4106 static void azx_remove(struct pci_dev *pci)
4107 {
4108 struct snd_card *card = pci_get_drvdata(pci);
4109 + struct azx *chip;
4110 + struct hda_intel *hda;
4111 +
4112 + if (card) {
4113 + /* flush the pending probing work */
4114 + chip = card->private_data;
4115 + hda = container_of(chip, struct hda_intel, chip);
4116 + flush_work(&hda->probe_work);
4117
4118 - if (card)
4119 snd_card_free(card);
4120 + }
4121 }
4122
4123 static void azx_shutdown(struct pci_dev *pci)
4124 @@ -1976,6 +2019,11 @@ static const struct pci_device_id azx_ids[] = {
4125 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
4126 { PCI_DEVICE(0x8086, 0x8d21),
4127 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
4128 + /* Lewisburg */
4129 + { PCI_DEVICE(0x8086, 0xa1f0),
4130 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
4131 + { PCI_DEVICE(0x8086, 0xa270),
4132 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
4133 /* Lynx Point-LP */
4134 { PCI_DEVICE(0x8086, 0x9c20),
4135 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
4136 @@ -2156,11 +2204,13 @@ static const struct pci_device_id azx_ids[] = {
4137 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
4138 .class_mask = 0xffffff,
4139 .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
4140 + AZX_DCAPS_NO_64BIT |
4141 AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
4142 #else
4143 /* this entry seems still valid -- i.e. without emu20kx chip */
4144 { PCI_DEVICE(0x1102, 0x0009),
4145 .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
4146 + AZX_DCAPS_NO_64BIT |
4147 AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
4148 #endif
4149 /* CM8888 */
4150 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4151 index 57bb5a559f8e..8189f02f8446 100644
4152 --- a/sound/pci/hda/patch_realtek.c
4153 +++ b/sound/pci/hda/patch_realtek.c
4154 @@ -111,6 +111,7 @@ struct alc_spec {
4155 void (*power_hook)(struct hda_codec *codec);
4156 #endif
4157 void (*shutup)(struct hda_codec *codec);
4158 + void (*reboot_notify)(struct hda_codec *codec);
4159
4160 int init_amp;
4161 int codec_variant; /* flag for other variants */
4162 @@ -773,6 +774,25 @@ static inline void alc_shutup(struct hda_codec *codec)
4163 snd_hda_shutup_pins(codec);
4164 }
4165
4166 +static void alc_reboot_notify(struct hda_codec *codec)
4167 +{
4168 + struct alc_spec *spec = codec->spec;
4169 +
4170 + if (spec && spec->reboot_notify)
4171 + spec->reboot_notify(codec);
4172 + else
4173 + alc_shutup(codec);
4174 +}
4175 +
4176 +/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
4177 +static void alc_d3_at_reboot(struct hda_codec *codec)
4178 +{
4179 + snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
4180 + snd_hda_codec_write(codec, codec->core.afg, 0,
4181 + AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
4182 + msleep(10);
4183 +}
4184 +
4185 #define alc_free snd_hda_gen_free
4186
4187 #ifdef CONFIG_PM
4188 @@ -818,7 +838,7 @@ static const struct hda_codec_ops alc_patch_ops = {
4189 .suspend = alc_suspend,
4190 .check_power_status = snd_hda_gen_check_power_status,
4191 #endif
4192 - .reboot_notify = alc_shutup,
4193 + .reboot_notify = alc_reboot_notify,
4194 };
4195
4196
4197 @@ -1767,10 +1787,12 @@ enum {
4198 ALC889_FIXUP_MBA11_VREF,
4199 ALC889_FIXUP_MBA21_VREF,
4200 ALC889_FIXUP_MP11_VREF,
4201 + ALC889_FIXUP_MP41_VREF,
4202 ALC882_FIXUP_INV_DMIC,
4203 ALC882_FIXUP_NO_PRIMARY_HP,
4204 ALC887_FIXUP_ASUS_BASS,
4205 ALC887_FIXUP_BASS_CHMAP,
4206 + ALC882_FIXUP_DISABLE_AAMIX,
4207 };
4208
4209 static void alc889_fixup_coef(struct hda_codec *codec,
4210 @@ -1854,7 +1876,7 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
4211 const struct hda_fixup *fix, int action)
4212 {
4213 struct alc_spec *spec = codec->spec;
4214 - static hda_nid_t nids[2] = { 0x14, 0x15 };
4215 + static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 };
4216 int i;
4217
4218 if (action != HDA_FIXUP_ACT_INIT)
4219 @@ -1932,6 +1954,8 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
4220
4221 static void alc_fixup_bass_chmap(struct hda_codec *codec,
4222 const struct hda_fixup *fix, int action);
4223 +static void alc_fixup_disable_aamix(struct hda_codec *codec,
4224 + const struct hda_fixup *fix, int action);
4225
4226 static const struct hda_fixup alc882_fixups[] = {
4227 [ALC882_FIXUP_ABIT_AW9D_MAX] = {
4228 @@ -2142,6 +2166,12 @@ static const struct hda_fixup alc882_fixups[] = {
4229 .chained = true,
4230 .chain_id = ALC885_FIXUP_MACPRO_GPIO,
4231 },
4232 + [ALC889_FIXUP_MP41_VREF] = {
4233 + .type = HDA_FIXUP_FUNC,
4234 + .v.func = alc889_fixup_mbp_vref,
4235 + .chained = true,
4236 + .chain_id = ALC885_FIXUP_MACPRO_GPIO,
4237 + },
4238 [ALC882_FIXUP_INV_DMIC] = {
4239 .type = HDA_FIXUP_FUNC,
4240 .v.func = alc_fixup_inv_dmic,
4241 @@ -2163,6 +2193,10 @@ static const struct hda_fixup alc882_fixups[] = {
4242 .type = HDA_FIXUP_FUNC,
4243 .v.func = alc_fixup_bass_chmap,
4244 },
4245 + [ALC882_FIXUP_DISABLE_AAMIX] = {
4246 + .type = HDA_FIXUP_FUNC,
4247 + .v.func = alc_fixup_disable_aamix,
4248 + },
4249 };
4250
4251 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
4252 @@ -2220,7 +2254,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
4253 SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
4254 SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
4255 SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
4256 - SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
4257 + SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF),
4258 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
4259 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
4260 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
4261 @@ -2230,6 +2264,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
4262 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
4263 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
4264 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
4265 + SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
4266 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
4267 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
4268 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
4269 @@ -4194,6 +4229,8 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
4270 struct alc_spec *spec = codec->spec;
4271
4272 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4273 + spec->shutup = alc_no_shutup; /* reduce click noise */
4274 + spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
4275 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
4276 codec->power_save_node = 0; /* avoid click noises */
4277 snd_hda_apply_pincfgs(codec, pincfgs);
4278 @@ -4525,6 +4562,7 @@ enum {
4279 ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
4280 ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
4281 ALC292_FIXUP_TPT440_DOCK,
4282 + ALC292_FIXUP_TPT440,
4283 ALC283_FIXUP_BXBT2807_MIC,
4284 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
4285 ALC282_FIXUP_ASPIRE_V5_PINS,
4286 @@ -4993,6 +5031,12 @@ static const struct hda_fixup alc269_fixups[] = {
4287 .chained = true,
4288 .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
4289 },
4290 + [ALC292_FIXUP_TPT440] = {
4291 + .type = HDA_FIXUP_FUNC,
4292 + .v.func = alc_fixup_disable_aamix,
4293 + .chained = true,
4294 + .chain_id = ALC292_FIXUP_TPT440_DOCK,
4295 + },
4296 [ALC283_FIXUP_BXBT2807_MIC] = {
4297 .type = HDA_FIXUP_PINS,
4298 .v.pins = (const struct hda_pintbl[]) {
4299 @@ -5107,6 +5151,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4300 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
4301 SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
4302 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
4303 + SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
4304 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
4305 SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
4306 SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
4307 @@ -5116,6 +5161,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4308 SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4309 SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
4310 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
4311 + SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X),
4312 SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X),
4313 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
4314 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4315 @@ -5227,12 +5273,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4316 SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
4317 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
4318 SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
4319 - SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
4320 + SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440),
4321 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
4322 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
4323 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
4324 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
4325 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4326 + SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
4327 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
4328 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
4329 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
4330 @@ -5322,6 +5369,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4331 {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
4332 {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
4333 {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
4334 + {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
4335 {}
4336 };
4337
4338 @@ -5448,6 +5496,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4339 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4340 ALC255_STANDARD_PINS,
4341 {0x12, 0x90a60170},
4342 + {0x14, 0x90171130},
4343 + {0x21, 0x02211040}),
4344 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4345 + {0x12, 0x90a60170},
4346 {0x14, 0x90170140},
4347 {0x17, 0x40000000},
4348 {0x1d, 0x40700001},
4349 @@ -6456,6 +6508,7 @@ static const struct hda_fixup alc662_fixups[] = {
4350 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4351 SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
4352 SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
4353 + SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
4354 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
4355 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
4356 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
4357 @@ -6473,6 +6526,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4358 SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4359 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
4360 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
4361 + SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
4362 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
4363 SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
4364 SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
4365 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
4366 index b1bc66783974..8e7d4c087a7a 100644
4367 --- a/sound/pci/hda/patch_sigmatel.c
4368 +++ b/sound/pci/hda/patch_sigmatel.c
4369 @@ -702,6 +702,7 @@ static bool hp_bnb2011_with_dock(struct hda_codec *codec)
4370 static bool hp_blike_system(u32 subsystem_id)
4371 {
4372 switch (subsystem_id) {
4373 + case 0x103c1473: /* HP ProBook 6550b */
4374 case 0x103c1520:
4375 case 0x103c1521:
4376 case 0x103c1523:
4377 @@ -3109,6 +3110,29 @@ static void stac92hd71bxx_fixup_hp_hdx(struct hda_codec *codec,
4378 spec->gpio_led = 0x08;
4379 }
4380
4381 +static bool is_hp_output(struct hda_codec *codec, hda_nid_t pin)
4382 +{
4383 + unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
4384 +
4385 + /* count line-out, too, as BIOS sets often so */
4386 + return get_defcfg_connect(pin_cfg) != AC_JACK_PORT_NONE &&
4387 + (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
4388 + get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT);
4389 +}
4390 +
4391 +static void fixup_hp_headphone(struct hda_codec *codec, hda_nid_t pin)
4392 +{
4393 + unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
4394 +
4395 + /* It was changed in the BIOS to just satisfy MS DTM.
4396 + * Lets turn it back into slaved HP
4397 + */
4398 + pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) |
4399 + (AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT);
4400 + pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) |
4401 + 0x1f;
4402 + snd_hda_codec_set_pincfg(codec, pin, pin_cfg);
4403 +}
4404
4405 static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
4406 const struct hda_fixup *fix, int action)
4407 @@ -3118,22 +3142,12 @@ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
4408 if (action != HDA_FIXUP_ACT_PRE_PROBE)
4409 return;
4410
4411 - if (hp_blike_system(codec->core.subsystem_id)) {
4412 - unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f);
4413 - if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
4414 - get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER ||
4415 - get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) {
4416 - /* It was changed in the BIOS to just satisfy MS DTM.
4417 - * Lets turn it back into slaved HP
4418 - */
4419 - pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE))
4420 - | (AC_JACK_HP_OUT <<
4421 - AC_DEFCFG_DEVICE_SHIFT);
4422 - pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC
4423 - | AC_DEFCFG_SEQUENCE)))
4424 - | 0x1f;
4425 - snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg);
4426 - }
4427 + /* when both output A and F are assigned, these are supposedly
4428 + * dock and built-in headphones; fix both pin configs
4429 + */
4430 + if (is_hp_output(codec, 0x0a) && is_hp_output(codec, 0x0f)) {
4431 + fixup_hp_headphone(codec, 0x0a);
4432 + fixup_hp_headphone(codec, 0x0f);
4433 }
4434
4435 if (find_mute_led_cfg(codec, 1))
4436 diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
4437 index 2306ccf7281e..77c963ced67a 100644
4438 --- a/sound/pci/rme96.c
4439 +++ b/sound/pci/rme96.c
4440 @@ -741,10 +741,11 @@ snd_rme96_playback_setrate(struct rme96 *rme96,
4441 {
4442 /* change to/from double-speed: reset the DAC (if available) */
4443 snd_rme96_reset_dac(rme96);
4444 + return 1; /* need to restore volume */
4445 } else {
4446 writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
4447 + return 0;
4448 }
4449 - return 0;
4450 }
4451
4452 static int
4453 @@ -980,6 +981,7 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
4454 struct rme96 *rme96 = snd_pcm_substream_chip(substream);
4455 struct snd_pcm_runtime *runtime = substream->runtime;
4456 int err, rate, dummy;
4457 + bool apply_dac_volume = false;
4458
4459 runtime->dma_area = (void __force *)(rme96->iobase +
4460 RME96_IO_PLAY_BUFFER);
4461 @@ -993,24 +995,26 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
4462 {
4463 /* slave clock */
4464 if ((int)params_rate(params) != rate) {
4465 - spin_unlock_irq(&rme96->lock);
4466 - return -EIO;
4467 - }
4468 - } else if ((err = snd_rme96_playback_setrate(rme96, params_rate(params))) < 0) {
4469 - spin_unlock_irq(&rme96->lock);
4470 - return err;
4471 - }
4472 - if ((err = snd_rme96_playback_setformat(rme96, params_format(params))) < 0) {
4473 - spin_unlock_irq(&rme96->lock);
4474 - return err;
4475 + err = -EIO;
4476 + goto error;
4477 + }
4478 + } else {
4479 + err = snd_rme96_playback_setrate(rme96, params_rate(params));
4480 + if (err < 0)
4481 + goto error;
4482 + apply_dac_volume = err > 0; /* need to restore volume later? */
4483 }
4484 +
4485 + err = snd_rme96_playback_setformat(rme96, params_format(params));
4486 + if (err < 0)
4487 + goto error;
4488 snd_rme96_setframelog(rme96, params_channels(params), 1);
4489 if (rme96->capture_periodsize != 0) {
4490 if (params_period_size(params) << rme96->playback_frlog !=
4491 rme96->capture_periodsize)
4492 {
4493 - spin_unlock_irq(&rme96->lock);
4494 - return -EBUSY;
4495 + err = -EBUSY;
4496 + goto error;
4497 }
4498 }
4499 rme96->playback_periodsize =
4500 @@ -1021,9 +1025,16 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
4501 rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP);
4502 writel(rme96->wcreg |= rme96->wcreg_spdif_stream, rme96->iobase + RME96_IO_CONTROL_REGISTER);
4503 }
4504 +
4505 + err = 0;
4506 + error:
4507 spin_unlock_irq(&rme96->lock);
4508 -
4509 - return 0;
4510 + if (apply_dac_volume) {
4511 + usleep_range(3000, 10000);
4512 + snd_rme96_apply_dac_volume(rme96);
4513 + }
4514 +
4515 + return err;
4516 }
4517
4518 static int
4519 diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
4520 index ee91edcf3cb0..13191891fc4c 100644
4521 --- a/sound/soc/codecs/arizona.c
4522 +++ b/sound/soc/codecs/arizona.c
4523 @@ -1354,7 +1354,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
4524 bool reconfig;
4525 unsigned int aif_tx_state, aif_rx_state;
4526
4527 - if (params_rate(params) % 8000)
4528 + if (params_rate(params) % 4000)
4529 rates = &arizona_44k1_bclk_rates[0];
4530 else
4531 rates = &arizona_48k_bclk_rates[0];
4532 diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
4533 index c5f35a07e8e4..3ad7f5be1cfa 100644
4534 --- a/sound/soc/codecs/es8328.c
4535 +++ b/sound/soc/codecs/es8328.c
4536 @@ -85,7 +85,15 @@ static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0);
4537 static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
4538 static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0);
4539
4540 -static const int deemph_settings[] = { 0, 32000, 44100, 48000 };
4541 +static const struct {
4542 + int rate;
4543 + unsigned int val;
4544 +} deemph_settings[] = {
4545 + { 0, ES8328_DACCONTROL6_DEEMPH_OFF },
4546 + { 32000, ES8328_DACCONTROL6_DEEMPH_32k },
4547 + { 44100, ES8328_DACCONTROL6_DEEMPH_44_1k },
4548 + { 48000, ES8328_DACCONTROL6_DEEMPH_48k },
4549 +};
4550
4551 static int es8328_set_deemph(struct snd_soc_codec *codec)
4552 {
4553 @@ -97,21 +105,22 @@ static int es8328_set_deemph(struct snd_soc_codec *codec)
4554 * rate.
4555 */
4556 if (es8328->deemph) {
4557 - best = 1;
4558 - for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
4559 - if (abs(deemph_settings[i] - es8328->playback_fs) <
4560 - abs(deemph_settings[best] - es8328->playback_fs))
4561 + best = 0;
4562 + for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) {
4563 + if (abs(deemph_settings[i].rate - es8328->playback_fs) <
4564 + abs(deemph_settings[best].rate - es8328->playback_fs))
4565 best = i;
4566 }
4567
4568 - val = best << 1;
4569 + val = deemph_settings[best].val;
4570 } else {
4571 - val = 0;
4572 + val = ES8328_DACCONTROL6_DEEMPH_OFF;
4573 }
4574
4575 dev_dbg(codec->dev, "Set deemphasis %d\n", val);
4576
4577 - return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 0x6, val);
4578 + return snd_soc_update_bits(codec, ES8328_DACCONTROL6,
4579 + ES8328_DACCONTROL6_DEEMPH_MASK, val);
4580 }
4581
4582 static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
4583 diff --git a/sound/soc/codecs/es8328.h b/sound/soc/codecs/es8328.h
4584 index cb36afe10c0e..156c748c89c7 100644
4585 --- a/sound/soc/codecs/es8328.h
4586 +++ b/sound/soc/codecs/es8328.h
4587 @@ -153,6 +153,7 @@ int es8328_probe(struct device *dev, struct regmap *regmap);
4588 #define ES8328_DACCONTROL6_CLICKFREE (1 << 3)
4589 #define ES8328_DACCONTROL6_DAC_INVR (1 << 4)
4590 #define ES8328_DACCONTROL6_DAC_INVL (1 << 5)
4591 +#define ES8328_DACCONTROL6_DEEMPH_MASK (3 << 6)
4592 #define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6)
4593 #define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6)
4594 #define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6)
4595 diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
4596 index 118b0034ba23..154c1a24a303 100644
4597 --- a/sound/soc/codecs/wm8962.c
4598 +++ b/sound/soc/codecs/wm8962.c
4599 @@ -365,8 +365,8 @@ static struct reg_default wm8962_reg[] = {
4600 { 16924, 0x0059 }, /* R16924 - HDBASS_PG_1 */
4601 { 16925, 0x999A }, /* R16925 - HDBASS_PG_0 */
4602
4603 - { 17048, 0x0083 }, /* R17408 - HPF_C_1 */
4604 - { 17049, 0x98AD }, /* R17409 - HPF_C_0 */
4605 + { 17408, 0x0083 }, /* R17408 - HPF_C_1 */
4606 + { 17409, 0x98AD }, /* R17409 - HPF_C_0 */
4607
4608 { 17920, 0x007F }, /* R17920 - ADCL_RETUNE_C1_1 */
4609 { 17921, 0xFFFF }, /* R17921 - ADCL_RETUNE_C1_0 */
4610 diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
4611 index ff0e4646b934..88317c1b7f96 100644
4612 --- a/sound/soc/codecs/wm8974.c
4613 +++ b/sound/soc/codecs/wm8974.c
4614 @@ -575,6 +575,7 @@ static const struct regmap_config wm8974_regmap = {
4615 .max_register = WM8974_MONOMIX,
4616 .reg_defaults = wm8974_reg_defaults,
4617 .num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults),
4618 + .cache_type = REGCACHE_FLAT,
4619 };
4620
4621 static int wm8974_probe(struct snd_soc_codec *codec)
4622 diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
4623 index 23c91fa65ab8..76dd8c6aa4f0 100644
4624 --- a/sound/soc/davinci/davinci-mcasp.c
4625 +++ b/sound/soc/davinci/davinci-mcasp.c
4626 @@ -221,8 +221,8 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp)
4627
4628 /* wait for XDATA to be cleared */
4629 cnt = 0;
4630 - while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) &
4631 - ~XRDATA) && (cnt < 100000))
4632 + while ((mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & XRDATA) &&
4633 + (cnt < 100000))
4634 cnt++;
4635
4636 /* Release TX state machine */
4637 diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
4638 index 8c7dc51b1c4f..f7a0cb786d5c 100644
4639 --- a/sound/soc/sh/rcar/gen.c
4640 +++ b/sound/soc/sh/rcar/gen.c
4641 @@ -214,7 +214,7 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
4642 RSND_GEN_S_REG(SCU_SYS_STATUS0, 0x1c8),
4643 RSND_GEN_S_REG(SCU_SYS_INT_EN0, 0x1cc),
4644 RSND_GEN_S_REG(SCU_SYS_STATUS1, 0x1d0),
4645 - RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1c4),
4646 + RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1d4),
4647 RSND_GEN_M_REG(SRC_SWRSR, 0x200, 0x40),
4648 RSND_GEN_M_REG(SRC_SRCIR, 0x204, 0x40),
4649 RSND_GEN_M_REG(SRC_ADINR, 0x214, 0x40),
4650 diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
4651 index 025c38fbe3c0..1874cf0e6cab 100644
4652 --- a/sound/soc/soc-compress.c
4653 +++ b/sound/soc/soc-compress.c
4654 @@ -623,6 +623,7 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
4655 struct snd_pcm *be_pcm;
4656 char new_name[64];
4657 int ret = 0, direction = 0;
4658 + int playback = 0, capture = 0;
4659
4660 if (rtd->num_codecs > 1) {
4661 dev_err(rtd->card->dev, "Multicodec not supported for compressed stream\n");
4662 @@ -634,11 +635,27 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
4663 rtd->dai_link->stream_name, codec_dai->name, num);
4664
4665 if (codec_dai->driver->playback.channels_min)
4666 + playback = 1;
4667 + if (codec_dai->driver->capture.channels_min)
4668 + capture = 1;
4669 +
4670 + capture = capture && cpu_dai->driver->capture.channels_min;
4671 + playback = playback && cpu_dai->driver->playback.channels_min;
4672 +
4673 + /*
4674 + * Compress devices are unidirectional so only one of the directions
4675 + * should be set, check for that (xor)
4676 + */
4677 + if (playback + capture != 1) {
4678 + dev_err(rtd->card->dev, "Invalid direction for compress P %d, C %d\n",
4679 + playback, capture);
4680 + return -EINVAL;
4681 + }
4682 +
4683 + if(playback)
4684 direction = SND_COMPRESS_PLAYBACK;
4685 - else if (codec_dai->driver->capture.channels_min)
4686 - direction = SND_COMPRESS_CAPTURE;
4687 else
4688 - return -EINVAL;
4689 + direction = SND_COMPRESS_CAPTURE;
4690
4691 compr = kzalloc(sizeof(*compr), GFP_KERNEL);
4692 if (compr == NULL) {
4693 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
4694 index cd8ed2e393a2..f9a9752d4dbc 100644
4695 --- a/sound/usb/mixer.c
4696 +++ b/sound/usb/mixer.c
4697 @@ -1336,6 +1336,8 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
4698 }
4699 }
4700
4701 + snd_usb_mixer_fu_apply_quirk(state->mixer, cval, unitid, kctl);
4702 +
4703 range = (cval->max - cval->min) / cval->res;
4704 /*
4705 * Are there devices with volume range more than 255? I use a bit more
4706 diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
4707 index 6a803eff87f7..ddca6547399b 100644
4708 --- a/sound/usb/mixer_maps.c
4709 +++ b/sound/usb/mixer_maps.c
4710 @@ -348,13 +348,6 @@ static struct usbmix_name_map bose_companion5_map[] = {
4711 { 0 } /* terminator */
4712 };
4713
4714 -/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
4715 -static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
4716 -static struct usbmix_name_map dragonfly_1_2_map[] = {
4717 - { 7, NULL, .dB = &dragonfly_1_2_dB },
4718 - { 0 } /* terminator */
4719 -};
4720 -
4721 /*
4722 * Control map entries
4723 */
4724 @@ -470,11 +463,6 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
4725 .id = USB_ID(0x05a7, 0x1020),
4726 .map = bose_companion5_map,
4727 },
4728 - {
4729 - /* Dragonfly DAC 1.2 */
4730 - .id = USB_ID(0x21b4, 0x0081),
4731 - .map = dragonfly_1_2_map,
4732 - },
4733 { 0 } /* terminator */
4734 };
4735
4736 diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
4737 index 337c317ead6f..db9547d04f38 100644
4738 --- a/sound/usb/mixer_quirks.c
4739 +++ b/sound/usb/mixer_quirks.c
4740 @@ -37,6 +37,7 @@
4741 #include <sound/control.h>
4742 #include <sound/hwdep.h>
4743 #include <sound/info.h>
4744 +#include <sound/tlv.h>
4745
4746 #include "usbaudio.h"
4747 #include "mixer.h"
4748 @@ -802,7 +803,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
4749 return 0;
4750
4751 kcontrol->private_value &= ~(0xff << 24);
4752 - kcontrol->private_value |= newval;
4753 + kcontrol->private_value |= (unsigned int)newval << 24;
4754 err = snd_ni_update_cur_val(list);
4755 return err < 0 ? err : 1;
4756 }
4757 @@ -1843,3 +1844,39 @@ void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
4758 }
4759 }
4760
4761 +static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
4762 + struct snd_kcontrol *kctl)
4763 +{
4764 + /* Approximation using 10 ranges based on output measurement on hw v1.2.
4765 + * This seems close to the cubic mapping e.g. alsamixer uses. */
4766 + static const DECLARE_TLV_DB_RANGE(scale,
4767 + 0, 1, TLV_DB_MINMAX_ITEM(-5300, -4970),
4768 + 2, 5, TLV_DB_MINMAX_ITEM(-4710, -4160),
4769 + 6, 7, TLV_DB_MINMAX_ITEM(-3884, -3710),
4770 + 8, 14, TLV_DB_MINMAX_ITEM(-3443, -2560),
4771 + 15, 16, TLV_DB_MINMAX_ITEM(-2475, -2324),
4772 + 17, 19, TLV_DB_MINMAX_ITEM(-2228, -2031),
4773 + 20, 26, TLV_DB_MINMAX_ITEM(-1910, -1393),
4774 + 27, 31, TLV_DB_MINMAX_ITEM(-1322, -1032),
4775 + 32, 40, TLV_DB_MINMAX_ITEM(-968, -490),
4776 + 41, 50, TLV_DB_MINMAX_ITEM(-441, 0),
4777 + );
4778 +
4779 + usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk\n");
4780 + kctl->tlv.p = scale;
4781 + kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
4782 + kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
4783 +}
4784 +
4785 +void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
4786 + struct usb_mixer_elem_info *cval, int unitid,
4787 + struct snd_kcontrol *kctl)
4788 +{
4789 + switch (mixer->chip->usb_id) {
4790 + case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
4791 + if (unitid == 7 && cval->min == 0 && cval->max == 50)
4792 + snd_dragonfly_quirk_db_scale(mixer, kctl);
4793 + break;
4794 + }
4795 +}
4796 +
4797 diff --git a/sound/usb/mixer_quirks.h b/sound/usb/mixer_quirks.h
4798 index bdbfab093816..177c329cd4dd 100644
4799 --- a/sound/usb/mixer_quirks.h
4800 +++ b/sound/usb/mixer_quirks.h
4801 @@ -9,5 +9,9 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
4802 void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
4803 int unitid);
4804
4805 +void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
4806 + struct usb_mixer_elem_info *cval, int unitid,
4807 + struct snd_kcontrol *kctl);
4808 +
4809 #endif /* SND_USB_MIXER_QUIRKS_H */
4810
4811 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
4812 index eef9b8e4b949..fb9a8a5787a6 100644
4813 --- a/sound/usb/quirks.c
4814 +++ b/sound/usb/quirks.c
4815 @@ -1122,6 +1122,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
4816 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
4817 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
4818 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
4819 + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
4820 return true;
4821 }
4822 return false;
4823 @@ -1265,6 +1266,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
4824 case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
4825 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
4826 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
4827 + case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
4828 if (fp->altsetting == 2)
4829 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
4830 break;