Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0187-4.9.88-all-fixes.patch

Revision 3164
Wed Aug 8 14:17:21 2018 UTC by niro
File size: 124544 bytes
-linux-4.9.88
1 diff --git a/Makefile b/Makefile
2 index 3043937a65d1..1512ebceffda 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 87
9 +SUBLEVEL = 88
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
14 index 9ff92050053c..fa7f308c9027 100644
15 --- a/arch/arm/mach-omap2/omap-secure.c
16 +++ b/arch/arm/mach-omap2/omap-secure.c
17 @@ -73,6 +73,7 @@ phys_addr_t omap_secure_ram_mempool_base(void)
18 return omap_secure_memblock_base;
19 }
20
21 +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
22 u32 omap3_save_secure_ram(void __iomem *addr, int size)
23 {
24 u32 ret;
25 @@ -91,6 +92,7 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size)
26
27 return ret;
28 }
29 +#endif
30
31 /**
32 * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
33 diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
34 index 9ab48ff80c1c..6d11ae581ea7 100644
35 --- a/arch/mips/ath25/board.c
36 +++ b/arch/mips/ath25/board.c
37 @@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
38 }
39
40 board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
41 + if (!board_data)
42 + goto error;
43 ath25_board.config = (struct ath25_boarddata *)board_data;
44 memcpy_fromio(board_data, bcfg, 0x100);
45 if (broken_boarddata) {
46 diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
47 index c1eb1ff7c800..6ed1ded87b8f 100644
48 --- a/arch/mips/cavium-octeon/octeon-irq.c
49 +++ b/arch/mips/cavium-octeon/octeon-irq.c
50 @@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
51 }
52
53 host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
54 + if (!host_data)
55 + return -ENOMEM;
56 raw_spin_lock_init(&host_data->lock);
57
58 addr = of_get_address(ciu_node, 0, NULL, NULL);
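
Both MIPS fixes above add the same missing check: kzalloc() can fail, and the old code dereferenced the result unconditionally. A minimal kernel-style sketch of the pattern (the struct and function names are hypothetical):

    #include <linux/slab.h>

    struct board_state { int dummy; };	/* placeholder type */

    static int alloc_board_state(struct board_state **out)
    {
    	struct board_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

    	if (!s)
    		return -ENOMEM;	/* bail out before the first dereference */
    	*out = s;
    	return 0;
    }
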
59 diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
60 index 47c9646f93b3..d4a293b68249 100644
61 --- a/arch/mips/kernel/smp-bmips.c
62 +++ b/arch/mips/kernel/smp-bmips.c
63 @@ -166,11 +166,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
64 return;
65 }
66
67 - if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
68 - "smp_ipi0", NULL))
69 + if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
70 + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
71 panic("Can't request IPI0 interrupt");
72 - if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
73 - "smp_ipi1", NULL))
74 + if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
75 + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
76 panic("Can't request IPI1 interrupt");
77 }
78
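
The flag change matters because suspend_device_irqs() disables every interrupt not marked IRQF_NO_SUSPEND during system suspend; if the IPI were masked, a CPU spinning in the suspend path waiting for a cross-call would never receive it. A hedged sketch of the request (IRQ number and handler are hypothetical):

    #include <linux/interrupt.h>

    #define MY_IPI_IRQ 42	/* hypothetical IRQ number */

    static irqreturn_t my_ipi_handler(int irq, void *dev_id)
    {
    	return IRQ_HANDLED;
    }

    static void setup_ipi(void)
    {
    	/* IRQF_NO_SUSPEND keeps the IPI deliverable across suspend_device_irqs() */
    	if (request_irq(MY_IPI_IRQ, my_ipi_handler,
    			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
    		panic("Can't request IPI interrupt");
    }
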
79 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
80 index 5ba494ed18c1..a70ff09b4982 100644
81 --- a/arch/s390/kvm/kvm-s390.c
82 +++ b/arch/s390/kvm/kvm-s390.c
83 @@ -1601,6 +1601,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
84 /* we still need the basic sca for the ipte control */
85 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
86 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
87 + return;
88 }
89 read_lock(&vcpu->kvm->arch.sca_lock);
90 if (vcpu->kvm->arch.use_esca) {
91 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
92 index cd22cb8ebd42..b60996184fa4 100644
93 --- a/arch/x86/Makefile
94 +++ b/arch/x86/Makefile
95 @@ -184,7 +184,10 @@ KBUILD_AFLAGS += $(mflags-y)
96
97 # Avoid indirect branches in kernel to deal with Spectre
98 ifdef CONFIG_RETPOLINE
99 - RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
100 + RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
101 + RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
102 +
103 + RETPOLINE_CFLAGS += $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
104 ifneq ($(RETPOLINE_CFLAGS),)
105 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
106 endif
107 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
108 index f5434b4670c1..a76dc738ec61 100644
109 --- a/arch/x86/entry/entry_32.S
110 +++ b/arch/x86/entry/entry_32.S
111 @@ -237,8 +237,7 @@ ENTRY(__switch_to_asm)
112 * exist, overwrite the RSB with entries which capture
113 * speculative execution to prevent attack.
114 */
115 - /* Clobbers %ebx */
116 - FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
117 + FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
118 #endif
119
120 /* restore callee-saved registers */
121 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
122 index 8d7e4d48db0d..58610fe93f5d 100644
123 --- a/arch/x86/entry/entry_64.S
124 +++ b/arch/x86/entry/entry_64.S
125 @@ -331,8 +331,7 @@ ENTRY(__switch_to_asm)
126 * exist, overwrite the RSB with entries which capture
127 * speculative execution to prevent attack.
128 */
129 - /* Clobbers %rbx */
130 - FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
131 + FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
132 #endif
133
134 /* restore callee-saved registers */
135 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
136 index 93eebc636c76..46e40aeae446 100644
137 --- a/arch/x86/include/asm/apm.h
138 +++ b/arch/x86/include/asm/apm.h
139 @@ -6,6 +6,8 @@
140 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
141 #define _ASM_X86_MACH_DEFAULT_APM_H
142
143 +#include <asm/nospec-branch.h>
144 +
145 #ifdef APM_ZERO_SEGS
146 # define APM_DO_ZERO_SEGS \
147 "pushl %%ds\n\t" \
148 @@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
149 * N.B. We do NOT need a cld after the BIOS call
150 * because we always save and restore the flags.
151 */
152 + firmware_restrict_branch_speculation_start();
153 __asm__ __volatile__(APM_DO_ZERO_SEGS
154 "pushl %%edi\n\t"
155 "pushl %%ebp\n\t"
156 @@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
157 "=S" (*esi)
158 : "a" (func), "b" (ebx_in), "c" (ecx_in)
159 : "memory", "cc");
160 + firmware_restrict_branch_speculation_end();
161 }
162
163 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
164 @@ -55,6 +59,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
165 * N.B. We do NOT need a cld after the BIOS call
166 * because we always save and restore the flags.
167 */
168 + firmware_restrict_branch_speculation_start();
169 __asm__ __volatile__(APM_DO_ZERO_SEGS
170 "pushl %%edi\n\t"
171 "pushl %%ebp\n\t"
172 @@ -67,6 +72,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
173 "=S" (si)
174 : "a" (func), "b" (ebx_in), "c" (ecx_in)
175 : "memory", "cc");
176 + firmware_restrict_branch_speculation_end();
177 return error;
178 }
179
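
The APM change shows the general shape of the IBRS_FW mitigation: bracket every runtime firmware entry so the CPU's indirect-branch predictor is restricted while firmware code runs. A hedged sketch, with a hypothetical firmware entry point:

    #include <linux/types.h>
    #include <asm/nospec-branch.h>

    static u32 call_firmware(u32 (*fw_entry)(u32), u32 arg)	/* hypothetical */
    {
    	u32 ret;

    	firmware_restrict_branch_speculation_start();	/* preempt off, SPEC_CTRL.IBRS = 1 */
    	ret = fw_entry(arg);
    	firmware_restrict_branch_speculation_end();	/* SPEC_CTRL.IBRS = 0, preempt on */
    	return ret;
    }
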
180 diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
181 index 166654218329..5a25ada75aeb 100644
182 --- a/arch/x86/include/asm/asm-prototypes.h
183 +++ b/arch/x86/include/asm/asm-prototypes.h
184 @@ -37,7 +37,4 @@ INDIRECT_THUNK(dx)
185 INDIRECT_THUNK(si)
186 INDIRECT_THUNK(di)
187 INDIRECT_THUNK(bp)
188 -asmlinkage void __fill_rsb(void);
189 -asmlinkage void __clear_rsb(void);
190 -
191 #endif /* CONFIG_RETPOLINE */
192 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
193 index 8eb23f5cf7f4..ed7a1d2c4235 100644
194 --- a/arch/x86/include/asm/cpufeatures.h
195 +++ b/arch/x86/include/asm/cpufeatures.h
196 @@ -203,6 +203,7 @@
197 #define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
198
199 #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
200 +#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
201
202 /* Virtualization flags: Linux defined, word 8 */
203 #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
204 diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
205 index 389d700b961e..9df22bb07f7f 100644
206 --- a/arch/x86/include/asm/efi.h
207 +++ b/arch/x86/include/asm/efi.h
208 @@ -5,6 +5,7 @@
209 #include <asm/pgtable.h>
210 #include <asm/processor-flags.h>
211 #include <asm/tlb.h>
212 +#include <asm/nospec-branch.h>
213
214 /*
215 * We map the EFI regions needed for runtime services non-contiguously,
216 @@ -35,8 +36,18 @@
217
218 extern unsigned long asmlinkage efi_call_phys(void *, ...);
219
220 -#define arch_efi_call_virt_setup() kernel_fpu_begin()
221 -#define arch_efi_call_virt_teardown() kernel_fpu_end()
222 +#define arch_efi_call_virt_setup() \
223 +({ \
224 + kernel_fpu_begin(); \
225 + firmware_restrict_branch_speculation_start(); \
226 +})
227 +
228 +#define arch_efi_call_virt_teardown() \
229 +({ \
230 + firmware_restrict_branch_speculation_end(); \
231 + kernel_fpu_end(); \
232 +})
233 +
234
235 /*
236 * Wrap all the virtual calls in a way that forces the parameters on the stack.
237 @@ -72,6 +83,7 @@ struct efi_scratch {
238 efi_sync_low_kernel_mappings(); \
239 preempt_disable(); \
240 __kernel_fpu_begin(); \
241 + firmware_restrict_branch_speculation_start(); \
242 \
243 if (efi_scratch.use_pgd) { \
244 efi_scratch.prev_cr3 = read_cr3(); \
245 @@ -90,6 +102,7 @@ struct efi_scratch {
246 __flush_tlb_all(); \
247 } \
248 \
249 + firmware_restrict_branch_speculation_end(); \
250 __kernel_fpu_end(); \
251 preempt_enable(); \
252 })
253 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
254 index 81a1be326571..d0dabeae0505 100644
255 --- a/arch/x86/include/asm/nospec-branch.h
256 +++ b/arch/x86/include/asm/nospec-branch.h
257 @@ -8,6 +8,50 @@
258 #include <asm/cpufeatures.h>
259 #include <asm/msr-index.h>
260
261 +/*
262 + * Fill the CPU return stack buffer.
263 + *
264 + * Each entry in the RSB, if used for a speculative 'ret', contains an
265 + * infinite 'pause; lfence; jmp' loop to capture speculative execution.
266 + *
267 + * This is required in various cases for retpoline and IBRS-based
268 + * mitigations for the Spectre variant 2 vulnerability. Sometimes to
269 + * eliminate potentially bogus entries from the RSB, and sometimes
270 + * purely to ensure that it doesn't get empty, which on some CPUs would
271 + * allow predictions from other (unwanted!) sources to be used.
272 + *
273 + * We define a CPP macro such that it can be used from both .S files and
274 + * inline assembly. It's possible to do a .macro and then include that
275 + * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
276 + */
277 +
278 +#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
279 +#define RSB_FILL_LOOPS 16 /* To avoid underflow */
280 +
281 +/*
282 + * Google experimented with loop-unrolling and this turned out to be
283 + * the optimal version - two calls, each with their own speculation
284 + * trap should their return address end up getting used, in a loop.
285 + */
286 +#define __FILL_RETURN_BUFFER(reg, nr, sp) \
287 + mov $(nr/2), reg; \
288 +771: \
289 + call 772f; \
290 +773: /* speculation trap */ \
291 + pause; \
292 + lfence; \
293 + jmp 773b; \
294 +772: \
295 + call 774f; \
296 +775: /* speculation trap */ \
297 + pause; \
298 + lfence; \
299 + jmp 775b; \
300 +774: \
301 + dec reg; \
302 + jnz 771b; \
303 + add $(BITS_PER_LONG/8) * nr, sp;
304 +
305 #ifdef __ASSEMBLY__
306
307 /*
308 @@ -23,6 +67,18 @@
309 .popsection
310 .endm
311
312 +/*
313 + * This should be used immediately before an indirect jump/call. It tells
314 + * objtool the subsequent indirect jump/call is vouched safe for retpoline
315 + * builds.
316 + */
317 +.macro ANNOTATE_RETPOLINE_SAFE
318 + .Lannotate_\@:
319 + .pushsection .discard.retpoline_safe
320 + _ASM_PTR .Lannotate_\@
321 + .popsection
322 +.endm
323 +
324 /*
325 * These are the bare retpoline primitives for indirect jmp and call.
326 * Do not use these directly; they only exist to make the ALTERNATIVE
327 @@ -59,9 +115,9 @@
328 .macro JMP_NOSPEC reg:req
329 #ifdef CONFIG_RETPOLINE
330 ANNOTATE_NOSPEC_ALTERNATIVE
331 - ALTERNATIVE_2 __stringify(jmp *\reg), \
332 + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \
333 __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
334 - __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
335 + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
336 #else
337 jmp *\reg
338 #endif
339 @@ -70,18 +126,25 @@
340 .macro CALL_NOSPEC reg:req
341 #ifdef CONFIG_RETPOLINE
342 ANNOTATE_NOSPEC_ALTERNATIVE
343 - ALTERNATIVE_2 __stringify(call *\reg), \
344 + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
345 __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
346 - __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
347 + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
348 #else
349 call *\reg
350 #endif
351 .endm
352
353 -/* This clobbers the BX register */
354 -.macro FILL_RETURN_BUFFER nr:req ftr:req
355 + /*
356 + * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
357 + * monstrosity above, manually.
358 + */
359 +.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
360 #ifdef CONFIG_RETPOLINE
361 - ALTERNATIVE "", "call __clear_rsb", \ftr
362 + ANNOTATE_NOSPEC_ALTERNATIVE
363 + ALTERNATIVE "jmp .Lskip_rsb_\@", \
364 + __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
365 + \ftr
366 +.Lskip_rsb_\@:
367 #endif
368 .endm
369
370 @@ -93,6 +156,12 @@
371 ".long 999b - .\n\t" \
372 ".popsection\n\t"
373
374 +#define ANNOTATE_RETPOLINE_SAFE \
375 + "999:\n\t" \
376 + ".pushsection .discard.retpoline_safe\n\t" \
377 + _ASM_PTR " 999b\n\t" \
378 + ".popsection\n\t"
379 +
380 #if defined(CONFIG_X86_64) && defined(RETPOLINE)
381
382 /*
383 @@ -102,6 +171,7 @@
384 # define CALL_NOSPEC \
385 ANNOTATE_NOSPEC_ALTERNATIVE \
386 ALTERNATIVE( \
387 + ANNOTATE_RETPOLINE_SAFE \
388 "call *%[thunk_target]\n", \
389 "call __x86_indirect_thunk_%V[thunk_target]\n", \
390 X86_FEATURE_RETPOLINE)
391 @@ -156,26 +226,54 @@ extern char __indirect_thunk_end[];
392 static inline void vmexit_fill_RSB(void)
393 {
394 #ifdef CONFIG_RETPOLINE
395 - alternative_input("",
396 - "call __fill_rsb",
397 - X86_FEATURE_RETPOLINE,
398 - ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
399 + unsigned long loops;
400 +
401 + asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
402 + ALTERNATIVE("jmp 910f",
403 + __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
404 + X86_FEATURE_RETPOLINE)
405 + "910:"
406 + : "=r" (loops), ASM_CALL_CONSTRAINT
407 + : : "memory" );
408 #endif
409 }
410
411 +#define alternative_msr_write(_msr, _val, _feature) \
412 + asm volatile(ALTERNATIVE("", \
413 + "movl %[msr], %%ecx\n\t" \
414 + "movl %[val], %%eax\n\t" \
415 + "movl $0, %%edx\n\t" \
416 + "wrmsr", \
417 + _feature) \
418 + : : [msr] "i" (_msr), [val] "i" (_val) \
419 + : "eax", "ecx", "edx", "memory")
420 +
421 static inline void indirect_branch_prediction_barrier(void)
422 {
423 - asm volatile(ALTERNATIVE("",
424 - "movl %[msr], %%ecx\n\t"
425 - "movl %[val], %%eax\n\t"
426 - "movl $0, %%edx\n\t"
427 - "wrmsr",
428 - X86_FEATURE_USE_IBPB)
429 - : : [msr] "i" (MSR_IA32_PRED_CMD),
430 - [val] "i" (PRED_CMD_IBPB)
431 - : "eax", "ecx", "edx", "memory");
432 + alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
433 + X86_FEATURE_USE_IBPB);
434 }
435
436 +/*
437 + * With retpoline, we must use IBRS to restrict branch prediction
438 + * before calling into firmware.
439 + *
440 + * (Implemented as CPP macros due to header hell.)
441 + */
442 +#define firmware_restrict_branch_speculation_start() \
443 +do { \
444 + preempt_disable(); \
445 + alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
446 + X86_FEATURE_USE_IBRS_FW); \
447 +} while (0)
448 +
449 +#define firmware_restrict_branch_speculation_end() \
450 +do { \
451 + alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
452 + X86_FEATURE_USE_IBRS_FW); \
453 + preempt_enable(); \
454 +} while (0)
455 +
456 #endif /* __ASSEMBLY__ */
457
458 /*
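
When the feature bit is set, alternative_msr_write() patches in a plain wrmsr with immediate operands. A hedged sketch of the equivalent unconditional write for the IBPB case, with the MSR constants spelled out (the helper name is hypothetical; the real kernel uses the alternatives machinery so unaffected CPUs pay nothing):

    static inline void ibpb_now(void)	/* hypothetical helper */
    {
    	/* wrmsr: ecx = MSR index, edx:eax = value */
    	/* MSR_IA32_PRED_CMD == 0x49, PRED_CMD_IBPB == 1 */
    	asm volatile("wrmsr" : : "c" (0x49), "a" (1), "d" (0) : "memory");
    }
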
459 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
460 index ce932812f142..24af8b1de438 100644
461 --- a/arch/x86/include/asm/paravirt.h
462 +++ b/arch/x86/include/asm/paravirt.h
463 @@ -6,6 +6,7 @@
464 #ifdef CONFIG_PARAVIRT
465 #include <asm/pgtable_types.h>
466 #include <asm/asm.h>
467 +#include <asm/nospec-branch.h>
468
469 #include <asm/paravirt_types.h>
470
471 @@ -869,23 +870,27 @@ extern void default_banner(void);
472
473 #define INTERRUPT_RETURN \
474 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
475 - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
476 + ANNOTATE_RETPOLINE_SAFE; \
477 + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
478
479 #define DISABLE_INTERRUPTS(clobbers) \
480 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
481 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
482 + ANNOTATE_RETPOLINE_SAFE; \
483 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
484 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
485
486 #define ENABLE_INTERRUPTS(clobbers) \
487 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
488 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
489 + ANNOTATE_RETPOLINE_SAFE; \
490 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
491 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
492
493 #ifdef CONFIG_X86_32
494 #define GET_CR0_INTO_EAX \
495 push %ecx; push %edx; \
496 + ANNOTATE_RETPOLINE_SAFE; \
497 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
498 pop %edx; pop %ecx
499 #else /* !CONFIG_X86_32 */
500 @@ -907,11 +912,13 @@ extern void default_banner(void);
501 */
502 #define SWAPGS \
503 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
504 - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
505 + ANNOTATE_RETPOLINE_SAFE; \
506 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
507 )
508
509 #define GET_CR2_INTO_RAX \
510 - call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
511 + ANNOTATE_RETPOLINE_SAFE; \
512 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
513
514 #define PARAVIRT_ADJUST_EXCEPTION_FRAME \
515 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
516 @@ -921,7 +928,8 @@ extern void default_banner(void);
517 #define USERGS_SYSRET64 \
518 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
519 CLBR_NONE, \
520 - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
521 + ANNOTATE_RETPOLINE_SAFE; \
522 + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
523 #endif /* CONFIG_X86_32 */
524
525 #endif /* __ASSEMBLY__ */
526 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
527 index 0f400c0e4979..04b79712b09c 100644
528 --- a/arch/x86/include/asm/paravirt_types.h
529 +++ b/arch/x86/include/asm/paravirt_types.h
530 @@ -42,6 +42,7 @@
531 #include <asm/desc_defs.h>
532 #include <asm/kmap_types.h>
533 #include <asm/pgtable_types.h>
534 +#include <asm/nospec-branch.h>
535
536 struct page;
537 struct thread_struct;
538 @@ -391,7 +392,9 @@ int paravirt_disable_iospace(void);
539 * offset into the paravirt_patch_template structure, and can therefore be
540 * freely converted back into a structure offset.
541 */
542 -#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
543 +#define PARAVIRT_CALL \
544 + ANNOTATE_RETPOLINE_SAFE \
545 + "call *%c[paravirt_opptr];"
546
547 /*
548 * These macros are intended to wrap calls through one of the paravirt
549 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
550 index baddc9ed3454..b8b0b6e78371 100644
551 --- a/arch/x86/kernel/cpu/bugs.c
552 +++ b/arch/x86/kernel/cpu/bugs.c
553 @@ -299,6 +299,15 @@ static void __init spectre_v2_select_mitigation(void)
554 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
555 pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
556 }
557 +
558 + /*
559 + * Retpoline means the kernel is safe because it has no indirect
560 + * branches. But firmware isn't, so use IBRS to protect that.
561 + */
562 + if (boot_cpu_has(X86_FEATURE_IBRS)) {
563 + setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
564 + pr_info("Enabling Restricted Speculation for firmware calls\n");
565 + }
566 }
567
568 #undef pr_fmt
569 @@ -325,8 +334,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
570 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
571 return sprintf(buf, "Not affected\n");
572
573 - return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
574 + return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
575 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
576 + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
577 spectre_v2_module_string());
578 }
579 #endif
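
With the extra "%s", the sysfs vulnerability file also reports whether firmware calls are IBRS-protected. A quick userspace check (the sample output assumes a retpoline build on IBRS-capable hardware):

    #include <stdio.h>

    int main(void)
    {
    	char buf[128];
    	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

    	if (f && fgets(buf, sizeof(buf), f))
    		fputs(buf, stdout);	/* e.g. "Mitigation: Full generic retpoline, IBPB, IBRS_FW" */
    	if (f)
    		fclose(f);
    	return 0;
    }
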
580 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
581 index 6ed206bd9071..768042530af2 100644
582 --- a/arch/x86/kernel/cpu/intel.c
583 +++ b/arch/x86/kernel/cpu/intel.c
584 @@ -103,6 +103,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
585 {
586 int i;
587
588 + /*
589 + * We know that hypervisors lie to us about the microcode version, so
590 + * we may as well hope the host is running the correct version.
591 + */
592 + if (cpu_has(c, X86_FEATURE_HYPERVISOR))
593 + return false;
594 +
595 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
596 if (c->x86_model == spectre_bad_microcodes[i].model &&
597 c->x86_stepping == spectre_bad_microcodes[i].stepping)
598 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
599 index fe5cd6ea1f0e..684d9fd191e0 100644
600 --- a/arch/x86/kernel/cpu/mcheck/mce.c
601 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
602 @@ -61,6 +61,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
603 smp_load_acquire(&(p)); \
604 })
605
606 +/* sysfs synchronization */
607 +static DEFINE_MUTEX(mce_sysfs_mutex);
608 +
609 #define CREATE_TRACE_POINTS
610 #include <trace/events/mce.h>
611
612 @@ -2308,6 +2311,7 @@ static ssize_t set_ignore_ce(struct device *s,
613 if (kstrtou64(buf, 0, &new) < 0)
614 return -EINVAL;
615
616 + mutex_lock(&mce_sysfs_mutex);
617 if (mca_cfg.ignore_ce ^ !!new) {
618 if (new) {
619 /* disable ce features */
620 @@ -2320,6 +2324,8 @@ static ssize_t set_ignore_ce(struct device *s,
621 on_each_cpu(mce_enable_ce, (void *)1, 1);
622 }
623 }
624 + mutex_unlock(&mce_sysfs_mutex);
625 +
626 return size;
627 }
628
629 @@ -2332,6 +2338,7 @@ static ssize_t set_cmci_disabled(struct device *s,
630 if (kstrtou64(buf, 0, &new) < 0)
631 return -EINVAL;
632
633 + mutex_lock(&mce_sysfs_mutex);
634 if (mca_cfg.cmci_disabled ^ !!new) {
635 if (new) {
636 /* disable cmci */
637 @@ -2343,6 +2350,8 @@ static ssize_t set_cmci_disabled(struct device *s,
638 on_each_cpu(mce_enable_ce, NULL, 1);
639 }
640 }
641 + mutex_unlock(&mce_sysfs_mutex);
642 +
643 return size;
644 }
645
646 @@ -2350,8 +2359,19 @@ static ssize_t store_int_with_restart(struct device *s,
647 struct device_attribute *attr,
648 const char *buf, size_t size)
649 {
650 - ssize_t ret = device_store_int(s, attr, buf, size);
651 + unsigned long old_check_interval = check_interval;
652 + ssize_t ret = device_store_ulong(s, attr, buf, size);
653 +
654 + if (check_interval == old_check_interval)
655 + return ret;
656 +
657 + if (check_interval < 1)
658 + check_interval = 1;
659 +
660 + mutex_lock(&mce_sysfs_mutex);
661 mce_restart();
662 + mutex_unlock(&mce_sysfs_mutex);
663 +
664 return ret;
665 }
666
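
The mce change serializes the sysfs store handlers with one mutex so concurrent writers cannot interleave the read-modify-write of shared configuration state. A hedged, simplified sketch of the pattern (names and signature are hypothetical):

    #include <linux/kernel.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(my_sysfs_mutex);	/* hypothetical */
    static unsigned long my_interval = 300;

    static ssize_t my_store(const char *buf, size_t size)	/* simplified signature */
    {
    	unsigned long new;

    	if (kstrtoul(buf, 0, &new) < 0)
    		return -EINVAL;

    	mutex_lock(&my_sysfs_mutex);	/* one writer at a time */
    	my_interval = new ? new : 1;	/* clamp, then apply under the lock */
    	mutex_unlock(&my_sysfs_mutex);
    	return size;
    }
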
667 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
668 index 67cd7c1b99da..9d72cf547c88 100644
669 --- a/arch/x86/kernel/head_64.S
670 +++ b/arch/x86/kernel/head_64.S
671 @@ -22,6 +22,7 @@
672 #include <asm/nops.h>
673 #include "../entry/calling.h"
674 #include <asm/export.h>
675 +#include <asm/nospec-branch.h>
676
677 #ifdef CONFIG_PARAVIRT
678 #include <asm/asm-offsets.h>
679 @@ -200,6 +201,7 @@ ENTRY(secondary_startup_64)
680
681 /* Ensure I am executing from virtual addresses */
682 movq $1f, %rax
683 + ANNOTATE_RETPOLINE_SAFE
684 jmp *%rax
685 1:
686
687 diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
688 index 8c1f218926d7..a5784a14f8d1 100644
689 --- a/arch/x86/kernel/machine_kexec_64.c
690 +++ b/arch/x86/kernel/machine_kexec_64.c
691 @@ -524,6 +524,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
692 goto overflow;
693 break;
694 case R_X86_64_PC32:
695 + case R_X86_64_PLT32:
696 value -= (u64)address;
697 *(u32 *)location = value;
698 break;
699 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
700 index 477ae806c2fa..19977d2f97fb 100644
701 --- a/arch/x86/kernel/module.c
702 +++ b/arch/x86/kernel/module.c
703 @@ -171,19 +171,28 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
704 case R_X86_64_NONE:
705 break;
706 case R_X86_64_64:
707 + if (*(u64 *)loc != 0)
708 + goto invalid_relocation;
709 *(u64 *)loc = val;
710 break;
711 case R_X86_64_32:
712 + if (*(u32 *)loc != 0)
713 + goto invalid_relocation;
714 *(u32 *)loc = val;
715 if (val != *(u32 *)loc)
716 goto overflow;
717 break;
718 case R_X86_64_32S:
719 + if (*(s32 *)loc != 0)
720 + goto invalid_relocation;
721 *(s32 *)loc = val;
722 if ((s64)val != *(s32 *)loc)
723 goto overflow;
724 break;
725 case R_X86_64_PC32:
726 + case R_X86_64_PLT32:
727 + if (*(u32 *)loc != 0)
728 + goto invalid_relocation;
729 val -= (u64)loc;
730 *(u32 *)loc = val;
731 #if 0
732 @@ -199,6 +208,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
733 }
734 return 0;
735
736 +invalid_relocation:
737 + pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
738 + (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
739 + return -ENOEXEC;
740 +
741 overflow:
742 pr_err("overflow in relocation type %d val %Lx\n",
743 (int)ELF64_R_TYPE(rel[i].r_info), val);
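
The relocation hunks here (kexec, modules, and relocs below) all rest on the same fact: newer binutils (2.31 and later) emit R_X86_64_PLT32 for direct calls to global symbols, and since the kernel is linked without a PLT, the relocation resolves exactly like R_X86_64_PC32: a signed 32-bit displacement from the relocation site to the target. A hedged userspace sketch of that computation:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static void apply_pc32(void *loc, uint64_t target)
    {
    	int64_t rel = (int64_t)(target - (uintptr_t)loc);
    	int32_t v = (int32_t)rel;

    	assert((int64_t)v == rel);	/* the +/-2 GiB range ("overflow") check */
    	memcpy(loc, &v, sizeof(v));	/* store the displacement at the site */
    }
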
744 diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
745 index 4ad7c4dd311c..6bf1898ddf49 100644
746 --- a/arch/x86/lib/Makefile
747 +++ b/arch/x86/lib/Makefile
748 @@ -26,7 +26,6 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
749 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
750 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
751 lib-$(CONFIG_RETPOLINE) += retpoline.o
752 -OBJECT_FILES_NON_STANDARD_retpoline.o :=y
753
754 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
755
756 diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
757 index 480edc3a5e03..c909961e678a 100644
758 --- a/arch/x86/lib/retpoline.S
759 +++ b/arch/x86/lib/retpoline.S
760 @@ -7,7 +7,6 @@
761 #include <asm/alternative-asm.h>
762 #include <asm/export.h>
763 #include <asm/nospec-branch.h>
764 -#include <asm/bitsperlong.h>
765
766 .macro THUNK reg
767 .section .text.__x86.indirect_thunk
768 @@ -47,58 +46,3 @@ GENERATE_THUNK(r13)
769 GENERATE_THUNK(r14)
770 GENERATE_THUNK(r15)
771 #endif
772 -
773 -/*
774 - * Fill the CPU return stack buffer.
775 - *
776 - * Each entry in the RSB, if used for a speculative 'ret', contains an
777 - * infinite 'pause; lfence; jmp' loop to capture speculative execution.
778 - *
779 - * This is required in various cases for retpoline and IBRS-based
780 - * mitigations for the Spectre variant 2 vulnerability. Sometimes to
781 - * eliminate potentially bogus entries from the RSB, and sometimes
782 - * purely to ensure that it doesn't get empty, which on some CPUs would
783 - * allow predictions from other (unwanted!) sources to be used.
784 - *
785 - * Google experimented with loop-unrolling and this turned out to be
786 - * the optimal version - two calls, each with their own speculation
787 - * trap should their return address end up getting used, in a loop.
788 - */
789 -.macro STUFF_RSB nr:req sp:req
790 - mov $(\nr / 2), %_ASM_BX
791 - .align 16
792 -771:
793 - call 772f
794 -773: /* speculation trap */
795 - pause
796 - lfence
797 - jmp 773b
798 - .align 16
799 -772:
800 - call 774f
801 -775: /* speculation trap */
802 - pause
803 - lfence
804 - jmp 775b
805 - .align 16
806 -774:
807 - dec %_ASM_BX
808 - jnz 771b
809 - add $((BITS_PER_LONG/8) * \nr), \sp
810 -.endm
811 -
812 -#define RSB_FILL_LOOPS 16 /* To avoid underflow */
813 -
814 -ENTRY(__fill_rsb)
815 - STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
816 - ret
817 -END(__fill_rsb)
818 -EXPORT_SYMBOL_GPL(__fill_rsb)
819 -
820 -#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
821 -
822 -ENTRY(__clear_rsb)
823 - STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
824 - ret
825 -END(__clear_rsb)
826 -EXPORT_SYMBOL_GPL(__clear_rsb)
827 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
828 index 73eb7fd4aec4..5b6c8486a0be 100644
829 --- a/arch/x86/tools/relocs.c
830 +++ b/arch/x86/tools/relocs.c
831 @@ -769,9 +769,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
832 break;
833
834 case R_X86_64_PC32:
835 + case R_X86_64_PLT32:
836 /*
837 * PC relative relocations don't need to be adjusted unless
838 * referencing a percpu symbol.
839 + *
840 + * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
841 */
842 if (is_percpu_sym(sym, symname))
843 add_reloc(&relocs32neg, offset);
844 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
845 index 402254d26247..68bfcef24701 100644
846 --- a/drivers/block/loop.c
847 +++ b/drivers/block/loop.c
848 @@ -263,7 +263,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
849 struct iov_iter i;
850 ssize_t bw;
851
852 - iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
853 + iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
854
855 file_start_write(file);
856 bw = vfs_iter_write(file, &i, ppos);
857 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
858 index 5796539a0bcb..648ecf69bad5 100644
859 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
860 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
861 @@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
862 size_t size;
863 u32 retry = 3;
864
865 + if (amdgpu_acpi_pcie_notify_device_ready(adev))
866 + return -EINVAL;
867 +
868 /* Get the device handle */
869 handle = ACPI_HANDLE(&adev->pdev->dev);
870 if (!handle)
871 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
872 index 086aa5c9c634..c82b04b24bf9 100644
873 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
874 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
875 @@ -739,9 +739,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
876 enum drm_connector_status ret = connector_status_disconnected;
877 int r;
878
879 - r = pm_runtime_get_sync(connector->dev->dev);
880 - if (r < 0)
881 - return connector_status_disconnected;
882 + if (!drm_kms_helper_is_poll_worker()) {
883 + r = pm_runtime_get_sync(connector->dev->dev);
884 + if (r < 0)
885 + return connector_status_disconnected;
886 + }
887
888 if (encoder) {
889 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
890 @@ -760,8 +762,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
891 /* check acpi lid status ??? */
892
893 amdgpu_connector_update_scratch_regs(connector, ret);
894 - pm_runtime_mark_last_busy(connector->dev->dev);
895 - pm_runtime_put_autosuspend(connector->dev->dev);
896 +
897 + if (!drm_kms_helper_is_poll_worker()) {
898 + pm_runtime_mark_last_busy(connector->dev->dev);
899 + pm_runtime_put_autosuspend(connector->dev->dev);
900 + }
901 +
902 return ret;
903 }
904
905 @@ -871,9 +877,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
906 enum drm_connector_status ret = connector_status_disconnected;
907 int r;
908
909 - r = pm_runtime_get_sync(connector->dev->dev);
910 - if (r < 0)
911 - return connector_status_disconnected;
912 + if (!drm_kms_helper_is_poll_worker()) {
913 + r = pm_runtime_get_sync(connector->dev->dev);
914 + if (r < 0)
915 + return connector_status_disconnected;
916 + }
917
918 encoder = amdgpu_connector_best_single_encoder(connector);
919 if (!encoder)
920 @@ -927,8 +935,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
921 amdgpu_connector_update_scratch_regs(connector, ret);
922
923 out:
924 - pm_runtime_mark_last_busy(connector->dev->dev);
925 - pm_runtime_put_autosuspend(connector->dev->dev);
926 + if (!drm_kms_helper_is_poll_worker()) {
927 + pm_runtime_mark_last_busy(connector->dev->dev);
928 + pm_runtime_put_autosuspend(connector->dev->dev);
929 + }
930
931 return ret;
932 }
933 @@ -991,9 +1001,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
934 enum drm_connector_status ret = connector_status_disconnected;
935 bool dret = false, broken_edid = false;
936
937 - r = pm_runtime_get_sync(connector->dev->dev);
938 - if (r < 0)
939 - return connector_status_disconnected;
940 + if (!drm_kms_helper_is_poll_worker()) {
941 + r = pm_runtime_get_sync(connector->dev->dev);
942 + if (r < 0)
943 + return connector_status_disconnected;
944 + }
945
946 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
947 ret = connector->status;
948 @@ -1118,8 +1130,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
949 amdgpu_connector_update_scratch_regs(connector, ret);
950
951 exit:
952 - pm_runtime_mark_last_busy(connector->dev->dev);
953 - pm_runtime_put_autosuspend(connector->dev->dev);
954 + if (!drm_kms_helper_is_poll_worker()) {
955 + pm_runtime_mark_last_busy(connector->dev->dev);
956 + pm_runtime_put_autosuspend(connector->dev->dev);
957 + }
958
959 return ret;
960 }
961 @@ -1362,9 +1376,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
962 struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
963 int r;
964
965 - r = pm_runtime_get_sync(connector->dev->dev);
966 - if (r < 0)
967 - return connector_status_disconnected;
968 + if (!drm_kms_helper_is_poll_worker()) {
969 + r = pm_runtime_get_sync(connector->dev->dev);
970 + if (r < 0)
971 + return connector_status_disconnected;
972 + }
973
974 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
975 ret = connector->status;
976 @@ -1432,8 +1448,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
977
978 amdgpu_connector_update_scratch_regs(connector, ret);
979 out:
980 - pm_runtime_mark_last_busy(connector->dev->dev);
981 - pm_runtime_put_autosuspend(connector->dev->dev);
982 + if (!drm_kms_helper_is_poll_worker()) {
983 + pm_runtime_mark_last_busy(connector->dev->dev);
984 + pm_runtime_put_autosuspend(connector->dev->dev);
985 + }
986
987 return ret;
988 }
989 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
990 index e3281cacc586..5caf517eec9f 100644
991 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
992 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
993 @@ -273,12 +273,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
994 if (adev->uvd.vcpu_bo == NULL)
995 return 0;
996
997 - for (i = 0; i < adev->uvd.max_handles; ++i)
998 - if (atomic_read(&adev->uvd.handles[i]))
999 - break;
1000 + /* only valid for physical mode */
1001 + if (adev->asic_type < CHIP_POLARIS10) {
1002 + for (i = 0; i < adev->uvd.max_handles; ++i)
1003 + if (atomic_read(&adev->uvd.handles[i]))
1004 + break;
1005
1006 - if (i == AMDGPU_MAX_UVD_HANDLES)
1007 - return 0;
1008 + if (i == adev->uvd.max_handles)
1009 + return 0;
1010 + }
1011
1012 cancel_delayed_work_sync(&adev->uvd.idle_work);
1013
1014 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1015 index 71116da9e782..e040a896179c 100644
1016 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1017 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1018 @@ -4475,34 +4475,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
1019 case CHIP_KAVERI:
1020 adev->gfx.config.max_shader_engines = 1;
1021 adev->gfx.config.max_tile_pipes = 4;
1022 - if ((adev->pdev->device == 0x1304) ||
1023 - (adev->pdev->device == 0x1305) ||
1024 - (adev->pdev->device == 0x130C) ||
1025 - (adev->pdev->device == 0x130F) ||
1026 - (adev->pdev->device == 0x1310) ||
1027 - (adev->pdev->device == 0x1311) ||
1028 - (adev->pdev->device == 0x131C)) {
1029 - adev->gfx.config.max_cu_per_sh = 8;
1030 - adev->gfx.config.max_backends_per_se = 2;
1031 - } else if ((adev->pdev->device == 0x1309) ||
1032 - (adev->pdev->device == 0x130A) ||
1033 - (adev->pdev->device == 0x130D) ||
1034 - (adev->pdev->device == 0x1313) ||
1035 - (adev->pdev->device == 0x131D)) {
1036 - adev->gfx.config.max_cu_per_sh = 6;
1037 - adev->gfx.config.max_backends_per_se = 2;
1038 - } else if ((adev->pdev->device == 0x1306) ||
1039 - (adev->pdev->device == 0x1307) ||
1040 - (adev->pdev->device == 0x130B) ||
1041 - (adev->pdev->device == 0x130E) ||
1042 - (adev->pdev->device == 0x1315) ||
1043 - (adev->pdev->device == 0x131B)) {
1044 - adev->gfx.config.max_cu_per_sh = 4;
1045 - adev->gfx.config.max_backends_per_se = 1;
1046 - } else {
1047 - adev->gfx.config.max_cu_per_sh = 3;
1048 - adev->gfx.config.max_backends_per_se = 1;
1049 - }
1050 + adev->gfx.config.max_cu_per_sh = 8;
1051 + adev->gfx.config.max_backends_per_se = 2;
1052 adev->gfx.config.max_sh_per_se = 1;
1053 adev->gfx.config.max_texture_channel_caches = 4;
1054 adev->gfx.config.max_gprs = 256;
1055 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
1056 index 276474d13763..d7822bef1986 100644
1057 --- a/drivers/gpu/drm/drm_probe_helper.c
1058 +++ b/drivers/gpu/drm/drm_probe_helper.c
1059 @@ -460,6 +460,26 @@ static void output_poll_execute(struct work_struct *work)
1060 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
1061 }
1062
1063 +/**
1064 + * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
1065 + *
1066 + * Determine if %current task is an output poll worker. This can be used
1067 + * to select distinct code paths for output polling versus other contexts.
1068 + *
1069 + * One use case is to avoid a deadlock between the output poll worker and
1070 + * the autosuspend worker wherein the latter waits for polling to finish
1071 + * upon calling drm_kms_helper_poll_disable(), while the former waits for
1072 + * runtime suspend to finish upon calling pm_runtime_get_sync() in a
1073 + * connector ->detect hook.
1074 + */
1075 +bool drm_kms_helper_is_poll_worker(void)
1076 +{
1077 + struct work_struct *work = current_work();
1078 +
1079 + return work && work->func == output_poll_execute;
1080 +}
1081 +EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
1082 +
1083 /**
1084 * drm_kms_helper_poll_disable - disable output polling
1085 * @dev: drm_device
1086 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1087 index 7513e7678263..bae62cf934cf 100644
1088 --- a/drivers/gpu/drm/i915/i915_drv.c
1089 +++ b/drivers/gpu/drm/i915/i915_drv.c
1090 @@ -1703,6 +1703,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
1091 if (IS_BROXTON(dev_priv) ||
1092 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
1093 intel_power_domains_init_hw(dev_priv, true);
1094 + else
1095 + intel_display_set_init_power(dev_priv, true);
1096
1097 enable_rpm_wakeref_asserts(dev_priv);
1098
1099 diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
1100 index 13c306173f27..0c935dede9f4 100644
1101 --- a/drivers/gpu/drm/i915/intel_hdmi.c
1102 +++ b/drivers/gpu/drm/i915/intel_hdmi.c
1103 @@ -1452,12 +1452,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
1104 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1105 struct edid *edid;
1106 bool connected = false;
1107 + struct i2c_adapter *i2c;
1108
1109 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1110
1111 - edid = drm_get_edid(connector,
1112 - intel_gmbus_get_adapter(dev_priv,
1113 - intel_hdmi->ddc_bus));
1114 + i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
1115 +
1116 + edid = drm_get_edid(connector, i2c);
1117 +
1118 + if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
1119 + DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
1120 + intel_gmbus_force_bit(i2c, true);
1121 + edid = drm_get_edid(connector, i2c);
1122 + intel_gmbus_force_bit(i2c, false);
1123 + }
1124
1125 intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
1126
1127 diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
1128 index c1084088f9e4..56c288f78d8a 100644
1129 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c
1130 +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
1131 @@ -271,9 +271,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
1132 nv_connector->edid = NULL;
1133 }
1134
1135 - ret = pm_runtime_get_sync(connector->dev->dev);
1136 - if (ret < 0 && ret != -EACCES)
1137 - return conn_status;
1138 + /* Outputs are only polled while runtime active, so acquiring a
1139 + * runtime PM ref here is unnecessary (and would deadlock upon
1140 + * runtime suspend because it waits for polling to finish).
1141 + */
1142 + if (!drm_kms_helper_is_poll_worker()) {
1143 + ret = pm_runtime_get_sync(connector->dev->dev);
1144 + if (ret < 0 && ret != -EACCES)
1145 + return conn_status;
1146 + }
1147
1148 nv_encoder = nouveau_connector_ddc_detect(connector);
1149 if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
1150 @@ -348,8 +354,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
1151
1152 out:
1153
1154 - pm_runtime_mark_last_busy(connector->dev->dev);
1155 - pm_runtime_put_autosuspend(connector->dev->dev);
1156 + if (!drm_kms_helper_is_poll_worker()) {
1157 + pm_runtime_mark_last_busy(connector->dev->dev);
1158 + pm_runtime_put_autosuspend(connector->dev->dev);
1159 + }
1160
1161 return conn_status;
1162 }
1163 diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
1164 index edee6a5f4da9..b99f3e59011c 100644
1165 --- a/drivers/gpu/drm/radeon/cik.c
1166 +++ b/drivers/gpu/drm/radeon/cik.c
1167 @@ -3244,35 +3244,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
1168 case CHIP_KAVERI:
1169 rdev->config.cik.max_shader_engines = 1;
1170 rdev->config.cik.max_tile_pipes = 4;
1171 - if ((rdev->pdev->device == 0x1304) ||
1172 - (rdev->pdev->device == 0x1305) ||
1173 - (rdev->pdev->device == 0x130C) ||
1174 - (rdev->pdev->device == 0x130F) ||
1175 - (rdev->pdev->device == 0x1310) ||
1176 - (rdev->pdev->device == 0x1311) ||
1177 - (rdev->pdev->device == 0x131C)) {
1178 - rdev->config.cik.max_cu_per_sh = 8;
1179 - rdev->config.cik.max_backends_per_se = 2;
1180 - } else if ((rdev->pdev->device == 0x1309) ||
1181 - (rdev->pdev->device == 0x130A) ||
1182 - (rdev->pdev->device == 0x130D) ||
1183 - (rdev->pdev->device == 0x1313) ||
1184 - (rdev->pdev->device == 0x131D)) {
1185 - rdev->config.cik.max_cu_per_sh = 6;
1186 - rdev->config.cik.max_backends_per_se = 2;
1187 - } else if ((rdev->pdev->device == 0x1306) ||
1188 - (rdev->pdev->device == 0x1307) ||
1189 - (rdev->pdev->device == 0x130B) ||
1190 - (rdev->pdev->device == 0x130E) ||
1191 - (rdev->pdev->device == 0x1315) ||
1192 - (rdev->pdev->device == 0x1318) ||
1193 - (rdev->pdev->device == 0x131B)) {
1194 - rdev->config.cik.max_cu_per_sh = 4;
1195 - rdev->config.cik.max_backends_per_se = 1;
1196 - } else {
1197 - rdev->config.cik.max_cu_per_sh = 3;
1198 - rdev->config.cik.max_backends_per_se = 1;
1199 - }
1200 + rdev->config.cik.max_cu_per_sh = 8;
1201 + rdev->config.cik.max_backends_per_se = 2;
1202 rdev->config.cik.max_sh_per_se = 1;
1203 rdev->config.cik.max_texture_channel_caches = 4;
1204 rdev->config.cik.max_gprs = 256;
1205 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
1206 index 27affbde058c..af0d7fd5706b 100644
1207 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
1208 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
1209 @@ -897,9 +897,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
1210 enum drm_connector_status ret = connector_status_disconnected;
1211 int r;
1212
1213 - r = pm_runtime_get_sync(connector->dev->dev);
1214 - if (r < 0)
1215 - return connector_status_disconnected;
1216 + if (!drm_kms_helper_is_poll_worker()) {
1217 + r = pm_runtime_get_sync(connector->dev->dev);
1218 + if (r < 0)
1219 + return connector_status_disconnected;
1220 + }
1221
1222 if (encoder) {
1223 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1224 @@ -922,8 +924,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
1225 /* check acpi lid status ??? */
1226
1227 radeon_connector_update_scratch_regs(connector, ret);
1228 - pm_runtime_mark_last_busy(connector->dev->dev);
1229 - pm_runtime_put_autosuspend(connector->dev->dev);
1230 +
1231 + if (!drm_kms_helper_is_poll_worker()) {
1232 + pm_runtime_mark_last_busy(connector->dev->dev);
1233 + pm_runtime_put_autosuspend(connector->dev->dev);
1234 + }
1235 +
1236 return ret;
1237 }
1238
1239 @@ -1037,9 +1043,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
1240 enum drm_connector_status ret = connector_status_disconnected;
1241 int r;
1242
1243 - r = pm_runtime_get_sync(connector->dev->dev);
1244 - if (r < 0)
1245 - return connector_status_disconnected;
1246 + if (!drm_kms_helper_is_poll_worker()) {
1247 + r = pm_runtime_get_sync(connector->dev->dev);
1248 + if (r < 0)
1249 + return connector_status_disconnected;
1250 + }
1251
1252 encoder = radeon_best_single_encoder(connector);
1253 if (!encoder)
1254 @@ -1106,8 +1114,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
1255 radeon_connector_update_scratch_regs(connector, ret);
1256
1257 out:
1258 - pm_runtime_mark_last_busy(connector->dev->dev);
1259 - pm_runtime_put_autosuspend(connector->dev->dev);
1260 + if (!drm_kms_helper_is_poll_worker()) {
1261 + pm_runtime_mark_last_busy(connector->dev->dev);
1262 + pm_runtime_put_autosuspend(connector->dev->dev);
1263 + }
1264
1265 return ret;
1266 }
1267 @@ -1171,9 +1181,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
1268 if (!radeon_connector->dac_load_detect)
1269 return ret;
1270
1271 - r = pm_runtime_get_sync(connector->dev->dev);
1272 - if (r < 0)
1273 - return connector_status_disconnected;
1274 + if (!drm_kms_helper_is_poll_worker()) {
1275 + r = pm_runtime_get_sync(connector->dev->dev);
1276 + if (r < 0)
1277 + return connector_status_disconnected;
1278 + }
1279
1280 encoder = radeon_best_single_encoder(connector);
1281 if (!encoder)
1282 @@ -1185,8 +1197,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
1283 if (ret == connector_status_connected)
1284 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
1285 radeon_connector_update_scratch_regs(connector, ret);
1286 - pm_runtime_mark_last_busy(connector->dev->dev);
1287 - pm_runtime_put_autosuspend(connector->dev->dev);
1288 +
1289 + if (!drm_kms_helper_is_poll_worker()) {
1290 + pm_runtime_mark_last_busy(connector->dev->dev);
1291 + pm_runtime_put_autosuspend(connector->dev->dev);
1292 + }
1293 +
1294 return ret;
1295 }
1296
1297 @@ -1249,9 +1265,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1298 enum drm_connector_status ret = connector_status_disconnected;
1299 bool dret = false, broken_edid = false;
1300
1301 - r = pm_runtime_get_sync(connector->dev->dev);
1302 - if (r < 0)
1303 - return connector_status_disconnected;
1304 + if (!drm_kms_helper_is_poll_worker()) {
1305 + r = pm_runtime_get_sync(connector->dev->dev);
1306 + if (r < 0)
1307 + return connector_status_disconnected;
1308 + }
1309
1310 if (radeon_connector->detected_hpd_without_ddc) {
1311 force = true;
1312 @@ -1434,8 +1452,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1313 }
1314
1315 exit:
1316 - pm_runtime_mark_last_busy(connector->dev->dev);
1317 - pm_runtime_put_autosuspend(connector->dev->dev);
1318 + if (!drm_kms_helper_is_poll_worker()) {
1319 + pm_runtime_mark_last_busy(connector->dev->dev);
1320 + pm_runtime_put_autosuspend(connector->dev->dev);
1321 + }
1322
1323 return ret;
1324 }
1325 @@ -1686,9 +1706,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1326 if (radeon_dig_connector->is_mst)
1327 return connector_status_disconnected;
1328
1329 - r = pm_runtime_get_sync(connector->dev->dev);
1330 - if (r < 0)
1331 - return connector_status_disconnected;
1332 + if (!drm_kms_helper_is_poll_worker()) {
1333 + r = pm_runtime_get_sync(connector->dev->dev);
1334 + if (r < 0)
1335 + return connector_status_disconnected;
1336 + }
1337
1338 if (!force && radeon_check_hpd_status_unchanged(connector)) {
1339 ret = connector->status;
1340 @@ -1775,8 +1797,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1341 }
1342
1343 out:
1344 - pm_runtime_mark_last_busy(connector->dev->dev);
1345 - pm_runtime_put_autosuspend(connector->dev->dev);
1346 + if (!drm_kms_helper_is_poll_worker()) {
1347 + pm_runtime_mark_last_busy(connector->dev->dev);
1348 + pm_runtime_put_autosuspend(connector->dev->dev);
1349 + }
1350
1351 return ret;
1352 }
1353 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
1354 index 9520154f1d7c..6840d3c5cd64 100644
1355 --- a/drivers/infiniband/core/ucma.c
1356 +++ b/drivers/infiniband/core/ucma.c
1357 @@ -1139,6 +1139,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
1358 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1359 return -EFAULT;
1360
1361 + if (cmd.qp_state > IB_QPS_ERR)
1362 + return -EINVAL;
1363 +
1364 ctx = ucma_get_ctx(file, cmd.id);
1365 if (IS_ERR(ctx))
1366 return PTR_ERR(ctx);
1367 @@ -1275,6 +1278,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1368 if (IS_ERR(ctx))
1369 return PTR_ERR(ctx);
1370
1371 + if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
1372 + return -EINVAL;
1373 +
1374 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
1375 cmd.optlen);
1376 if (IS_ERR(optval)) {
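
Both ucma fixes are input validation on a user-controlled command structure: reject an out-of-range qp_state, and cap the option length before memdup_user() kmallocs a buffer of that size. A hedged kernel-style sketch of the length check (function name is hypothetical):

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static void *copy_opt_from_user(const void __user *uptr, size_t optlen)
    {
    	if (optlen > KMALLOC_MAX_SIZE)	/* refuse sizes kmalloc cannot back */
    		return ERR_PTR(-EINVAL);
    	return memdup_user(uptr, optlen);	/* returns ERR_PTR on failure */
    }
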
1377 diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
1378 index fcd04b881ec1..9cdcff77b9a8 100644
1379 --- a/drivers/infiniband/hw/mlx5/cq.c
1380 +++ b/drivers/infiniband/hw/mlx5/cq.c
1381 @@ -1117,7 +1117,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1382 if (ucmd.reserved0 || ucmd.reserved1)
1383 return -EINVAL;
1384
1385 - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
1386 + /* check multiplication overflow */
1387 + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
1388 + return -EINVAL;
1389 +
1390 + umem = ib_umem_get(context, ucmd.buf_addr,
1391 + (size_t)ucmd.cqe_size * entries,
1392 IB_ACCESS_LOCAL_WRITE, 1);
1393 if (IS_ERR(umem)) {
1394 err = PTR_ERR(umem);
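
The cq.c guard is a standard pre-multiplication overflow check: entries * cqe_size is computed in size_t, so the request must be rejected up front if the product would wrap. A minimal standalone sketch of the idiom:

    #include <stddef.h>
    #include <stdint.h>

    /* true if a * b would overflow size_t */
    static int mul_overflows(size_t a, size_t b)
    {
    	return b != 0 && a > SIZE_MAX / b;
    }
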
1395 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
1396 index c7d5786d366b..7e466f031e30 100644
1397 --- a/drivers/infiniband/hw/mlx5/mr.c
1398 +++ b/drivers/infiniband/hw/mlx5/mr.c
1399 @@ -1821,7 +1821,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1400
1401 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
1402 mr->ibmr.length = 0;
1403 - mr->ndescs = sg_nents;
1404
1405 for_each_sg(sgl, sg, sg_nents, i) {
1406 if (unlikely(i >= mr->max_descs))
1407 @@ -1833,6 +1832,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1408
1409 sg_offset = 0;
1410 }
1411 + mr->ndescs = i;
1412
1413 if (sg_offset_p)
1414 *sg_offset_p = sg_offset;
1415 diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
1416 index 7f12b6579f82..795fa353de7c 100644
1417 --- a/drivers/input/keyboard/matrix_keypad.c
1418 +++ b/drivers/input/keyboard/matrix_keypad.c
1419 @@ -216,8 +216,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
1420 {
1421 struct matrix_keypad *keypad = input_get_drvdata(dev);
1422
1423 + spin_lock_irq(&keypad->lock);
1424 keypad->stopped = true;
1425 - mb();
1426 + spin_unlock_irq(&keypad->lock);
1427 +
1428 flush_work(&keypad->work.work);
1429 /*
1430 * matrix_keypad_scan() will leave IRQs enabled;
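
The matrix_keypad change replaces a bare memory barrier with the spinlock the interrupt handler already takes, so "stopped" is published with proper ordering and mutual exclusion rather than hoping mb() pairs with something on the reader side. A hedged sketch of the pattern (function name is hypothetical):

    #include <linux/spinlock.h>

    static void stop_scanning(spinlock_t *lock, bool *stopped)
    {
    	spin_lock_irq(lock);	/* disable local IRQs, take the lock the handler uses */
    	*stopped = true;
    	spin_unlock_irq(lock);	/* the handler observes stopped == true afterwards */
    }
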
1431 diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
1432 index 3048ef3e3e16..a5e8998047fe 100644
1433 --- a/drivers/input/keyboard/tca8418_keypad.c
1434 +++ b/drivers/input/keyboard/tca8418_keypad.c
1435 @@ -189,8 +189,6 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
1436 input_event(input, EV_MSC, MSC_SCAN, code);
1437 input_report_key(input, keymap[code], state);
1438
1439 - /* Read for next loop */
1440 - error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
1441 } while (1);
1442
1443 input_sync(input);
1444 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1445 index 28ce342348a9..c61341c84d2d 100644
1446 --- a/drivers/md/bcache/super.c
1447 +++ b/drivers/md/bcache/super.c
1448 @@ -937,6 +937,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
1449 uint32_t rtime = cpu_to_le32(get_seconds());
1450 struct uuid_entry *u;
1451 char buf[BDEVNAME_SIZE];
1452 + struct cached_dev *exist_dc, *t;
1453
1454 bdevname(dc->bdev, buf);
1455
1456 @@ -960,6 +961,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
1457 return -EINVAL;
1458 }
1459
1460 + /* Check whether already attached */
1461 + list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
1462 + if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
1463 + pr_err("Tried to attach %s but duplicate UUID already attached",
1464 + buf);
1465 +
1466 + return -EINVAL;
1467 + }
1468 + }
1469 +
1470 u = uuid_find(c, dc->sb.uuid);
1471
1472 if (u &&
1473 @@ -1182,7 +1193,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
1474
1475 return;
1476 err:
1477 - pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1478 + pr_notice("error %s: %s", bdevname(bdev, name), err);
1479 bcache_device_stop(&dc->disk);
1480 }
1481
1482 @@ -1853,6 +1864,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
1483 const char *err = NULL; /* must be set for any error case */
1484 int ret = 0;
1485
1486 + bdevname(bdev, name);
1487 +
1488 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1489 ca->bdev = bdev;
1490 ca->bdev->bd_holder = ca;
1491 @@ -1863,11 +1876,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
1492 ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1493 get_page(sb_page);
1494
1495 - if (blk_queue_discard(bdev_get_queue(ca->bdev)))
1496 + if (blk_queue_discard(bdev_get_queue(bdev)))
1497 ca->discard = CACHE_DISCARD(&ca->sb);
1498
1499 ret = cache_alloc(ca);
1500 if (ret != 0) {
1501 + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1502 if (ret == -ENOMEM)
1503 err = "cache_alloc(): -ENOMEM";
1504 else
1505 @@ -1890,14 +1904,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
1506 goto out;
1507 }
1508
1509 - pr_info("registered cache device %s", bdevname(bdev, name));
1510 + pr_info("registered cache device %s", name);
1511
1512 out:
1513 kobject_put(&ca->kobj);
1514
1515 err:
1516 if (err)
1517 - pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1518 + pr_notice("error %s: %s", name, err);
1519
1520 return ret;
1521 }
1522 @@ -1986,6 +2000,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1523 if (err)
1524 goto err_close;
1525
1526 + err = "failed to register device";
1527 if (SB_IS_BDEV(sb)) {
1528 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1529 if (!dc)
1530 @@ -2000,7 +2015,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1531 goto err_close;
1532
1533 if (register_cache(sb, sb_page, bdev, ca) != 0)
1534 - goto err_close;
1535 + goto err;
1536 }
1537 out:
1538 if (sb_page)
1539 @@ -2013,7 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1540 err_close:
1541 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1542 err:
1543 - pr_info("error opening %s: %s", path, err);
1544 + pr_info("error %s: %s", path, err);
1545 ret = -EINVAL;
1546 goto out;
1547 }
1548 diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
1549 index 26d999c812c9..0f572bff64f5 100644
1550 --- a/drivers/media/i2c/tc358743.c
1551 +++ b/drivers/media/i2c/tc358743.c
1552 @@ -222,7 +222,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
1553 static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
1554 u8 mask, u8 val)
1555 {
1556 - i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
1557 + i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
1558 }
1559
1560 static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
1561 diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
1562 index 7ac78c13dd1c..1bcb25bc35ef 100644
1563 --- a/drivers/mtd/ubi/vmt.c
1564 +++ b/drivers/mtd/ubi/vmt.c
1565 @@ -265,6 +265,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
1566 vol->last_eb_bytes = vol->usable_leb_size;
1567 }
1568
1569 + /* Make volume "available" before it becomes accessible via sysfs */
1570 + spin_lock(&ubi->volumes_lock);
1571 + ubi->volumes[vol_id] = vol;
1572 + ubi->vol_count += 1;
1573 + spin_unlock(&ubi->volumes_lock);
1574 +
1575 /* Register character device for the volume */
1576 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
1577 vol->cdev.owner = THIS_MODULE;
1578 @@ -304,11 +310,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
1579 if (err)
1580 goto out_sysfs;
1581
1582 - spin_lock(&ubi->volumes_lock);
1583 - ubi->volumes[vol_id] = vol;
1584 - ubi->vol_count += 1;
1585 - spin_unlock(&ubi->volumes_lock);
1586 -
1587 ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
1588 self_check_volumes(ubi);
1589 return err;
1590 @@ -328,6 +329,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
1591 out_cdev:
1592 cdev_del(&vol->cdev);
1593 out_mapping:
1594 + spin_lock(&ubi->volumes_lock);
1595 + ubi->volumes[vol_id] = NULL;
1596 + ubi->vol_count -= 1;
1597 + spin_unlock(&ubi->volumes_lock);
1598 if (do_free)
1599 ubi_eba_destroy_table(eba_tbl);
1600 out_acc:
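
The UBI change above is an ordering fix: the volume is published in ubi->volumes[] before the character device and sysfs files that expose it are registered, and the error path now rolls that publication back. The shape of the pattern, sketched in userspace with a pthread mutex standing in for volumes_lock and a stubbed registration step:

#include <pthread.h>
#include <stddef.h>

#define MAX_VOLUMES 128

struct ubi_dev {
	pthread_mutex_t lock;		/* stand-in for ubi->volumes_lock */
	void *volumes[MAX_VOLUMES];
	int vol_count;
};

static int register_cdev(void *vol) { (void)vol; return 0; }	/* may fail */

static int create_volume(struct ubi_dev *ubi, int vol_id, void *vol)
{
	/* Publish first: anything that can reach the volume through
	 * the device node must already see it as fully available. */
	pthread_mutex_lock(&ubi->lock);
	ubi->volumes[vol_id] = vol;
	ubi->vol_count += 1;
	pthread_mutex_unlock(&ubi->lock);

	if (register_cdev(vol) != 0) {
		/* The error path undoes the publication. */
		pthread_mutex_lock(&ubi->lock);
		ubi->volumes[vol_id] = NULL;
		ubi->vol_count -= 1;
		pthread_mutex_unlock(&ubi->lock);
		return -1;
	}
	return 0;
}
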
1601 diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
1602 index af8f6e92e885..b3a87159e457 100644
1603 --- a/drivers/pci/host/pcie-designware.c
1604 +++ b/drivers/pci/host/pcie-designware.c
1605 @@ -861,7 +861,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
1606 /* setup bus numbers */
1607 val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
1608 val &= 0xff000000;
1609 - val |= 0x00010100;
1610 + val |= 0x00ff0100;
1611 dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);
1612
1613 /* setup command register */
1614 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
1615 index 8f12f6baa6b8..4441a559f139 100644
1616 --- a/drivers/scsi/qla2xxx/qla_init.c
1617 +++ b/drivers/scsi/qla2xxx/qla_init.c
1618 @@ -369,6 +369,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res)
1619 srb_t *sp = (srb_t *)ptr;
1620 struct srb_iocb *abt = &sp->u.iocb_cmd;
1621
1622 + del_timer(&sp->u.iocb_cmd.timer);
1623 complete(&abt->u.abt.comp);
1624 }
1625
1626 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
1627 index 59059ffbb98c..11f45cb99892 100644
1628 --- a/drivers/scsi/qla2xxx/qla_target.c
1629 +++ b/drivers/scsi/qla2xxx/qla_target.c
1630 @@ -5789,7 +5789,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
1631 fc_port_t *fcport;
1632 int rc;
1633
1634 - fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
1635 + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1636 if (!fcport) {
1637 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
1638 "qla_target(%d): Allocation of tmp FC port failed",
1639 diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
1640 index d08324998933..db100154f85c 100644
1641 --- a/drivers/staging/android/ashmem.c
1642 +++ b/drivers/staging/android/ashmem.c
1643 @@ -343,24 +343,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
1644 mutex_lock(&ashmem_mutex);
1645
1646 if (asma->size == 0) {
1647 - ret = -EINVAL;
1648 - goto out;
1649 + mutex_unlock(&ashmem_mutex);
1650 + return -EINVAL;
1651 }
1652
1653 if (!asma->file) {
1654 - ret = -EBADF;
1655 - goto out;
1656 + mutex_unlock(&ashmem_mutex);
1657 + return -EBADF;
1658 }
1659
1660 + mutex_unlock(&ashmem_mutex);
1661 +
1662 ret = vfs_llseek(asma->file, offset, origin);
1663 if (ret < 0)
1664 - goto out;
1665 + return ret;
1666
1667 /** Copy f_pos from backing file, since f_ops->llseek() sets it */
1668 file->f_pos = asma->file->f_pos;
1669 -
1670 -out:
1671 - mutex_unlock(&ashmem_mutex);
1672 return ret;
1673 }
1674
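The ashmem rewrite above narrows the critical section: the mutex now covers only the validation of asma->size and asma->file and is dropped before the call into vfs_llseek(), so a lock taken inside the backing file's seek path can no longer deadlock against ashmem_mutex. A self-contained sketch of the resulting lock scope (names are illustrative):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t area_mutex = PTHREAD_MUTEX_INITIALIZER;

struct area {
	long size;
	void *file;
};

static long backing_llseek(void *file, long off, int whence)
{
	(void)file; (void)whence;
	return off;		/* stub for the backing file's seek */
}

static long area_llseek(struct area *a, long off, int whence)
{
	/* Validate state under the lock... */
	pthread_mutex_lock(&area_mutex);
	if (a->size == 0) {
		pthread_mutex_unlock(&area_mutex);
		return -EINVAL;
	}
	if (!a->file) {
		pthread_mutex_unlock(&area_mutex);
		return -EBADF;
	}
	pthread_mutex_unlock(&area_mutex);

	/* ...but call into the other subsystem without holding it. */
	return backing_llseek(a->file, off, whence);
}
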
1675 diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
1676 index a5bf2cc165c0..1736248bc5b8 100644
1677 --- a/drivers/staging/comedi/drivers.c
1678 +++ b/drivers/staging/comedi/drivers.c
1679 @@ -484,8 +484,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
1680 struct comedi_cmd *cmd = &async->cmd;
1681
1682 if (cmd->stop_src == TRIG_COUNT) {
1683 - unsigned int nscans = nsamples / cmd->scan_end_arg;
1684 - unsigned int scans_left = __comedi_nscans_left(s, nscans);
1685 + unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg);
1686 unsigned int scan_pos =
1687 comedi_bytes_to_samples(s, async->scan_progress);
1688 unsigned long long samples_left = 0;
1689 diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1690 index b80ea872b039..e82b3473b6b8 100644
1691 --- a/drivers/tty/serial/8250/8250_pci.c
1692 +++ b/drivers/tty/serial/8250/8250_pci.c
1693 @@ -5099,6 +5099,17 @@ static struct pci_device_id serial_pci_tbl[] = {
1694 { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
1695 PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
1696 pbn_b2_4_115200 },
1697 + /*
1698 + * BrainBoxes UC-260
1699 + */
1700 + { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
1701 + PCI_ANY_ID, PCI_ANY_ID,
1702 + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
1703 + pbn_b2_4_115200 },
1704 + { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
1705 + PCI_ANY_ID, PCI_ANY_ID,
1706 + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
1707 + pbn_b2_4_115200 },
1708 /*
1709 * Perle PCI-RAS cards
1710 */
1711 diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
1712 index 4d079cdaa7a3..addb287cacea 100644
1713 --- a/drivers/tty/serial/atmel_serial.c
1714 +++ b/drivers/tty/serial/atmel_serial.c
1715 @@ -1780,6 +1780,7 @@ static void atmel_get_ip_name(struct uart_port *port)
1716 switch (version) {
1717 case 0x302:
1718 case 0x10213:
1719 + case 0x10302:
1720 dev_dbg(port->dev, "This version is usart\n");
1721 atmel_port->has_frac_baudrate = true;
1722 atmel_port->has_hw_timer = true;
1723 diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
1724 index c3651540e1ba..3b31fd8863eb 100644
1725 --- a/drivers/tty/serial/earlycon.c
1726 +++ b/drivers/tty/serial/earlycon.c
1727 @@ -253,11 +253,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
1728 }
1729 port->mapbase = addr;
1730 port->uartclk = BASE_BAUD * 16;
1731 - port->membase = earlycon_map(port->mapbase, SZ_4K);
1732
1733 val = of_get_flat_dt_prop(node, "reg-offset", NULL);
1734 if (val)
1735 port->mapbase += be32_to_cpu(*val);
1736 + port->membase = earlycon_map(port->mapbase, SZ_4K);
1737 +
1738 val = of_get_flat_dt_prop(node, "reg-shift", NULL);
1739 if (val)
1740 port->regshift = be32_to_cpu(*val);
1741 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
1742 index 23973a8124fc..6cbd46c565f8 100644
1743 --- a/drivers/tty/serial/serial_core.c
1744 +++ b/drivers/tty/serial/serial_core.c
1745 @@ -1135,6 +1135,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
1746 uport->ops->config_port(uport, flags);
1747
1748 ret = uart_startup(tty, state, 1);
1749 + if (ret == 0)
1750 + tty_port_set_initialized(port, true);
1751 if (ret > 0)
1752 ret = 0;
1753 }
1754 diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
1755 index 15eaea53b3df..a55c94dfefd2 100644
1756 --- a/drivers/tty/serial/sh-sci.c
1757 +++ b/drivers/tty/serial/sh-sci.c
1758 @@ -935,6 +935,8 @@ static void sci_receive_chars(struct uart_port *port)
1759 /* Tell the rest of the system the news. New characters! */
1760 tty_flip_buffer_push(tport);
1761 } else {
1762 + /* TTY buffers full; read from RX reg to prevent lockup */
1763 + serial_port_in(port, SCxRDR);
1764 serial_port_in(port, SCxSR); /* dummy read */
1765 sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
1766 }
1767 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1768 index 4c388451f31f..9cb66475c211 100644
1769 --- a/drivers/usb/core/message.c
1770 +++ b/drivers/usb/core/message.c
1771 @@ -148,6 +148,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
1772
1773 ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
1774
1775 + /* Linger a bit, prior to the next control message. */
1776 + if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
1777 + msleep(200);
1778 +
1779 kfree(dr);
1780
1781 return ret;
1782 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1783 index 774c97bb1c08..4f1c6f8d4352 100644
1784 --- a/drivers/usb/core/quirks.c
1785 +++ b/drivers/usb/core/quirks.c
1786 @@ -229,7 +229,8 @@ static const struct usb_device_id usb_quirk_list[] = {
1787 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
1788
1789 /* Corsair Strafe RGB */
1790 - { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
1791 + { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
1792 + USB_QUIRK_DELAY_CTRL_MSG },
1793
1794 /* Corsair K70 LUX */
1795 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
1796 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
1797 index 48f52138bb1a..071346973dd6 100644
1798 --- a/drivers/usb/gadget/function/f_fs.c
1799 +++ b/drivers/usb/gadget/function/f_fs.c
1800 @@ -1522,7 +1522,6 @@ ffs_fs_kill_sb(struct super_block *sb)
1801 if (sb->s_fs_info) {
1802 ffs_release_dev(sb->s_fs_info);
1803 ffs_data_closed(sb->s_fs_info);
1804 - ffs_data_put(sb->s_fs_info);
1805 }
1806 }
1807
1808 diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
1809 index e59334b09c41..f8ac59e0158d 100644
1810 --- a/drivers/usb/mon/mon_text.c
1811 +++ b/drivers/usb/mon/mon_text.c
1812 @@ -83,6 +83,8 @@ struct mon_reader_text {
1813
1814 wait_queue_head_t wait;
1815 int printf_size;
1816 + size_t printf_offset;
1817 + size_t printf_togo;
1818 char *printf_buf;
1819 struct mutex printf_lock;
1820
1821 @@ -374,75 +376,103 @@ static int mon_text_open(struct inode *inode, struct file *file)
1822 return rc;
1823 }
1824
1825 -/*
1826 - * For simplicity, we read one record in one system call and throw out
1827 - * what does not fit. This means that the following does not work:
1828 - * dd if=/dbg/usbmon/0t bs=10
1829 - * Also, we do not allow seeks and do not bother advancing the offset.
1830 - */
1831 +static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
1832 + char __user * const buf, const size_t nbytes)
1833 +{
1834 + const size_t togo = min(nbytes, rp->printf_togo);
1835 +
1836 + if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
1837 + return -EFAULT;
1838 + rp->printf_togo -= togo;
1839 + rp->printf_offset += togo;
1840 + return togo;
1841 +}
1842 +
1843 +/* ppos is not advanced since the llseek operation is not permitted. */
1844 static ssize_t mon_text_read_t(struct file *file, char __user *buf,
1845 - size_t nbytes, loff_t *ppos)
1846 + size_t nbytes, loff_t *ppos)
1847 {
1848 struct mon_reader_text *rp = file->private_data;
1849 struct mon_event_text *ep;
1850 struct mon_text_ptr ptr;
1851 + ssize_t ret;
1852
1853 - ep = mon_text_read_wait(rp, file);
1854 - if (IS_ERR(ep))
1855 - return PTR_ERR(ep);
1856 mutex_lock(&rp->printf_lock);
1857 - ptr.cnt = 0;
1858 - ptr.pbuf = rp->printf_buf;
1859 - ptr.limit = rp->printf_size;
1860 -
1861 - mon_text_read_head_t(rp, &ptr, ep);
1862 - mon_text_read_statset(rp, &ptr, ep);
1863 - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
1864 - " %d", ep->length);
1865 - mon_text_read_data(rp, &ptr, ep);
1866 -
1867 - if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
1868 - ptr.cnt = -EFAULT;
1869 +
1870 + if (rp->printf_togo == 0) {
1871 +
1872 + ep = mon_text_read_wait(rp, file);
1873 + if (IS_ERR(ep)) {
1874 + mutex_unlock(&rp->printf_lock);
1875 + return PTR_ERR(ep);
1876 + }
1877 + ptr.cnt = 0;
1878 + ptr.pbuf = rp->printf_buf;
1879 + ptr.limit = rp->printf_size;
1880 +
1881 + mon_text_read_head_t(rp, &ptr, ep);
1882 + mon_text_read_statset(rp, &ptr, ep);
1883 + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
1884 + " %d", ep->length);
1885 + mon_text_read_data(rp, &ptr, ep);
1886 +
1887 + rp->printf_togo = ptr.cnt;
1888 + rp->printf_offset = 0;
1889 +
1890 + kmem_cache_free(rp->e_slab, ep);
1891 + }
1892 +
1893 + ret = mon_text_copy_to_user(rp, buf, nbytes);
1894 mutex_unlock(&rp->printf_lock);
1895 - kmem_cache_free(rp->e_slab, ep);
1896 - return ptr.cnt;
1897 + return ret;
1898 }
1899
1900 +/* ppos is not advanced since the llseek operation is not permitted. */
1901 static ssize_t mon_text_read_u(struct file *file, char __user *buf,
1902 - size_t nbytes, loff_t *ppos)
1903 + size_t nbytes, loff_t *ppos)
1904 {
1905 struct mon_reader_text *rp = file->private_data;
1906 struct mon_event_text *ep;
1907 struct mon_text_ptr ptr;
1908 + ssize_t ret;
1909
1910 - ep = mon_text_read_wait(rp, file);
1911 - if (IS_ERR(ep))
1912 - return PTR_ERR(ep);
1913 mutex_lock(&rp->printf_lock);
1914 - ptr.cnt = 0;
1915 - ptr.pbuf = rp->printf_buf;
1916 - ptr.limit = rp->printf_size;
1917
1918 - mon_text_read_head_u(rp, &ptr, ep);
1919 - if (ep->type == 'E') {
1920 - mon_text_read_statset(rp, &ptr, ep);
1921 - } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
1922 - mon_text_read_isostat(rp, &ptr, ep);
1923 - mon_text_read_isodesc(rp, &ptr, ep);
1924 - } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
1925 - mon_text_read_intstat(rp, &ptr, ep);
1926 - } else {
1927 - mon_text_read_statset(rp, &ptr, ep);
1928 + if (rp->printf_togo == 0) {
1929 +
1930 + ep = mon_text_read_wait(rp, file);
1931 + if (IS_ERR(ep)) {
1932 + mutex_unlock(&rp->printf_lock);
1933 + return PTR_ERR(ep);
1934 + }
1935 + ptr.cnt = 0;
1936 + ptr.pbuf = rp->printf_buf;
1937 + ptr.limit = rp->printf_size;
1938 +
1939 + mon_text_read_head_u(rp, &ptr, ep);
1940 + if (ep->type == 'E') {
1941 + mon_text_read_statset(rp, &ptr, ep);
1942 + } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
1943 + mon_text_read_isostat(rp, &ptr, ep);
1944 + mon_text_read_isodesc(rp, &ptr, ep);
1945 + } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
1946 + mon_text_read_intstat(rp, &ptr, ep);
1947 + } else {
1948 + mon_text_read_statset(rp, &ptr, ep);
1949 + }
1950 + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
1951 + " %d", ep->length);
1952 + mon_text_read_data(rp, &ptr, ep);
1953 +
1954 + rp->printf_togo = ptr.cnt;
1955 + rp->printf_offset = 0;
1956 +
1957 + kmem_cache_free(rp->e_slab, ep);
1958 }
1959 - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
1960 - " %d", ep->length);
1961 - mon_text_read_data(rp, &ptr, ep);
1962
1963 - if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
1964 - ptr.cnt = -EFAULT;
1965 + ret = mon_text_copy_to_user(rp, buf, nbytes);
1966 mutex_unlock(&rp->printf_lock);
1967 - kmem_cache_free(rp->e_slab, ep);
1968 - return ptr.cnt;
1969 + return ret;
1970 }
1971
1972 static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
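
The usbmon rewrite above replaces the old one-record-per-read() behaviour (which threw away whatever did not fit, breaking small-block reads such as dd bs=10) with a carry-over buffer: each record is formatted once, and printf_offset/printf_togo track how much of it has been handed out across successive reads. A self-contained userspace model of that scheme:

#include <stdio.h>
#include <string.h>

struct text_reader {
	char buf[128];
	size_t offset;	/* next byte to hand out */
	size_t togo;	/* bytes of the current record still buffered */
	int seq;	/* stands in for the event queue */
};

/* Format a new record only once the previous one is fully consumed,
 * then serve at most n bytes of it. */
static size_t reader_read(struct text_reader *r, char *dst, size_t n)
{
	size_t chunk;

	if (r->togo == 0) {
		r->togo = snprintf(r->buf, sizeof(r->buf),
				   "event %d\n", r->seq++);
		r->offset = 0;
	}
	chunk = n < r->togo ? n : r->togo;
	memcpy(dst, r->buf + r->offset, chunk);
	r->offset += chunk;
	r->togo -= chunk;
	return chunk;
}

int main(void)
{
	struct text_reader r = { .seq = 0 };
	char out[4];
	int i;

	/* Four 4-byte reads reassemble two 8-byte records intact. */
	for (i = 0; i < 4; i++) {
		size_t got = reader_read(&r, out, sizeof(out));
		fwrite(out, 1, got, stdout);
	}
	return 0;
}
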
1973 diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
1974 index 6891e9092775..a96dcc660d0f 100644
1975 --- a/drivers/usb/storage/uas.c
1976 +++ b/drivers/usb/storage/uas.c
1977 @@ -1076,7 +1076,7 @@ static int uas_post_reset(struct usb_interface *intf)
1978 return 0;
1979
1980 err = uas_configure_endpoints(devinfo);
1981 - if (err && err != ENODEV)
1982 + if (err && err != -ENODEV)
1983 shost_printk(KERN_ERR, shost,
1984 "%s: alloc streams error %d after reset",
1985 __func__, err);
1986 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1987 index b605115eb47a..ca3a5d430ae1 100644
1988 --- a/drivers/usb/storage/unusual_devs.h
1989 +++ b/drivers/usb/storage/unusual_devs.h
1990 @@ -2137,6 +2137,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
1991 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1992 US_FL_BROKEN_FUA ),
1993
1994 +/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */
1995 +UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117,
1996 + "JMicron",
1997 + "USB to ATA/ATAPI Bridge",
1998 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1999 + US_FL_BROKEN_FUA ),
2000 +
2001 /* Reported-by George Cherian <george.cherian@cavium.com> */
2002 UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
2003 "JMicron",
2004 diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
2005 index 0f98f2c7475f..7efa374a4970 100644
2006 --- a/drivers/usb/usbip/vudc_sysfs.c
2007 +++ b/drivers/usb/usbip/vudc_sysfs.c
2008 @@ -117,10 +117,14 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
2009 if (rv != 0)
2010 return -EINVAL;
2011
2012 + if (!udc) {
2013 + dev_err(dev, "no device");
2014 + return -ENODEV;
2015 + }
2016 spin_lock_irqsave(&udc->lock, flags);
2017 /* Don't export what we don't have */
2018 - if (!udc || !udc->driver || !udc->pullup) {
2019 - dev_err(dev, "no device or gadget not bound");
2020 + if (!udc->driver || !udc->pullup) {
2021 + dev_err(dev, "gadget not bound");
2022 ret = -ENODEV;
2023 goto unlock;
2024 }
2025 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
2026 index 489bfc61cf30..8977f40ea441 100644
2027 --- a/drivers/virtio/virtio_ring.c
2028 +++ b/drivers/virtio/virtio_ring.c
2029 @@ -423,8 +423,6 @@ static inline int virtqueue_add(struct virtqueue *_vq,
2030 i = vq->vring.desc[i].next;
2031 }
2032
2033 - vq->vq.num_free += total_sg;
2034 -
2035 if (indirect)
2036 kfree(desc);
2037
2038 diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
2039 index 70c7194e2810..b0a158073abd 100644
2040 --- a/drivers/watchdog/hpwdt.c
2041 +++ b/drivers/watchdog/hpwdt.c
2042 @@ -28,16 +28,7 @@
2043 #include <linux/types.h>
2044 #include <linux/uaccess.h>
2045 #include <linux/watchdog.h>
2046 -#ifdef CONFIG_HPWDT_NMI_DECODING
2047 -#include <linux/dmi.h>
2048 -#include <linux/spinlock.h>
2049 -#include <linux/nmi.h>
2050 -#include <linux/kdebug.h>
2051 -#include <linux/notifier.h>
2052 -#include <asm/cacheflush.h>
2053 -#endif /* CONFIG_HPWDT_NMI_DECODING */
2054 #include <asm/nmi.h>
2055 -#include <asm/frame.h>
2056
2057 #define HPWDT_VERSION "1.4.0"
2058 #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
2059 @@ -48,10 +39,14 @@
2060 static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
2061 static unsigned int reload; /* the computed soft_margin */
2062 static bool nowayout = WATCHDOG_NOWAYOUT;
2063 +#ifdef CONFIG_HPWDT_NMI_DECODING
2064 +static unsigned int allow_kdump = 1;
2065 +#endif
2066 static char expect_release;
2067 static unsigned long hpwdt_is_open;
2068
2069 static void __iomem *pci_mem_addr; /* the PCI-memory address */
2070 +static unsigned long __iomem *hpwdt_nmistat;
2071 static unsigned long __iomem *hpwdt_timer_reg;
2072 static unsigned long __iomem *hpwdt_timer_con;
2073
2074 @@ -62,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = {
2075 };
2076 MODULE_DEVICE_TABLE(pci, hpwdt_devices);
2077
2078 -#ifdef CONFIG_HPWDT_NMI_DECODING
2079 -#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
2080 -#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
2081 -#define PCI_BIOS32_PARAGRAPH_LEN 16
2082 -#define PCI_ROM_BASE1 0x000F0000
2083 -#define ROM_SIZE 0x10000
2084 -
2085 -struct bios32_service_dir {
2086 - u32 signature;
2087 - u32 entry_point;
2088 - u8 revision;
2089 - u8 length;
2090 - u8 checksum;
2091 - u8 reserved[5];
2092 -};
2093 -
2094 -/* type 212 */
2095 -struct smbios_cru64_info {
2096 - u8 type;
2097 - u8 byte_length;
2098 - u16 handle;
2099 - u32 signature;
2100 - u64 physical_address;
2101 - u32 double_length;
2102 - u32 double_offset;
2103 -};
2104 -#define SMBIOS_CRU64_INFORMATION 212
2105 -
2106 -/* type 219 */
2107 -struct smbios_proliant_info {
2108 - u8 type;
2109 - u8 byte_length;
2110 - u16 handle;
2111 - u32 power_features;
2112 - u32 omega_features;
2113 - u32 reserved;
2114 - u32 misc_features;
2115 -};
2116 -#define SMBIOS_ICRU_INFORMATION 219
2117 -
2118 -
2119 -struct cmn_registers {
2120 - union {
2121 - struct {
2122 - u8 ral;
2123 - u8 rah;
2124 - u16 rea2;
2125 - };
2126 - u32 reax;
2127 - } u1;
2128 - union {
2129 - struct {
2130 - u8 rbl;
2131 - u8 rbh;
2132 - u8 reb2l;
2133 - u8 reb2h;
2134 - };
2135 - u32 rebx;
2136 - } u2;
2137 - union {
2138 - struct {
2139 - u8 rcl;
2140 - u8 rch;
2141 - u16 rec2;
2142 - };
2143 - u32 recx;
2144 - } u3;
2145 - union {
2146 - struct {
2147 - u8 rdl;
2148 - u8 rdh;
2149 - u16 red2;
2150 - };
2151 - u32 redx;
2152 - } u4;
2153 -
2154 - u32 resi;
2155 - u32 redi;
2156 - u16 rds;
2157 - u16 res;
2158 - u32 reflags;
2159 -} __attribute__((packed));
2160 -
2161 -static unsigned int hpwdt_nmi_decoding;
2162 -static unsigned int allow_kdump = 1;
2163 -static unsigned int is_icru;
2164 -static unsigned int is_uefi;
2165 -static DEFINE_SPINLOCK(rom_lock);
2166 -static void *cru_rom_addr;
2167 -static struct cmn_registers cmn_regs;
2168 -
2169 -extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
2170 - unsigned long *pRomEntry);
2171 -
2172 -#ifdef CONFIG_X86_32
2173 -/* --32 Bit Bios------------------------------------------------------------ */
2174 -
2175 -#define HPWDT_ARCH 32
2176 -
2177 -asm(".text \n\t"
2178 - ".align 4 \n\t"
2179 - ".globl asminline_call \n"
2180 - "asminline_call: \n\t"
2181 - "pushl %ebp \n\t"
2182 - "movl %esp, %ebp \n\t"
2183 - "pusha \n\t"
2184 - "pushf \n\t"
2185 - "push %es \n\t"
2186 - "push %ds \n\t"
2187 - "pop %es \n\t"
2188 - "movl 8(%ebp),%eax \n\t"
2189 - "movl 4(%eax),%ebx \n\t"
2190 - "movl 8(%eax),%ecx \n\t"
2191 - "movl 12(%eax),%edx \n\t"
2192 - "movl 16(%eax),%esi \n\t"
2193 - "movl 20(%eax),%edi \n\t"
2194 - "movl (%eax),%eax \n\t"
2195 - "push %cs \n\t"
2196 - "call *12(%ebp) \n\t"
2197 - "pushf \n\t"
2198 - "pushl %eax \n\t"
2199 - "movl 8(%ebp),%eax \n\t"
2200 - "movl %ebx,4(%eax) \n\t"
2201 - "movl %ecx,8(%eax) \n\t"
2202 - "movl %edx,12(%eax) \n\t"
2203 - "movl %esi,16(%eax) \n\t"
2204 - "movl %edi,20(%eax) \n\t"
2205 - "movw %ds,24(%eax) \n\t"
2206 - "movw %es,26(%eax) \n\t"
2207 - "popl %ebx \n\t"
2208 - "movl %ebx,(%eax) \n\t"
2209 - "popl %ebx \n\t"
2210 - "movl %ebx,28(%eax) \n\t"
2211 - "pop %es \n\t"
2212 - "popf \n\t"
2213 - "popa \n\t"
2214 - "leave \n\t"
2215 - "ret \n\t"
2216 - ".previous");
2217 -
2218 -
2219 -/*
2220 - * cru_detect
2221 - *
2222 - * Routine Description:
2223 - * This function uses the 32-bit BIOS Service Directory record to
2224 - * search for a $CRU record.
2225 - *
2226 - * Return Value:
2227 - * 0 : SUCCESS
2228 - * <0 : FAILURE
2229 - */
2230 -static int cru_detect(unsigned long map_entry,
2231 - unsigned long map_offset)
2232 -{
2233 - void *bios32_map;
2234 - unsigned long *bios32_entrypoint;
2235 - unsigned long cru_physical_address;
2236 - unsigned long cru_length;
2237 - unsigned long physical_bios_base = 0;
2238 - unsigned long physical_bios_offset = 0;
2239 - int retval = -ENODEV;
2240 -
2241 - bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));
2242 -
2243 - if (bios32_map == NULL)
2244 - return -ENODEV;
2245 -
2246 - bios32_entrypoint = bios32_map + map_offset;
2247 -
2248 - cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
2249 -
2250 - set_memory_x((unsigned long)bios32_map, 2);
2251 - asminline_call(&cmn_regs, bios32_entrypoint);
2252 -
2253 - if (cmn_regs.u1.ral != 0) {
2254 - pr_warn("Call succeeded but with an error: 0x%x\n",
2255 - cmn_regs.u1.ral);
2256 - } else {
2257 - physical_bios_base = cmn_regs.u2.rebx;
2258 - physical_bios_offset = cmn_regs.u4.redx;
2259 - cru_length = cmn_regs.u3.recx;
2260 - cru_physical_address =
2261 - physical_bios_base + physical_bios_offset;
2262 -
2263 - /* If the values look OK, then map it in. */
2264 - if ((physical_bios_base + physical_bios_offset)) {
2265 - cru_rom_addr =
2266 - ioremap(cru_physical_address, cru_length);
2267 - if (cru_rom_addr) {
2268 - set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
2269 - (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
2270 - retval = 0;
2271 - }
2272 - }
2273 -
2274 - pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base);
2275 - pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset);
2276 - pr_debug("CRU Length: 0x%lx\n", cru_length);
2277 - pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr);
2278 - }
2279 - iounmap(bios32_map);
2280 - return retval;
2281 -}
2282 -
2283 -/*
2284 - * bios_checksum
2285 - */
2286 -static int bios_checksum(const char __iomem *ptr, int len)
2287 -{
2288 - char sum = 0;
2289 - int i;
2290 -
2291 - /*
2292 - * calculate checksum of size bytes. This should add up
2293 - * to zero if we have a valid header.
2294 - */
2295 - for (i = 0; i < len; i++)
2296 - sum += ptr[i];
2297 -
2298 - return ((sum == 0) && (len > 0));
2299 -}
2300 -
2301 -/*
2302 - * bios32_present
2303 - *
2304 - * Routine Description:
2305 - * This function finds the 32-bit BIOS Service Directory
2306 - *
2307 - * Return Value:
2308 - * 0 : SUCCESS
2309 - * <0 : FAILURE
2310 - */
2311 -static int bios32_present(const char __iomem *p)
2312 -{
2313 - struct bios32_service_dir *bios_32_ptr;
2314 - int length;
2315 - unsigned long map_entry, map_offset;
2316 -
2317 - bios_32_ptr = (struct bios32_service_dir *) p;
2318 -
2319 - /*
2320 - * Search for signature by checking equal to the swizzled value
2321 - * instead of calling another routine to perform a strcmp.
2322 - */
2323 - if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
2324 - length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
2325 - if (bios_checksum(p, length)) {
2326 - /*
2327 - * According to the spec, we're looking for the
2328 - * first 4KB-aligned address below the entrypoint
2329 - * listed in the header. The Service Directory code
2330 - * is guaranteed to occupy no more than 2 4KB pages.
2331 - */
2332 - map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
2333 - map_offset = bios_32_ptr->entry_point - map_entry;
2334 -
2335 - return cru_detect(map_entry, map_offset);
2336 - }
2337 - }
2338 - return -ENODEV;
2339 -}
2340 -
2341 -static int detect_cru_service(void)
2342 -{
2343 - char __iomem *p, *q;
2344 - int rc = -1;
2345 -
2346 - /*
2347 - * Search from 0x0f0000 through 0x0fffff, inclusive.
2348 - */
2349 - p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
2350 - if (p == NULL)
2351 - return -ENOMEM;
2352 -
2353 - for (q = p; q < p + ROM_SIZE; q += 16) {
2354 - rc = bios32_present(q);
2355 - if (!rc)
2356 - break;
2357 - }
2358 - iounmap(p);
2359 - return rc;
2360 -}
2361 -/* ------------------------------------------------------------------------- */
2362 -#endif /* CONFIG_X86_32 */
2363 -#ifdef CONFIG_X86_64
2364 -/* --64 Bit Bios------------------------------------------------------------ */
2365 -
2366 -#define HPWDT_ARCH 64
2367 -
2368 -asm(".text \n\t"
2369 - ".align 4 \n\t"
2370 - ".globl asminline_call \n\t"
2371 - ".type asminline_call, @function \n\t"
2372 - "asminline_call: \n\t"
2373 - FRAME_BEGIN
2374 - "pushq %rax \n\t"
2375 - "pushq %rbx \n\t"
2376 - "pushq %rdx \n\t"
2377 - "pushq %r12 \n\t"
2378 - "pushq %r9 \n\t"
2379 - "movq %rsi, %r12 \n\t"
2380 - "movq %rdi, %r9 \n\t"
2381 - "movl 4(%r9),%ebx \n\t"
2382 - "movl 8(%r9),%ecx \n\t"
2383 - "movl 12(%r9),%edx \n\t"
2384 - "movl 16(%r9),%esi \n\t"
2385 - "movl 20(%r9),%edi \n\t"
2386 - "movl (%r9),%eax \n\t"
2387 - "call *%r12 \n\t"
2388 - "pushfq \n\t"
2389 - "popq %r12 \n\t"
2390 - "movl %eax, (%r9) \n\t"
2391 - "movl %ebx, 4(%r9) \n\t"
2392 - "movl %ecx, 8(%r9) \n\t"
2393 - "movl %edx, 12(%r9) \n\t"
2394 - "movl %esi, 16(%r9) \n\t"
2395 - "movl %edi, 20(%r9) \n\t"
2396 - "movq %r12, %rax \n\t"
2397 - "movl %eax, 28(%r9) \n\t"
2398 - "popq %r9 \n\t"
2399 - "popq %r12 \n\t"
2400 - "popq %rdx \n\t"
2401 - "popq %rbx \n\t"
2402 - "popq %rax \n\t"
2403 - FRAME_END
2404 - "ret \n\t"
2405 - ".previous");
2406 -
2407 -/*
2408 - * dmi_find_cru
2409 - *
2410 - * Routine Description:
2411 - * This function checks whether or not a SMBIOS/DMI record is
2412 - * the 64bit CRU info or not
2413 - */
2414 -static void dmi_find_cru(const struct dmi_header *dm, void *dummy)
2415 -{
2416 - struct smbios_cru64_info *smbios_cru64_ptr;
2417 - unsigned long cru_physical_address;
2418 -
2419 - if (dm->type == SMBIOS_CRU64_INFORMATION) {
2420 - smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
2421 - if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
2422 - cru_physical_address =
2423 - smbios_cru64_ptr->physical_address +
2424 - smbios_cru64_ptr->double_offset;
2425 - cru_rom_addr = ioremap(cru_physical_address,
2426 - smbios_cru64_ptr->double_length);
2427 - set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
2428 - smbios_cru64_ptr->double_length >> PAGE_SHIFT);
2429 - }
2430 - }
2431 -}
2432 -
2433 -static int detect_cru_service(void)
2434 -{
2435 - cru_rom_addr = NULL;
2436 -
2437 - dmi_walk(dmi_find_cru, NULL);
2438 -
2439 - /* if cru_rom_addr has been set then we found a CRU service */
2440 - return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
2441 -}
2442 -/* ------------------------------------------------------------------------- */
2443 -#endif /* CONFIG_X86_64 */
2444 -#endif /* CONFIG_HPWDT_NMI_DECODING */
2445
2446 /*
2447 * Watchdog operations
2448 @@ -475,32 +103,22 @@ static int hpwdt_time_left(void)
2449 }
2450
2451 #ifdef CONFIG_HPWDT_NMI_DECODING
2452 +static int hpwdt_my_nmi(void)
2453 +{
2454 + return ioread8(hpwdt_nmistat) & 0x6;
2455 +}
2456 +
2457 /*
2458 * NMI Handler
2459 */
2460 static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
2461 {
2462 - unsigned long rom_pl;
2463 - static int die_nmi_called;
2464 -
2465 - if (!hpwdt_nmi_decoding)
2466 + if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
2467 return NMI_DONE;
2468
2469 - spin_lock_irqsave(&rom_lock, rom_pl);
2470 - if (!die_nmi_called && !is_icru && !is_uefi)
2471 - asminline_call(&cmn_regs, cru_rom_addr);
2472 - die_nmi_called = 1;
2473 - spin_unlock_irqrestore(&rom_lock, rom_pl);
2474 -
2475 if (allow_kdump)
2476 hpwdt_stop();
2477
2478 - if (!is_icru && !is_uefi) {
2479 - if (cmn_regs.u1.ral == 0) {
2480 - nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
2481 - return NMI_HANDLED;
2482 - }
2483 - }
2484 nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
2485 "for the NMI is logged in any one of the following "
2486 "resources:\n"
2487 @@ -666,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = {
2488 * Init & Exit
2489 */
2490
2491 -#ifdef CONFIG_HPWDT_NMI_DECODING
2492 -#ifdef CONFIG_X86_LOCAL_APIC
2493 -static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
2494 -{
2495 - /*
2496 - * If nmi_watchdog is turned off then we can turn on
2497 - * our nmi decoding capability.
2498 - */
2499 - hpwdt_nmi_decoding = 1;
2500 -}
2501 -#else
2502 -static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
2503 -{
2504 - dev_warn(&dev->dev, "NMI decoding is disabled. "
2505 - "Your kernel does not support a NMI Watchdog.\n");
2506 -}
2507 -#endif /* CONFIG_X86_LOCAL_APIC */
2508 -
2509 -/*
2510 - * dmi_find_icru
2511 - *
2512 - * Routine Description:
2513 - * This function checks whether or not we are on an iCRU-based server.
2514 - * This check is independent of architecture and needs to be made for
2515 - * any ProLiant system.
2516 - */
2517 -static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
2518 -{
2519 - struct smbios_proliant_info *smbios_proliant_ptr;
2520 -
2521 - if (dm->type == SMBIOS_ICRU_INFORMATION) {
2522 - smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
2523 - if (smbios_proliant_ptr->misc_features & 0x01)
2524 - is_icru = 1;
2525 - if (smbios_proliant_ptr->misc_features & 0x408)
2526 - is_uefi = 1;
2527 - }
2528 -}
2529
2530 static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
2531 {
2532 +#ifdef CONFIG_HPWDT_NMI_DECODING
2533 int retval;
2534 -
2535 - /*
2536 - * On typical CRU-based systems we need to map that service in
2537 - * the BIOS. For 32 bit Operating Systems we need to go through
2538 - * the 32 Bit BIOS Service Directory. For 64 bit Operating
2539 - * Systems we get that service through SMBIOS.
2540 - *
2541 - * On systems that support the new iCRU service all we need to
2542 - * do is call dmi_walk to get the supported flag value and skip
2543 - * the old cru detect code.
2544 - */
2545 - dmi_walk(dmi_find_icru, NULL);
2546 - if (!is_icru && !is_uefi) {
2547 -
2548 - /*
2549 - * We need to map the ROM to get the CRU service.
2550 - * For 32 bit Operating Systems we need to go through the 32 Bit
2551 - * BIOS Service Directory
2552 - * For 64 bit Operating Systems we get that service through SMBIOS.
2553 - */
2554 - retval = detect_cru_service();
2555 - if (retval < 0) {
2556 - dev_warn(&dev->dev,
2557 - "Unable to detect the %d Bit CRU Service.\n",
2558 - HPWDT_ARCH);
2559 - return retval;
2560 - }
2561 -
2562 - /*
2563 - * We know this is the only CRU call we need to make so lets keep as
2564 - * few instructions as possible once the NMI comes in.
2565 - */
2566 - cmn_regs.u1.rah = 0x0D;
2567 - cmn_regs.u1.ral = 0x02;
2568 - }
2569 -
2570 /*
2571 * Only one function can register for NMI_UNKNOWN
2572 */
2573 @@ -771,44 +316,25 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
2574 dev_warn(&dev->dev,
2575 "Unable to register a die notifier (err=%d).\n",
2576 retval);
2577 - if (cru_rom_addr)
2578 - iounmap(cru_rom_addr);
2579 return retval;
2580 +#endif /* CONFIG_HPWDT_NMI_DECODING */
2581 + return 0;
2582 }
2583
2584 static void hpwdt_exit_nmi_decoding(void)
2585 {
2586 +#ifdef CONFIG_HPWDT_NMI_DECODING
2587 unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
2588 unregister_nmi_handler(NMI_SERR, "hpwdt");
2589 unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
2590 - if (cru_rom_addr)
2591 - iounmap(cru_rom_addr);
2592 -}
2593 -#else /* !CONFIG_HPWDT_NMI_DECODING */
2594 -static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
2595 -{
2596 -}
2597 -
2598 -static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
2599 -{
2600 - return 0;
2601 +#endif
2602 }
2603
2604 -static void hpwdt_exit_nmi_decoding(void)
2605 -{
2606 -}
2607 -#endif /* CONFIG_HPWDT_NMI_DECODING */
2608 -
2609 static int hpwdt_init_one(struct pci_dev *dev,
2610 const struct pci_device_id *ent)
2611 {
2612 int retval;
2613
2614 - /*
2615 - * Check if we can do NMI decoding or not
2616 - */
2617 - hpwdt_check_nmi_decoding(dev);
2618 -
2619 /*
2620 * First let's find out if we are on an iLO2+ server. We will
2621 * not run on a legacy ASM box.
2622 @@ -842,6 +368,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
2623 retval = -ENOMEM;
2624 goto error_pci_iomap;
2625 }
2626 + hpwdt_nmistat = pci_mem_addr + 0x6e;
2627 hpwdt_timer_reg = pci_mem_addr + 0x70;
2628 hpwdt_timer_con = pci_mem_addr + 0x72;
2629
2630 @@ -912,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
2631 #ifdef CONFIG_HPWDT_NMI_DECODING
2632 module_param(allow_kdump, int, 0);
2633 MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
2634 -#endif /* !CONFIG_HPWDT_NMI_DECODING */
2635 +#endif /* CONFIG_HPWDT_NMI_DECODING */
2636
2637 module_pci_driver(hpwdt_driver);
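
With the CRU/BIOS32 decoding removed, the hpwdt NMI path above reduces to a claim check: hpwdt_my_nmi() reads one status byte from the mapped iLO BAR, and the handler declines NMI_UNKNOWN events its hardware did not raise. A sketch of that shared-NMI claim pattern, with the register access stubbed out and the constants treated as assumptions rather than documented iLO behaviour:

#include <stdint.h>

enum { NMI_DONE = 0, NMI_HANDLED = 1 };
enum { NMI_UNKNOWN = 2 };		/* reason code, illustrative value */

static volatile uint8_t fake_nmistat;	/* stands in for the mapped BAR byte */

static int my_nmi(void)
{
	return fake_nmistat & 0x6;	/* status bits the device raises */
}

static int pretimeout(unsigned int reason)
{
	/* For unknown-source NMIs, claim only the ones we raised;
	 * everything else must stay visible to other handlers. */
	if (reason == NMI_UNKNOWN && !my_nmi())
		return NMI_DONE;

	/* ...stop the watchdog, then panic so kdump can run... */
	return NMI_HANDLED;
}
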
2638 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
2639 index 3eeed8f0aa06..3fadfabcac39 100644
2640 --- a/fs/ext4/xattr.c
2641 +++ b/fs/ext4/xattr.c
2642 @@ -837,8 +837,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
2643 if (!IS_LAST_ENTRY(s->first))
2644 ext4_xattr_rehash(header(s->base),
2645 s->here);
2646 - ext4_xattr_cache_insert(ext4_mb_cache,
2647 - bs->bh);
2648 }
2649 ext4_xattr_block_csum_set(inode, bs->bh);
2650 unlock_buffer(bs->bh);
2651 @@ -959,6 +957,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
2652 } else if (bs->bh && s->base == bs->bh->b_data) {
2653 /* We were modifying this block in-place. */
2654 ea_bdebug(bs->bh, "keeping this block");
2655 + ext4_xattr_cache_insert(ext4_mb_cache, bs->bh);
2656 new_bh = bs->bh;
2657 get_bh(new_bh);
2658 } else {
2659 diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
2660 index 1ac1593aded3..1ab91124a93e 100644
2661 --- a/fs/nfs/direct.c
2662 +++ b/fs/nfs/direct.c
2663 @@ -86,10 +86,10 @@ struct nfs_direct_req {
2664 struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
2665 int mirror_count;
2666
2667 + loff_t io_start; /* Start offset for I/O */
2668 ssize_t count, /* bytes actually processed */
2669 max_count, /* max expected count */
2670 bytes_left, /* bytes left to be sent */
2671 - io_start, /* start of IO */
2672 error; /* any reported error */
2673 struct completion completion; /* wait for i/o completion */
2674
2675 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
2676 index 9a3b3820306d..a8b786a648cd 100644
2677 --- a/fs/nfs/write.c
2678 +++ b/fs/nfs/write.c
2679 @@ -1847,40 +1847,43 @@ int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
2680 return status;
2681 }
2682
2683 -int nfs_commit_inode(struct inode *inode, int how)
2684 +static int __nfs_commit_inode(struct inode *inode, int how,
2685 + struct writeback_control *wbc)
2686 {
2687 LIST_HEAD(head);
2688 struct nfs_commit_info cinfo;
2689 int may_wait = how & FLUSH_SYNC;
2690 - int error = 0;
2691 - int res;
2692 + int ret, nscan;
2693
2694 nfs_init_cinfo_from_inode(&cinfo, inode);
2695 nfs_commit_begin(cinfo.mds);
2696 - res = nfs_scan_commit(inode, &head, &cinfo);
2697 - if (res)
2698 - error = nfs_generic_commit_list(inode, &head, how, &cinfo);
2699 + for (;;) {
2700 + ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
2701 + if (ret <= 0)
2702 + break;
2703 + ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
2704 + if (ret < 0)
2705 + break;
2706 + ret = 0;
2707 + if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
2708 + if (nscan < wbc->nr_to_write)
2709 + wbc->nr_to_write -= nscan;
2710 + else
2711 + wbc->nr_to_write = 0;
2712 + }
2713 + if (nscan < INT_MAX)
2714 + break;
2715 + cond_resched();
2716 + }
2717 nfs_commit_end(cinfo.mds);
2718 - if (res == 0)
2719 - return res;
2720 - if (error < 0)
2721 - goto out_error;
2722 - if (!may_wait)
2723 - goto out_mark_dirty;
2724 - error = wait_on_commit(cinfo.mds);
2725 - if (error < 0)
2726 - return error;
2727 - return res;
2728 -out_error:
2729 - res = error;
2730 - /* Note: If we exit without ensuring that the commit is complete,
2731 - * we must mark the inode as dirty. Otherwise, future calls to
2732 - * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
2733 - * that the data is on the disk.
2734 - */
2735 -out_mark_dirty:
2736 - __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
2737 - return res;
2738 + if (ret || !may_wait)
2739 + return ret;
2740 + return wait_on_commit(cinfo.mds);
2741 +}
2742 +
2743 +int nfs_commit_inode(struct inode *inode, int how)
2744 +{
2745 + return __nfs_commit_inode(inode, how, NULL);
2746 }
2747 EXPORT_SYMBOL_GPL(nfs_commit_inode);
2748
2749 @@ -1890,11 +1893,11 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
2750 int flags = FLUSH_SYNC;
2751 int ret = 0;
2752
2753 - /* no commits means nothing needs to be done */
2754 - if (!nfsi->commit_info.ncommit)
2755 - return ret;
2756 -
2757 if (wbc->sync_mode == WB_SYNC_NONE) {
2758 + /* no commits means nothing needs to be done */
2759 + if (!nfsi->commit_info.ncommit)
2760 + goto check_requests_outstanding;
2761 +
2762 /* Don't commit yet if this is a non-blocking flush and there
2763 * are a lot of outstanding writes for this mapping.
2764 */
2765 @@ -1905,16 +1908,16 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
2766 flags = 0;
2767 }
2768
2769 - ret = nfs_commit_inode(inode, flags);
2770 - if (ret >= 0) {
2771 - if (wbc->sync_mode == WB_SYNC_NONE) {
2772 - if (ret < wbc->nr_to_write)
2773 - wbc->nr_to_write -= ret;
2774 - else
2775 - wbc->nr_to_write = 0;
2776 - }
2777 - return 0;
2778 - }
2779 + ret = __nfs_commit_inode(inode, flags, wbc);
2780 + if (!ret) {
2781 + if (flags & FLUSH_SYNC)
2782 + return 0;
2783 + } else if (nfsi->commit_info.ncommit)
2784 + goto out_mark_dirty;
2785 +
2786 +check_requests_outstanding:
2787 + if (!atomic_read(&nfsi->commit_info.rpcs_out))
2788 + return ret;
2789 out_mark_dirty:
2790 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
2791 return ret;
2792 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
2793 index 982c299e435a..642d0597bb73 100644
2794 --- a/include/drm/drm_crtc_helper.h
2795 +++ b/include/drm/drm_crtc_helper.h
2796 @@ -74,5 +74,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
2797 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
2798 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
2799 extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
2800 +extern bool drm_kms_helper_is_poll_worker(void);
2801
2802 #endif
2803 diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
2804 index de179993e039..01225b0059b1 100644
2805 --- a/include/linux/compiler-clang.h
2806 +++ b/include/linux/compiler-clang.h
2807 @@ -15,3 +15,8 @@
2808 * with any version that can compile the kernel
2809 */
2810 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
2811 +
2812 +/* Clang doesn't have a way to turn it off per-function, yet. */
2813 +#ifdef __noretpoline
2814 +#undef __noretpoline
2815 +#endif
2816 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
2817 index eb0ed31193a3..a1b1de17455c 100644
2818 --- a/include/linux/compiler-gcc.h
2819 +++ b/include/linux/compiler-gcc.h
2820 @@ -88,6 +88,10 @@
2821 #define __weak __attribute__((weak))
2822 #define __alias(symbol) __attribute__((alias(#symbol)))
2823
2824 +#ifdef RETPOLINE
2825 +#define __noretpoline __attribute__((indirect_branch("keep")))
2826 +#endif
2827 +
2828 /*
2829 * it doesn't make sense on ARM (currently the only user of __naked)
2830 * to trace naked functions because then mcount is called without
2831 diff --git a/include/linux/init.h b/include/linux/init.h
2832 index 8e346d1bd837..683508f6bb4e 100644
2833 --- a/include/linux/init.h
2834 +++ b/include/linux/init.h
2835 @@ -5,10 +5,10 @@
2836 #include <linux/types.h>
2837
2838 /* Built-in __init functions needn't be compiled with retpoline */
2839 -#if defined(RETPOLINE) && !defined(MODULE)
2840 -#define __noretpoline __attribute__((indirect_branch("keep")))
2841 +#if defined(__noretpoline) && !defined(MODULE)
2842 +#define __noinitretpoline __noretpoline
2843 #else
2844 -#define __noretpoline
2845 +#define __noinitretpoline
2846 #endif
2847
2848 /* These macros are used to mark some functions or
2849 @@ -46,7 +46,7 @@
2850
2851 /* These are for everybody (although not all archs will actually
2852 discard it in modules) */
2853 -#define __init __section(.init.text) __cold notrace __latent_entropy __noretpoline
2854 +#define __init __section(.init.text) __cold notrace __latent_entropy __noinitretpoline
2855 #define __initdata __section(.init.data)
2856 #define __initconst __section(.init.rodata)
2857 #define __exitdata __section(.exit.data)
2858 diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
2859 index 2ad1a2b289b5..9bfeb88fb940 100644
2860 --- a/include/linux/netfilter/x_tables.h
2861 +++ b/include/linux/netfilter/x_tables.h
2862 @@ -375,38 +375,14 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
2863 return ret;
2864 }
2865
2866 +struct xt_percpu_counter_alloc_state {
2867 + unsigned int off;
2868 + const char __percpu *mem;
2869 +};
2870
2871 -/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
2872 - * real (percpu) counter. On !SMP, its just the packet count,
2873 - * so nothing needs to be done there.
2874 - *
2875 - * xt_percpu_counter_alloc returns the address of the percpu
2876 - * counter, or 0 on !SMP. We force an alignment of 16 bytes
2877 - * so that bytes/packets share a common cache line.
2878 - *
2879 - * Hence caller must use IS_ERR_VALUE to check for error, this
2880 - * allows us to return 0 for single core systems without forcing
2881 - * callers to deal with SMP vs. NONSMP issues.
2882 - */
2883 -static inline unsigned long xt_percpu_counter_alloc(void)
2884 -{
2885 - if (nr_cpu_ids > 1) {
2886 - void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
2887 - sizeof(struct xt_counters));
2888 -
2889 - if (res == NULL)
2890 - return -ENOMEM;
2891 -
2892 - return (__force unsigned long) res;
2893 - }
2894 -
2895 - return 0;
2896 -}
2897 -static inline void xt_percpu_counter_free(u64 pcnt)
2898 -{
2899 - if (nr_cpu_ids > 1)
2900 - free_percpu((void __percpu *) (unsigned long) pcnt);
2901 -}
2902 +bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
2903 + struct xt_counters *counter);
2904 +void xt_percpu_counter_free(struct xt_counters *cnt);
2905
2906 static inline struct xt_counters *
2907 xt_get_this_cpu_counter(struct xt_counters *cnt)
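
The x_tables.h interface change above replaces a per-rule __alloc_percpu() with a batched allocator: translate_table() threads a single xt_percpu_counter_alloc_state through all rules, and 16-byte counter slots are carved out of one larger percpu block until it is exhausted. A userspace model of the carving, with aligned_alloc() standing in for __alloc_percpu() and an assumed 4 KiB batch size:

#include <stdbool.h>
#include <stdlib.h>

#define BLOCK_SIZE 4096		/* assumed batch size */
#define SLOT_SIZE  16		/* packet/byte pair shares a cache line */

struct alloc_state {
	unsigned int off;
	char *mem;
};

struct counters {
	void *pcnt;
};

static bool counter_alloc(struct alloc_state *state, struct counters *c)
{
	if (!state->mem) {			/* start a fresh block */
		state->mem = aligned_alloc(SLOT_SIZE, BLOCK_SIZE);
		if (!state->mem)
			return false;
		state->off = 0;
	}
	c->pcnt = state->mem + state->off;
	state->off += SLOT_SIZE;
	if (state->off >= BLOCK_SIZE)		/* block used up */
		state->mem = NULL;
	return true;
}
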
2908 diff --git a/include/linux/nospec.h b/include/linux/nospec.h
2909 index 132e3f5a2e0d..e791ebc65c9c 100644
2910 --- a/include/linux/nospec.h
2911 +++ b/include/linux/nospec.h
2912 @@ -5,6 +5,7 @@
2913
2914 #ifndef _LINUX_NOSPEC_H
2915 #define _LINUX_NOSPEC_H
2916 +#include <asm/barrier.h>
2917
2918 /**
2919 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
2920 @@ -29,26 +30,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
2921 }
2922 #endif
2923
2924 -/*
2925 - * Warn developers about inappropriate array_index_nospec() usage.
2926 - *
2927 - * Even if the CPU speculates past the WARN_ONCE branch, the
2928 - * sign bit of @index is taken into account when generating the
2929 - * mask.
2930 - *
2931 - * This warning is compiled out when the compiler can infer that
2932 - * @index and @size are less than LONG_MAX.
2933 - */
2934 -#define array_index_mask_nospec_check(index, size) \
2935 -({ \
2936 - if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
2937 - "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
2938 - _mask = 0; \
2939 - else \
2940 - _mask = array_index_mask_nospec(index, size); \
2941 - _mask; \
2942 -})
2943 -
2944 /*
2945 * array_index_nospec - sanitize an array index after a bounds check
2946 *
2947 @@ -67,7 +48,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
2948 ({ \
2949 typeof(index) _i = (index); \
2950 typeof(size) _s = (size); \
2951 - unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
2952 + unsigned long _mask = array_index_mask_nospec(_i, _s); \
2953 \
2954 BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
2955 BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
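
The nospec.h change above drops the WARN_ONCE wrapper and calls array_index_mask_nospec() directly. The generic fallback for that mask is branch-free arithmetic: all ones when index < size, zero otherwise, so the index can be clamped with a single AND even while the CPU speculates. A worked userspace demo of the fallback formula, assuming arithmetic right shift of signed longs (which the kernel relies on):

#include <stdio.h>

/* ~0UL when index < size, 0UL otherwise, with no branch to mispredict. */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long size = 16, i;

	for (i = 14; i < 18; i++)
		printf("index %2lu -> mask %016lx -> clamped %lu\n",
		       i, index_mask(i, size), i & index_mask(i, size));
	return 0;
}
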
2956 diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
2957 index de2a722fe3cf..ea4f81c2a6d5 100644
2958 --- a/include/linux/usb/quirks.h
2959 +++ b/include/linux/usb/quirks.h
2960 @@ -56,4 +56,7 @@
2961 */
2962 #define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
2963
2964 +/* Device needs a pause after every control message. */
2965 +#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
2966 +
2967 #endif /* __LINUX_USB_QUIRKS_H */
2968 diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
2969 index 1061add575d2..1def337b16d4 100644
2970 --- a/include/linux/workqueue.h
2971 +++ b/include/linux/workqueue.h
2972 @@ -453,6 +453,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
2973
2974 extern void workqueue_set_max_active(struct workqueue_struct *wq,
2975 int max_active);
2976 +extern struct work_struct *current_work(void);
2977 extern bool current_is_workqueue_rescuer(void);
2978 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
2979 extern unsigned int work_busy(struct work_struct *work);
2980 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2981 index ebfea5f94b66..664aebc50fe3 100644
2982 --- a/kernel/workqueue.c
2983 +++ b/kernel/workqueue.c
2984 @@ -4129,6 +4129,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2985 }
2986 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
2987
2988 +/**
2989 + * current_work - retrieve %current task's work struct
2990 + *
2991 + * Determine if %current task is a workqueue worker and what it's working on.
2992 + * Useful to find out the context that the %current task is running in.
2993 + *
2994 + * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
2995 + */
2996 +struct work_struct *current_work(void)
2997 +{
2998 + struct worker *worker = current_wq_worker();
2999 +
3000 + return worker ? worker->current_work : NULL;
3001 +}
3002 +EXPORT_SYMBOL(current_work);
3003 +
3004 /**
3005 * current_is_workqueue_rescuer - is %current workqueue rescuer?
3006 *
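
current_work() above is new plumbing: it lets code ask whether the running task is a workqueue worker and, if so, which work item it is executing. A hypothetical driver-side use (my_dev and reset_work are made up for illustration): a reset path that must not synchronously cancel the very work item it may be running inside.

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct reset_work;
	/* ... */
};

static void my_dev_reset(struct my_dev *d)
{
	/* Called from our own work function? Then cancel_work_sync()
	 * on reset_work would deadlock waiting for ourselves. */
	if (current_work() != &d->reset_work)
		cancel_work_sync(&d->reset_work);

	/* ...perform the reset... */
}
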
3007 diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
3008 index 9024283d2bca..9637a681bdda 100644
3009 --- a/net/bridge/netfilter/ebt_among.c
3010 +++ b/net/bridge/netfilter/ebt_among.c
3011 @@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
3012 return true;
3013 }
3014
3015 +static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
3016 +{
3017 + return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
3018 +}
3019 +
3020 static int ebt_among_mt_check(const struct xt_mtchk_param *par)
3021 {
3022 const struct ebt_among_info *info = par->matchinfo;
3023 const struct ebt_entry_match *em =
3024 container_of(par->matchinfo, const struct ebt_entry_match, data);
3025 - int expected_length = sizeof(struct ebt_among_info);
3026 + unsigned int expected_length = sizeof(struct ebt_among_info);
3027 const struct ebt_mac_wormhash *wh_dst, *wh_src;
3028 int err;
3029
3030 + if (expected_length > em->match_size)
3031 + return -EINVAL;
3032 +
3033 wh_dst = ebt_among_wh_dst(info);
3034 - wh_src = ebt_among_wh_src(info);
3035 + if (poolsize_invalid(wh_dst))
3036 + return -EINVAL;
3037 +
3038 expected_length += ebt_mac_wormhash_size(wh_dst);
3039 + if (expected_length > em->match_size)
3040 + return -EINVAL;
3041 +
3042 + wh_src = ebt_among_wh_src(info);
3043 + if (poolsize_invalid(wh_src))
3044 + return -EINVAL;
3045 +
3046 expected_length += ebt_mac_wormhash_size(wh_src);
3047
3048 if (em->match_size != EBT_ALIGN(expected_length)) {
3049 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
3050 index f5c11bbe27db..5a89a4ac86ef 100644
3051 --- a/net/bridge/netfilter/ebtables.c
3052 +++ b/net/bridge/netfilter/ebtables.c
3053 @@ -2031,7 +2031,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
3054 if (match_kern)
3055 match_kern->match_size = ret;
3056
3057 - WARN_ON(type == EBT_COMPAT_TARGET && size_left);
3058 + if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
3059 + return -EINVAL;
3060 +
3061 match32 = (struct compat_ebt_entry_mwt *) buf;
3062 }
3063
3064 @@ -2087,6 +2089,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
3065 *
3066 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
3067 */
3068 + for (i = 0; i < 4 ; ++i) {
3069 + if (offsets[i] >= *total)
3070 + return -EINVAL;
3071 + if (i == 0)
3072 + continue;
3073 + if (offsets[i-1] > offsets[i])
3074 + return -EINVAL;
3075 + }
3076 +
3077 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
3078 struct compat_ebt_entry_mwt *match32;
3079 unsigned int size;
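
The size_entry_mwt() hunk above hardens the ebtables compat path by validating the four section offsets before use: each must lie inside the translated entry, and they must be non-decreasing, since later code derives section sizes from differences of neighbouring offsets. The check in isolation, as a small standalone sketch:

#include <stdbool.h>

/* Reject offset arrays that would make offsets[j] - offsets[i]
 * underflow or point past the entry. */
static bool offsets_sane(const unsigned int offsets[4], unsigned int total)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (offsets[i] >= total)
			return false;
		if (i > 0 && offsets[i - 1] > offsets[i])
			return false;
	}
	return true;
}
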
3080 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
3081 index 697538464e6e..d35815e5967b 100644
3082 --- a/net/ipv4/netfilter/arp_tables.c
3083 +++ b/net/ipv4/netfilter/arp_tables.c
3084 @@ -261,6 +261,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
3085 }
3086 if (table_base + v
3087 != arpt_next_entry(e)) {
3088 + if (unlikely(stackidx >= private->stacksize)) {
3089 + verdict = NF_DROP;
3090 + break;
3091 + }
3092 jumpstack[stackidx++] = e;
3093 }
3094
3095 @@ -415,17 +419,15 @@ static inline int check_target(struct arpt_entry *e, const char *name)
3096 }
3097
3098 static inline int
3099 -find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
3100 +find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
3101 + struct xt_percpu_counter_alloc_state *alloc_state)
3102 {
3103 struct xt_entry_target *t;
3104 struct xt_target *target;
3105 - unsigned long pcnt;
3106 int ret;
3107
3108 - pcnt = xt_percpu_counter_alloc();
3109 - if (IS_ERR_VALUE(pcnt))
3110 + if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
3111 return -ENOMEM;
3112 - e->counters.pcnt = pcnt;
3113
3114 t = arpt_get_target(e);
3115 target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
3116 @@ -443,7 +445,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
3117 err:
3118 module_put(t->u.kernel.target->me);
3119 out:
3120 - xt_percpu_counter_free(e->counters.pcnt);
3121 + xt_percpu_counter_free(&e->counters);
3122
3123 return ret;
3124 }
3125 @@ -523,7 +525,7 @@ static inline void cleanup_entry(struct arpt_entry *e)
3126 if (par.target->destroy != NULL)
3127 par.target->destroy(&par);
3128 module_put(par.target->me);
3129 - xt_percpu_counter_free(e->counters.pcnt);
3130 + xt_percpu_counter_free(&e->counters);
3131 }
3132
3133 /* Checks and translates the user-supplied table segment (held in
3134 @@ -532,6 +534,7 @@ static inline void cleanup_entry(struct arpt_entry *e)
3135 static int translate_table(struct xt_table_info *newinfo, void *entry0,
3136 const struct arpt_replace *repl)
3137 {
3138 + struct xt_percpu_counter_alloc_state alloc_state = { 0 };
3139 struct arpt_entry *iter;
3140 unsigned int *offsets;
3141 unsigned int i;
3142 @@ -594,7 +597,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
3143 /* Finally, each sanity check must pass */
3144 i = 0;
3145 xt_entry_foreach(iter, entry0, newinfo->size) {
3146 - ret = find_check_entry(iter, repl->name, repl->size);
3147 + ret = find_check_entry(iter, repl->name, repl->size,
3148 + &alloc_state);
3149 if (ret != 0)
3150 break;
3151 ++i;
3152 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
3153 index 7c00ce90adb8..e78f6521823f 100644
3154 --- a/net/ipv4/netfilter/ip_tables.c
3155 +++ b/net/ipv4/netfilter/ip_tables.c
3156 @@ -345,8 +345,13 @@ ipt_do_table(struct sk_buff *skb,
3157 continue;
3158 }
3159 if (table_base + v != ipt_next_entry(e) &&
3160 - !(e->ip.flags & IPT_F_GOTO))
3161 + !(e->ip.flags & IPT_F_GOTO)) {
3162 + if (unlikely(stackidx >= private->stacksize)) {
3163 + verdict = NF_DROP;
3164 + break;
3165 + }
3166 jumpstack[stackidx++] = e;
3167 + }
3168
3169 e = get_entry(table_base, v);
3170 continue;
3171 @@ -535,7 +540,8 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name)
3172
3173 static int
3174 find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
3175 - unsigned int size)
3176 + unsigned int size,
3177 + struct xt_percpu_counter_alloc_state *alloc_state)
3178 {
3179 struct xt_entry_target *t;
3180 struct xt_target *target;
3181 @@ -543,12 +549,9 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
3182 unsigned int j;
3183 struct xt_mtchk_param mtpar;
3184 struct xt_entry_match *ematch;
3185 - unsigned long pcnt;
3186
3187 - pcnt = xt_percpu_counter_alloc();
3188 - if (IS_ERR_VALUE(pcnt))
3189 + if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
3190 return -ENOMEM;
3191 - e->counters.pcnt = pcnt;
3192
3193 j = 0;
3194 mtpar.net = net;
3195 @@ -586,7 +589,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
3196 cleanup_match(ematch, net);
3197 }
3198
3199 - xt_percpu_counter_free(e->counters.pcnt);
3200 + xt_percpu_counter_free(&e->counters);
3201
3202 return ret;
3203 }
3204 @@ -674,7 +677,7 @@ cleanup_entry(struct ipt_entry *e, struct net *net)
3205 if (par.target->destroy != NULL)
3206 par.target->destroy(&par);
3207 module_put(par.target->me);
3208 - xt_percpu_counter_free(e->counters.pcnt);
3209 + xt_percpu_counter_free(&e->counters);
3210 }
3211
3212 /* Checks and translates the user-supplied table segment (held in
3213 @@ -683,6 +686,7 @@ static int
3214 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
3215 const struct ipt_replace *repl)
3216 {
3217 + struct xt_percpu_counter_alloc_state alloc_state = { 0 };
3218 struct ipt_entry *iter;
3219 unsigned int *offsets;
3220 unsigned int i;
3221 @@ -742,7 +746,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
3222 /* Finally, each sanity check must pass */
3223 i = 0;
3224 xt_entry_foreach(iter, entry0, newinfo->size) {
3225 - ret = find_check_entry(iter, net, repl->name, repl->size);
3226 + ret = find_check_entry(iter, net, repl->name, repl->size,
3227 + &alloc_state);
3228 if (ret != 0)
3229 break;
3230 ++i;
3231 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
3232 index 55aacea24396..e26becc9a43d 100644
3233 --- a/net/ipv6/netfilter/ip6_tables.c
3234 +++ b/net/ipv6/netfilter/ip6_tables.c
3235 @@ -376,6 +376,10 @@ ip6t_do_table(struct sk_buff *skb,
3236 }
3237 if (table_base + v != ip6t_next_entry(e) &&
3238 !(e->ipv6.flags & IP6T_F_GOTO)) {
3239 + if (unlikely(stackidx >= private->stacksize)) {
3240 + verdict = NF_DROP;
3241 + break;
3242 + }
3243 jumpstack[stackidx++] = e;
3244 }
3245
3246 @@ -566,7 +570,8 @@ static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
3247
3248 static int
3249 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
3250 - unsigned int size)
3251 + unsigned int size,
3252 + struct xt_percpu_counter_alloc_state *alloc_state)
3253 {
3254 struct xt_entry_target *t;
3255 struct xt_target *target;
3256 @@ -574,12 +579,9 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
3257 unsigned int j;
3258 struct xt_mtchk_param mtpar;
3259 struct xt_entry_match *ematch;
3260 - unsigned long pcnt;
3261
3262 - pcnt = xt_percpu_counter_alloc();
3263 - if (IS_ERR_VALUE(pcnt))
3264 + if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
3265 return -ENOMEM;
3266 - e->counters.pcnt = pcnt;
3267
3268 j = 0;
3269 mtpar.net = net;
3270 @@ -616,7 +618,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
3271 cleanup_match(ematch, net);
3272 }
3273
3274 - xt_percpu_counter_free(e->counters.pcnt);
3275 + xt_percpu_counter_free(&e->counters);
3276
3277 return ret;
3278 }
3279 @@ -703,8 +705,7 @@ static void cleanup_entry(struct ip6t_entry *e, struct net *net)
3280 if (par.target->destroy != NULL)
3281 par.target->destroy(&par);
3282 module_put(par.target->me);
3283 -
3284 - xt_percpu_counter_free(e->counters.pcnt);
3285 + xt_percpu_counter_free(&e->counters);
3286 }
3287
3288 /* Checks and translates the user-supplied table segment (held in
3289 @@ -713,6 +714,7 @@ static int
3290 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
3291 const struct ip6t_replace *repl)
3292 {
3293 + struct xt_percpu_counter_alloc_state alloc_state = { 0 };
3294 struct ip6t_entry *iter;
3295 unsigned int *offsets;
3296 unsigned int i;
3297 @@ -772,7 +774,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
3298 /* Finally, each sanity check must pass */
3299 i = 0;
3300 xt_entry_foreach(iter, entry0, newinfo->size) {
3301 - ret = find_check_entry(iter, net, repl->name, repl->size);
3302 + ret = find_check_entry(iter, net, repl->name, repl->size,
3303 + &alloc_state);
3304 if (ret != 0)
3305 break;
3306 ++i;
3307 diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
3308 index e0be97e636a4..322c7657165b 100644
3309 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
3310 +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
3311 @@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
3312 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
3313 target, maniptype))
3314 return false;
3315 +
3316 + /* must reload, offset might have changed */
3317 + ipv6h = (void *)skb->data + iphdroff;
3318 +
3319 manip_addr:
3320 if (maniptype == NF_NAT_MANIP_SRC)
3321 ipv6h->saddr = target->src.u3.in6;
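
The reload above is needed because l4proto->manip_pkt() may make the packet writable and, in doing so, relocate the buffer backing skb->data, leaving any previously computed header pointer stale, exactly as the added comment says. A compilable userspace sketch of the hazard, with grow_buf() as a hypothetical stand-in for the reallocating callee:

    #include <stdlib.h>

    static unsigned char *grow_buf(unsigned char *buf, size_t *len)
    {
            unsigned char *n = realloc(buf, *len * 2); /* may move the data */
            if (!n)
                    return buf;                        /* old buffer still valid */
            *len *= 2;
            return n;
    }

    int main(void)
    {
            size_t len = 64, hdroff = 40;
            unsigned char *buf = calloc(1, len);
            if (!buf)
                    return 1;
            unsigned char *hdr = buf + hdroff;  /* cached header pointer */
            buf = grow_buf(buf, &len);
            hdr = buf + hdroff;                 /* must reload: buf may have moved */
            hdr[0] = 0x60;                      /* safe only after the reload */
            free(buf);
            return 0;
    }
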
3322 diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
3323 index fbce552a796e..7d7466dbf663 100644
3324 --- a/net/netfilter/nf_nat_proto_common.c
3325 +++ b/net/netfilter/nf_nat_proto_common.c
3326 @@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
3327 const struct nf_conn *ct,
3328 u16 *rover)
3329 {
3330 - unsigned int range_size, min, i;
3331 + unsigned int range_size, min, max, i;
3332 __be16 *portptr;
3333 u_int16_t off;
3334
3335 @@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
3336 }
3337 } else {
3338 min = ntohs(range->min_proto.all);
3339 - range_size = ntohs(range->max_proto.all) - min + 1;
3340 + max = ntohs(range->max_proto.all);
3341 + if (unlikely(max < min))
3342 + swap(max, min);
3343 + range_size = max - min + 1;
3344 }
3345
3346 if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
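
The swap guards a userspace-supplied port range: min_proto and max_proto come straight from the NAT rule, and with unsigned arithmetic `max - min + 1` wraps around when max < min, producing an enormous range_size. A two-minute demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int min = 2000, max = 1000;
            printf("unguarded: %u\n", max - min + 1); /* wraps to 4294966297 */
            if (max < min) {                          /* the added swap */
                    unsigned int t = min;
                    min = max;
                    max = t;
            }
            printf("guarded:   %u\n", max - min + 1); /* 1001 */
            return 0;
    }
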
3347 diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
3348 index e47ade305a46..ac26b71d900e 100644
3349 --- a/net/netfilter/x_tables.c
3350 +++ b/net/netfilter/x_tables.c
3351 @@ -39,6 +39,8 @@ MODULE_LICENSE("GPL");
3352 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
3353 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
3354
3355 +#define XT_PCPU_BLOCK_SIZE 4096
3356 +
3357 struct compat_delta {
3358 unsigned int offset; /* offset in kernel */
3359 int delta; /* delta in 32bit user land */
3360 @@ -1619,6 +1621,59 @@ void xt_proto_fini(struct net *net, u_int8_t af)
3361 }
3362 EXPORT_SYMBOL_GPL(xt_proto_fini);
3363
3364 +/**
3365 + * xt_percpu_counter_alloc - allocate x_tables rule counter
3366 + *
3367 + * @state: pointer to xt_percpu allocation state
3368 + * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
3369 + *
3370 + * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
3371 + * contain the address of the real (percpu) counter.
3372 + *
3373 + * Rule evaluation needs to use xt_get_this_cpu_counter() helper
3374 + * to fetch the real percpu counter.
3375 + *
3376 + * To speed up allocation and improve data locality, a 4kb block is
3377 + * allocated.
3378 + *
3379 + * xt_percpu_counter_alloc_state contains the base address of the
3380 + * allocated page and the current sub-offset.
3381 + *
3382 + * returns false on error.
3383 + */
3384 +bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
3385 + struct xt_counters *counter)
3386 +{
3387 + BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));
3388 +
3389 + if (nr_cpu_ids <= 1)
3390 + return true;
3391 +
3392 + if (!state->mem) {
3393 + state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
3394 + XT_PCPU_BLOCK_SIZE);
3395 + if (!state->mem)
3396 + return false;
3397 + }
3398 + counter->pcnt = (__force unsigned long)(state->mem + state->off);
3399 + state->off += sizeof(*counter);
3400 + if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
3401 + state->mem = NULL;
3402 + state->off = 0;
3403 + }
3404 + return true;
3405 +}
3406 +EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
3407 +
3408 +void xt_percpu_counter_free(struct xt_counters *counters)
3409 +{
3410 + unsigned long pcnt = counters->pcnt;
3411 +
3412 + if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
3413 + free_percpu((void __percpu *)pcnt);
3414 +}
3415 +EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
3416 +
3417 static int __net_init xt_net_init(struct net *net)
3418 {
3419 int i;
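
xt_percpu_counter_alloc() above is a bump allocator: struct xt_counters is 16 bytes, so one 4096-byte percpu block holds 256 counters, and the alloc_state merely tracks the current block and sub-offset. Freeing is the mirror image: only a pointer whose low bits are all zero (the block base) actually owns the percpu allocation. A userspace sketch of the same scheme, with posix_memalign standing in for __alloc_percpu and all names illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BLOCK_SIZE 4096
    #define SLOT_SIZE  16     /* sizeof(struct xt_counters) */

    struct alloc_state { unsigned char *mem; unsigned int off; };

    static void *slot_alloc(struct alloc_state *st)
    {
            void *p;

            if (!st->mem) {
                    if (posix_memalign((void **)&st->mem, BLOCK_SIZE, BLOCK_SIZE))
                            return NULL;
                    st->off = 0;
            }
            p = st->mem + st->off;
            st->off += SLOT_SIZE;
            if (st->off > BLOCK_SIZE - SLOT_SIZE) { /* block exhausted */
                    st->mem = NULL;                 /* next alloc grabs a new one */
                    st->off = 0;
            }
            return p;
    }

    static void slot_free(void *p)
    {
            /* Only the first slot of each block owns the allocation. */
            if (((uintptr_t)p & (BLOCK_SIZE - 1)) == 0)
                    free(p);
    }

    int main(void)
    {
            struct alloc_state st = { 0 };
            void *a = slot_alloc(&st), *b = slot_alloc(&st);
            printf("%p %p\n", a, b);  /* 16 bytes apart, same block */
            slot_free(b);             /* no-op: not the block base */
            slot_free(a);             /* releases the whole block */
            return 0;
    }

Freeing only at the block base is safe here because a table's entries are always torn down together, so the base slot outlives every other slot in its block.
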
3420 diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
3421 index daf45da448fa..bb5d6a058fb7 100644
3422 --- a/net/netfilter/xt_IDLETIMER.c
3423 +++ b/net/netfilter/xt_IDLETIMER.c
3424 @@ -147,11 +147,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
3425 (unsigned long) info->timer);
3426 info->timer->refcnt = 1;
3427
3428 + INIT_WORK(&info->timer->work, idletimer_tg_work);
3429 +
3430 mod_timer(&info->timer->timer,
3431 msecs_to_jiffies(info->timeout * 1000) + jiffies);
3432
3433 - INIT_WORK(&info->timer->work, idletimer_tg_work);
3434 -
3435 return 0;
3436
3437 out_free_attr:
3438 @@ -192,7 +192,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
3439 pr_debug("timeout value is zero\n");
3440 return -EINVAL;
3441 }
3442 -
3443 + if (info->timeout >= INT_MAX / 1000) {
3444 + pr_debug("timeout value is too big\n");
3445 + return -EINVAL;
3446 + }
3447 if (info->label[0] == '\0' ||
3448 strnlen(info->label,
3449 MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
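
Two distinct fixes in this file: INIT_WORK() moves before mod_timer() so the timer can never fire and queue a work struct that has not been initialized yet, and the new bound rejects timeouts whose millisecond conversion would overflow, since info->timeout * 1000 is what gets passed to msecs_to_jiffies(). The `>= INT_MAX / 1000` check is deliberately conservative (it rejects the last value whose product still fits), as a quick arithmetic check shows:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int limit = INT_MAX / 1000;          /* 2147483 */
            printf("%u * 1000 = %lld (still fits in int)\n",
                   limit, (long long)limit * 1000);
            printf("%u * 1000 = %lld (exceeds INT_MAX %d)\n",
                   limit + 1, (long long)(limit + 1) * 1000, INT_MAX);
            return 0;
    }
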
3450 diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
3451 index 3ba31c194cce..0858fe17e14a 100644
3452 --- a/net/netfilter/xt_LED.c
3453 +++ b/net/netfilter/xt_LED.c
3454 @@ -141,10 +141,11 @@ static int led_tg_check(const struct xt_tgchk_param *par)
3455 goto exit_alloc;
3456 }
3457
3458 - /* See if we need to set up a timer */
3459 - if (ledinfo->delay > 0)
3460 - setup_timer(&ledinternal->timer, led_timeout_callback,
3461 - (unsigned long)ledinternal);
3462 + /* Since the ledinternal timer can be shared between multiple targets,
3463 + * always set it up, even if the current target does not need it
3464 + */
3465 + setup_timer(&ledinternal->timer, led_timeout_callback,
3466 + (unsigned long)ledinternal);
3467
3468 list_add_tail(&ledinternal->list, &xt_led_triggers);
3469
3470 @@ -181,8 +182,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
3471
3472 list_del(&ledinternal->list);
3473
3474 - if (ledinfo->delay > 0)
3475 - del_timer_sync(&ledinternal->timer);
3476 + del_timer_sync(&ledinternal->timer);
3477
3478 led_trigger_unregister(&ledinternal->netfilter_led_trigger);
3479
3480 diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
3481 index 0a07f9014944..ae0f9ab1a70d 100644
3482 --- a/scripts/Makefile.lib
3483 +++ b/scripts/Makefile.lib
3484 @@ -290,11 +290,11 @@ cmd_dt_S_dtb= \
3485 echo '\#include <asm-generic/vmlinux.lds.h>'; \
3486 echo '.section .dtb.init.rodata,"a"'; \
3487 echo '.balign STRUCT_ALIGNMENT'; \
3488 - echo '.global __dtb_$(*F)_begin'; \
3489 - echo '__dtb_$(*F)_begin:'; \
3490 + echo '.global __dtb_$(subst -,_,$(*F))_begin'; \
3491 + echo '__dtb_$(subst -,_,$(*F))_begin:'; \
3492 echo '.incbin "$<" '; \
3493 - echo '__dtb_$(*F)_end:'; \
3494 - echo '.global __dtb_$(*F)_end'; \
3495 + echo '__dtb_$(subst -,_,$(*F))_end:'; \
3496 + echo '.global __dtb_$(subst -,_,$(*F))_end'; \
3497 echo '.balign STRUCT_ALIGNMENT'; \
3498 ) > $@
3499
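
The substitution matters because the stem of the .dtb file ends up in an assembler symbol, and '-' is not a valid character in one: for a file named foo-board.dtb (an illustrative name), the old recipe emitted the invalid label __dtb_foo-board_begin, while $(subst -,_,$(*F)) now produces __dtb_foo_board_begin and __dtb_foo_board_end, names that C code declaring the extern symbols can actually reference.
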
3500 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
3501 index 0b408617b2c9..799ad3e1d24b 100644
3502 --- a/sound/core/seq/seq_clientmgr.c
3503 +++ b/sound/core/seq/seq_clientmgr.c
3504 @@ -906,7 +906,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
3505 static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
3506 struct snd_seq_event *event,
3507 struct file *file, int blocking,
3508 - int atomic, int hop)
3509 + int atomic, int hop,
3510 + struct mutex *mutexp)
3511 {
3512 struct snd_seq_event_cell *cell;
3513 int err;
3514 @@ -944,7 +945,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
3515 return -ENXIO; /* queue is not allocated */
3516
3517 /* allocate an event cell */
3518 - err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
3519 + err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
3520 + file, mutexp);
3521 if (err < 0)
3522 return err;
3523
3524 @@ -1013,12 +1015,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
3525 return -ENXIO;
3526
3527 /* allocate the pool now if the pool is not allocated yet */
3528 + mutex_lock(&client->ioctl_mutex);
3529 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
3530 - mutex_lock(&client->ioctl_mutex);
3531 err = snd_seq_pool_init(client->pool);
3532 - mutex_unlock(&client->ioctl_mutex);
3533 if (err < 0)
3534 - return -ENOMEM;
3535 + goto out;
3536 }
3537
3538 /* only process whole events */
3539 @@ -1069,7 +1070,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
3540 /* ok, enqueue it */
3541 err = snd_seq_client_enqueue_event(client, &event, file,
3542 !(file->f_flags & O_NONBLOCK),
3543 - 0, 0);
3544 + 0, 0, &client->ioctl_mutex);
3545 if (err < 0)
3546 break;
3547
3548 @@ -1080,6 +1081,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
3549 written += len;
3550 }
3551
3552 + out:
3553 + mutex_unlock(&client->ioctl_mutex);
3554 return written ? written : err;
3555 }
3556
3557 @@ -1835,6 +1838,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
3558 (! snd_seq_write_pool_allocated(client) ||
3559 info->output_pool != client->pool->size)) {
3560 if (snd_seq_write_pool_allocated(client)) {
3561 + /* is the pool in use? */
3562 + if (atomic_read(&client->pool->counter))
3563 + return -EBUSY;
3564 /* remove all existing cells */
3565 snd_seq_pool_mark_closing(client->pool);
3566 snd_seq_queue_client_leave_cells(client->number);
3567 @@ -2259,7 +2265,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
3568 if (! cptr->accept_output)
3569 result = -EPERM;
3570 else /* send it */
3571 - result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
3572 + result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
3573 + atomic, hop, NULL);
3574
3575 snd_seq_client_unlock(cptr);
3576 return result;
3577 diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
3578 index 3490d21ab9e7..9acbed1ac982 100644
3579 --- a/sound/core/seq/seq_fifo.c
3580 +++ b/sound/core/seq/seq_fifo.c
3581 @@ -123,7 +123,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
3582 return -EINVAL;
3583
3584 snd_use_lock_use(&f->use_lock);
3585 - err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
3586 + err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
3587 if (err < 0) {
3588 if ((err == -ENOMEM) || (err == -EAGAIN))
3589 atomic_inc(&f->overflow);
3590 diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
3591 index 5847c4475bf3..4c8cbcd89887 100644
3592 --- a/sound/core/seq/seq_memory.c
3593 +++ b/sound/core/seq/seq_memory.c
3594 @@ -221,7 +221,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
3595 */
3596 static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
3597 struct snd_seq_event_cell **cellp,
3598 - int nonblock, struct file *file)
3599 + int nonblock, struct file *file,
3600 + struct mutex *mutexp)
3601 {
3602 struct snd_seq_event_cell *cell;
3603 unsigned long flags;
3604 @@ -245,7 +246,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
3605 set_current_state(TASK_INTERRUPTIBLE);
3606 add_wait_queue(&pool->output_sleep, &wait);
3607 spin_unlock_irq(&pool->lock);
3608 + if (mutexp)
3609 + mutex_unlock(mutexp);
3610 schedule();
3611 + if (mutexp)
3612 + mutex_lock(mutexp);
3613 spin_lock_irq(&pool->lock);
3614 remove_wait_queue(&pool->output_sleep, &wait);
3615 /* interrupted? */
3616 @@ -288,7 +293,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
3617 */
3618 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
3619 struct snd_seq_event_cell **cellp, int nonblock,
3620 - struct file *file)
3621 + struct file *file, struct mutex *mutexp)
3622 {
3623 int ncells, err;
3624 unsigned int extlen;
3625 @@ -305,7 +310,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
3626 if (ncells >= pool->total_elements)
3627 return -ENOMEM;
3628
3629 - err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
3630 + err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
3631 if (err < 0)
3632 return err;
3633
3634 @@ -331,7 +336,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
3635 int size = sizeof(struct snd_seq_event);
3636 if (len < size)
3637 size = len;
3638 - err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
3639 + err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
3640 + mutexp);
3641 if (err < 0)
3642 goto __error;
3643 if (cell->event.data.ext.ptr == NULL)
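
The mutexp parameter threaded through snd_seq_event_dup() exists for one reason: snd_seq_cell_alloc() may sleep in schedule() waiting for a free cell, and since snd_seq_write() now holds client->ioctl_mutex for the whole write, sleeping with that mutex held would stall any ioctl (such as the pool-resize path) that needs it. The fix is the classic drop-the-outer-lock-across-the-sleep pattern; a compilable pthreads sketch, with schedule_stub() as a stand-in for schedule():

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

    static void schedule_stub(void) { usleep(1000); } /* stand-in for schedule() */

    /* mutexp mirrors the new parameter: NULL for kernel clients,
     * &client->ioctl_mutex for the write() path. */
    static void wait_for_cell(pthread_mutex_t *mutexp)
    {
            pthread_mutex_lock(&pool_lock);
            /* ... conclude the pool is empty, register on the wait queue ... */
            pthread_mutex_unlock(&pool_lock);
            if (mutexp)
                    pthread_mutex_unlock(mutexp); /* never sleep holding it */
            schedule_stub();
            if (mutexp)
                    pthread_mutex_lock(mutexp);   /* retake before going on */
            pthread_mutex_lock(&pool_lock);
            /* ... re-check pool state under the lock ... */
            pthread_mutex_unlock(&pool_lock);
    }

    int main(void)
    {
            wait_for_cell(NULL); /* kernel-client path: no outer mutex */
            return 0;
    }
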
3644 diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
3645 index 32f959c17786..3abe306c394a 100644
3646 --- a/sound/core/seq/seq_memory.h
3647 +++ b/sound/core/seq/seq_memory.h
3648 @@ -66,7 +66,8 @@ struct snd_seq_pool {
3649 void snd_seq_cell_free(struct snd_seq_event_cell *cell);
3650
3651 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
3652 - struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
3653 + struct snd_seq_event_cell **cellp, int nonblock,
3654 + struct file *file, struct mutex *mutexp);
3655
3656 /* return number of unused (free) cells */
3657 static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
3658 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3659 index 2c3065c1f3fb..b3851b991120 100644
3660 --- a/sound/pci/hda/patch_conexant.c
3661 +++ b/sound/pci/hda/patch_conexant.c
3662 @@ -849,6 +849,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
3663 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
3664 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
3665 SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
3666 + SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
3667 + SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
3668 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
3669 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
3670 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
3671 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3672 index 974b74e91ef0..cd427ea8861d 100644
3673 --- a/sound/pci/hda/patch_realtek.c
3674 +++ b/sound/pci/hda/patch_realtek.c
3675 @@ -4760,6 +4760,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
3676 }
3677 }
3678
3679 +/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
3680 +static void alc295_fixup_disable_dac3(struct hda_codec *codec,
3681 + const struct hda_fixup *fix, int action)
3682 +{
3683 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
3684 + hda_nid_t conn[2] = { 0x02, 0x03 };
3685 + snd_hda_override_conn_list(codec, 0x17, 2, conn);
3686 + }
3687 +}
3688 +
3689 /* Hook to update amp GPIO4 for automute */
3690 static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
3691 struct hda_jack_callback *jack)
3692 @@ -4909,6 +4919,7 @@ enum {
3693 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
3694 ALC255_FIXUP_DELL_SPK_NOISE,
3695 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
3696 + ALC295_FIXUP_DISABLE_DAC3,
3697 ALC280_FIXUP_HP_HEADSET_MIC,
3698 ALC221_FIXUP_HP_FRONT_MIC,
3699 ALC292_FIXUP_TPT460,
3700 @@ -5601,6 +5612,10 @@ static const struct hda_fixup alc269_fixups[] = {
3701 .chained = true,
3702 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
3703 },
3704 + [ALC295_FIXUP_DISABLE_DAC3] = {
3705 + .type = HDA_FIXUP_FUNC,
3706 + .v.func = alc295_fixup_disable_dac3,
3707 + },
3708 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
3709 .type = HDA_FIXUP_PINS,
3710 .v.pins = (const struct hda_pintbl[]) {
3711 @@ -5664,6 +5679,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3712 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
3713 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
3714 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
3715 + SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
3716 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
3717 SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
3718 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
3719 @@ -5785,9 +5801,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3720 SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
3721 SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3722 SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3723 + SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460),
3724 SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3725 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3726 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
3727 + SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3728 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
3729 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
3730 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
3731 diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
3732 index f5d34153e21f..f0c9e2562474 100644
3733 --- a/sound/soc/codecs/rt5651.c
3734 +++ b/sound/soc/codecs/rt5651.c
3735 @@ -1736,6 +1736,7 @@ static const struct regmap_config rt5651_regmap = {
3736 .num_reg_defaults = ARRAY_SIZE(rt5651_reg),
3737 .ranges = rt5651_ranges,
3738 .num_ranges = ARRAY_SIZE(rt5651_ranges),
3739 + .use_single_rw = true,
3740 };
3741
3742 #if defined(CONFIG_OF)
3743 diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
3744 index 1589325855bc..3dba5550a665 100644
3745 --- a/sound/soc/codecs/sgtl5000.c
3746 +++ b/sound/soc/codecs/sgtl5000.c
3747 @@ -774,15 +774,26 @@ static int sgtl5000_pcm_hw_params(struct snd_pcm_substream *substream,
3748 static int sgtl5000_set_bias_level(struct snd_soc_codec *codec,
3749 enum snd_soc_bias_level level)
3750 {
3751 + struct sgtl5000_priv *sgtl = snd_soc_codec_get_drvdata(codec);
3752 + int ret;
3753 +
3754 switch (level) {
3755 case SND_SOC_BIAS_ON:
3756 case SND_SOC_BIAS_PREPARE:
3757 case SND_SOC_BIAS_STANDBY:
3758 + regcache_cache_only(sgtl->regmap, false);
3759 + ret = regcache_sync(sgtl->regmap);
3760 + if (ret) {
3761 + regcache_cache_only(sgtl->regmap, true);
3762 + return ret;
3763 + }
3764 +
3765 snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
3766 SGTL5000_REFTOP_POWERUP,
3767 SGTL5000_REFTOP_POWERUP);
3768 break;
3769 case SND_SOC_BIAS_OFF:
3770 + regcache_cache_only(sgtl->regmap, true);
3771 snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
3772 SGTL5000_REFTOP_POWERUP, 0);
3773 break;
3774 diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
3775 index e97d7016d771..ce7e6e5c1cea 100644
3776 --- a/tools/perf/util/trigger.h
3777 +++ b/tools/perf/util/trigger.h
3778 @@ -11,7 +11,7 @@
3779 * States and transits:
3780 *
3781 *
3782 - * OFF--(on)--> READY --(hit)--> HIT
3783 + * OFF--> ON --> READY --(hit)--> HIT
3784 * ^ |
3785 * | (ready)
3786 * | |
3787 @@ -26,8 +26,9 @@ struct trigger {
3788 volatile enum {
3789 TRIGGER_ERROR = -2,
3790 TRIGGER_OFF = -1,
3791 - TRIGGER_READY = 0,
3792 - TRIGGER_HIT = 1,
3793 + TRIGGER_ON = 0,
3794 + TRIGGER_READY = 1,
3795 + TRIGGER_HIT = 2,
3796 } state;
3797 const char *name;
3798 };
3799 @@ -49,7 +50,7 @@ static inline bool trigger_is_error(struct trigger *t)
3800 static inline void trigger_on(struct trigger *t)
3801 {
3802 TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
3803 - t->state = TRIGGER_READY;
3804 + t->state = TRIGGER_ON;
3805 }
3806
3807 static inline void trigger_ready(struct trigger *t)