Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0100-5.4.1-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3481 - (show annotations) (download)
Mon May 11 14:34:05 2020 UTC (3 years, 11 months ago) by niro
File size: 114322 byte(s)
-linux-5.4.1
1 diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst
2 index e3a796c0d3a2..2d19c9f4c1fe 100644
3 --- a/Documentation/admin-guide/hw-vuln/mds.rst
4 +++ b/Documentation/admin-guide/hw-vuln/mds.rst
5 @@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are:
6
7 ============ =============================================================
8
9 -Not specifying this option is equivalent to "mds=full".
10 -
11 +Not specifying this option is equivalent to "mds=full". For processors
12 +that are affected by both TAA (TSX Asynchronous Abort) and MDS,
13 +specifying just "mds=off" without an accompanying "tsx_async_abort=off"
14 +will have no effect as the same mitigation is used for both
15 +vulnerabilities.
16
17 Mitigation selection guide
18 --------------------------
19 diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
20 index fddbd7579c53..af6865b822d2 100644
21 --- a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
22 +++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
23 @@ -174,7 +174,10 @@ the option "tsx_async_abort=". The valid arguments for this option are:
24 CPU is not vulnerable to cross-thread TAA attacks.
25 ============ =============================================================
26
27 -Not specifying this option is equivalent to "tsx_async_abort=full".
28 +Not specifying this option is equivalent to "tsx_async_abort=full". For
29 +processors that are affected by both TAA and MDS, specifying just
30 +"tsx_async_abort=off" without an accompanying "mds=off" will have no
31 +effect as the same mitigation is used for both vulnerabilities.
32
33 The kernel command line also allows to control the TSX feature using the
34 parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
35 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
36 index 8dee8f68fe15..9983ac73b66d 100644
37 --- a/Documentation/admin-guide/kernel-parameters.txt
38 +++ b/Documentation/admin-guide/kernel-parameters.txt
39 @@ -2473,6 +2473,12 @@
40 SMT on vulnerable CPUs
41 off - Unconditionally disable MDS mitigation
42
43 + On TAA-affected machines, mds=off can be prevented by
44 + an active TAA mitigation as both vulnerabilities are
45 + mitigated with the same mechanism so in order to disable
46 + this mitigation, you need to specify tsx_async_abort=off
47 + too.
48 +
49 Not specifying this option is equivalent to
50 mds=full.
51
52 @@ -4931,6 +4937,11 @@
53 vulnerable to cross-thread TAA attacks.
54 off - Unconditionally disable TAA mitigation
55
56 + On MDS-affected machines, tsx_async_abort=off can be
57 + prevented by an active MDS mitigation as both vulnerabilities
58 + are mitigated with the same mechanism so in order to disable
59 + this mitigation, you need to specify mds=off too.
60 +
61 Not specifying this option is equivalent to
62 tsx_async_abort=full. On CPUs which are MDS affected
63 and deploy MDS mitigation, TAA mitigation is not
64 diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
65 index ae661e65354e..f9499b20d840 100644
66 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
67 +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
68 @@ -81,6 +81,12 @@ Optional properties:
69 Definition: Name of external front end module used. Some valid FEM names
70 for example: "microsemi-lx5586", "sky85703-11"
71 and "sky85803" etc.
72 +- qcom,snoc-host-cap-8bit-quirk:
73 + Usage: Optional
74 + Value type: <empty>
75 + Definition: Quirk specifying that the firmware expects the 8bit version
76 + of the host capability QMI request
77 +
78
79 Example (to supply PCI based wifi block details):
80
81 diff --git a/Makefile b/Makefile
82 index d4d36c61940b..641a62423fd6 100644
83 --- a/Makefile
84 +++ b/Makefile
85 @@ -1,7 +1,7 @@
86 # SPDX-License-Identifier: GPL-2.0
87 VERSION = 5
88 PATCHLEVEL = 4
89 -SUBLEVEL = 0
90 +SUBLEVEL = 1
91 EXTRAVERSION =
92 NAME = Kleptomaniac Octopus
93
94 diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
95 index 8561498e653c..d84d1417ddb6 100644
96 --- a/arch/powerpc/include/asm/asm-prototypes.h
97 +++ b/arch/powerpc/include/asm/asm-prototypes.h
98 @@ -152,9 +152,12 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
99 /* Patch sites */
100 extern s32 patch__call_flush_count_cache;
101 extern s32 patch__flush_count_cache_return;
102 +extern s32 patch__flush_link_stack_return;
103 +extern s32 patch__call_kvm_flush_link_stack;
104 extern s32 patch__memset_nocache, patch__memcpy_nocache;
105
106 extern long flush_count_cache;
107 +extern long kvm_flush_link_stack;
108
109 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
110 void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
111 diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
112 index 759597bf0fd8..ccf44c135389 100644
113 --- a/arch/powerpc/include/asm/security_features.h
114 +++ b/arch/powerpc/include/asm/security_features.h
115 @@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
116 // Software required to flush count cache on context switch
117 #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
118
119 +// Software required to flush link stack on context switch
120 +#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
121 +
122
123 // Features enabled by default
124 #define SEC_FTR_DEFAULT \
125 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
126 index 6467bdab8d40..3fd3ef352e3f 100644
127 --- a/arch/powerpc/kernel/entry_64.S
128 +++ b/arch/powerpc/kernel/entry_64.S
129 @@ -537,6 +537,7 @@ flush_count_cache:
130 /* Save LR into r9 */
131 mflr r9
132
133 + // Flush the link stack
134 .rept 64
135 bl .+4
136 .endr
137 @@ -546,6 +547,11 @@ flush_count_cache:
138 .balign 32
139 /* Restore LR */
140 1: mtlr r9
141 +
142 + // If we're just flushing the link stack, return here
143 +3: nop
144 + patch_site 3b patch__flush_link_stack_return
145 +
146 li r9,0x7fff
147 mtctr r9
148
149 diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
150 index 7cfcb294b11c..bd91dceb7010 100644
151 --- a/arch/powerpc/kernel/security.c
152 +++ b/arch/powerpc/kernel/security.c
153 @@ -24,6 +24,7 @@ enum count_cache_flush_type {
154 COUNT_CACHE_FLUSH_HW = 0x4,
155 };
156 static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
157 +static bool link_stack_flush_enabled;
158
159 bool barrier_nospec_enabled;
160 static bool no_nospec;
161 @@ -212,11 +213,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
162
163 if (ccd)
164 seq_buf_printf(&s, "Indirect branch cache disabled");
165 +
166 + if (link_stack_flush_enabled)
167 + seq_buf_printf(&s, ", Software link stack flush");
168 +
169 } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
170 seq_buf_printf(&s, "Mitigation: Software count cache flush");
171
172 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
173 seq_buf_printf(&s, " (hardware accelerated)");
174 +
175 + if (link_stack_flush_enabled)
176 + seq_buf_printf(&s, ", Software link stack flush");
177 +
178 } else if (btb_flush_enabled) {
179 seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
180 } else {
181 @@ -377,18 +386,49 @@ static __init int stf_barrier_debugfs_init(void)
182 device_initcall(stf_barrier_debugfs_init);
183 #endif /* CONFIG_DEBUG_FS */
184
185 +static void no_count_cache_flush(void)
186 +{
187 + count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
188 + pr_info("count-cache-flush: software flush disabled.\n");
189 +}
190 +
191 static void toggle_count_cache_flush(bool enable)
192 {
193 - if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
194 + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
195 + !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
196 + enable = false;
197 +
198 + if (!enable) {
199 patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
200 - count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
201 - pr_info("count-cache-flush: software flush disabled.\n");
202 +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
203 + patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
204 +#endif
205 + pr_info("link-stack-flush: software flush disabled.\n");
206 + link_stack_flush_enabled = false;
207 + no_count_cache_flush();
208 return;
209 }
210
211 + // This enables the branch from _switch to flush_count_cache
212 patch_branch_site(&patch__call_flush_count_cache,
213 (u64)&flush_count_cache, BRANCH_SET_LINK);
214
215 +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
216 + // This enables the branch from guest_exit_cont to kvm_flush_link_stack
217 + patch_branch_site(&patch__call_kvm_flush_link_stack,
218 + (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
219 +#endif
220 +
221 + pr_info("link-stack-flush: software flush enabled.\n");
222 + link_stack_flush_enabled = true;
223 +
224 + // If we just need to flush the link stack, patch an early return
225 + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
226 + patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
227 + no_count_cache_flush();
228 + return;
229 + }
230 +
231 if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
232 count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
233 pr_info("count-cache-flush: full software flush sequence enabled.\n");
234 @@ -407,11 +447,20 @@ void setup_count_cache_flush(void)
235 if (no_spectrev2 || cpu_mitigations_off()) {
236 if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
237 security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
238 - pr_warn("Spectre v2 mitigations not under software control, can't disable\n");
239 + pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
240
241 enable = false;
242 }
243
244 + /*
245 + * There's no firmware feature flag/hypervisor bit to tell us we need to
246 + * flush the link stack on context switch. So we set it here if we see
247 + * either of the Spectre v2 mitigations that aim to protect userspace.
248 + */
249 + if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
250 + security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
251 + security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
252 +
253 toggle_count_cache_flush(enable);
254 }
255
256 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
257 index faebcbb8c4db..0496e66aaa56 100644
258 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
259 +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
260 @@ -11,6 +11,7 @@
261 */
262
263 #include <asm/ppc_asm.h>
264 +#include <asm/code-patching-asm.h>
265 #include <asm/kvm_asm.h>
266 #include <asm/reg.h>
267 #include <asm/mmu.h>
268 @@ -1487,6 +1488,13 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
269 1:
270 #endif /* CONFIG_KVM_XICS */
271
272 + /*
273 + * Possibly flush the link stack here, before we do a blr in
274 + * guest_exit_short_path.
275 + */
276 +1: nop
277 + patch_site 1b patch__call_kvm_flush_link_stack
278 +
279 /* If we came in through the P9 short path, go back out to C now */
280 lwz r0, STACK_SLOT_SHORT_PATH(r1)
281 cmpwi r0, 0
282 @@ -1963,6 +1971,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
283 mtlr r0
284 blr
285
286 +.balign 32
287 +.global kvm_flush_link_stack
288 +kvm_flush_link_stack:
289 + /* Save LR into r0 */
290 + mflr r0
291 +
292 + /* Flush the link stack. On Power8 it's up to 32 entries in size. */
293 + .rept 32
294 + bl .+4
295 + .endr
296 +
297 + /* And on Power9 it's up to 64. */
298 +BEGIN_FTR_SECTION
299 + .rept 32
300 + bl .+4
301 + .endr
302 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
303 +
304 + /* Restore LR */
305 + mtlr r0
306 + blr
307 +
308 kvmppc_guest_external:
309 /* External interrupt, first check for host_ipi. If this is
310 * set, we know the host wants us out so let's do it now
311 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
312 index f83ca5aa8b77..f07baf0388bc 100644
313 --- a/arch/x86/entry/entry_32.S
314 +++ b/arch/x86/entry/entry_32.S
315 @@ -172,7 +172,7 @@
316 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
317 .if \no_user_check == 0
318 /* coming from usermode? */
319 - testl $SEGMENT_RPL_MASK, PT_CS(%esp)
320 + testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
321 jz .Lend_\@
322 .endif
323 /* On user-cr3? */
324 @@ -205,64 +205,76 @@
325 #define CS_FROM_ENTRY_STACK (1 << 31)
326 #define CS_FROM_USER_CR3 (1 << 30)
327 #define CS_FROM_KERNEL (1 << 29)
328 +#define CS_FROM_ESPFIX (1 << 28)
329
330 .macro FIXUP_FRAME
331 /*
332 * The high bits of the CS dword (__csh) are used for CS_FROM_*.
333 * Clear them in case hardware didn't do this for us.
334 */
335 - andl $0x0000ffff, 3*4(%esp)
336 + andl $0x0000ffff, 4*4(%esp)
337
338 #ifdef CONFIG_VM86
339 - testl $X86_EFLAGS_VM, 4*4(%esp)
340 + testl $X86_EFLAGS_VM, 5*4(%esp)
341 jnz .Lfrom_usermode_no_fixup_\@
342 #endif
343 - testl $SEGMENT_RPL_MASK, 3*4(%esp)
344 + testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
345 jnz .Lfrom_usermode_no_fixup_\@
346
347 - orl $CS_FROM_KERNEL, 3*4(%esp)
348 + orl $CS_FROM_KERNEL, 4*4(%esp)
349
350 /*
351 * When we're here from kernel mode; the (exception) stack looks like:
352 *
353 - * 5*4(%esp) - <previous context>
354 - * 4*4(%esp) - flags
355 - * 3*4(%esp) - cs
356 - * 2*4(%esp) - ip
357 - * 1*4(%esp) - orig_eax
358 - * 0*4(%esp) - gs / function
359 + * 6*4(%esp) - <previous context>
360 + * 5*4(%esp) - flags
361 + * 4*4(%esp) - cs
362 + * 3*4(%esp) - ip
363 + * 2*4(%esp) - orig_eax
364 + * 1*4(%esp) - gs / function
365 + * 0*4(%esp) - fs
366 *
367 * Lets build a 5 entry IRET frame after that, such that struct pt_regs
368 * is complete and in particular regs->sp is correct. This gives us
369 - * the original 5 enties as gap:
370 + * the original 6 enties as gap:
371 *
372 - * 12*4(%esp) - <previous context>
373 - * 11*4(%esp) - gap / flags
374 - * 10*4(%esp) - gap / cs
375 - * 9*4(%esp) - gap / ip
376 - * 8*4(%esp) - gap / orig_eax
377 - * 7*4(%esp) - gap / gs / function
378 - * 6*4(%esp) - ss
379 - * 5*4(%esp) - sp
380 - * 4*4(%esp) - flags
381 - * 3*4(%esp) - cs
382 - * 2*4(%esp) - ip
383 - * 1*4(%esp) - orig_eax
384 - * 0*4(%esp) - gs / function
385 + * 14*4(%esp) - <previous context>
386 + * 13*4(%esp) - gap / flags
387 + * 12*4(%esp) - gap / cs
388 + * 11*4(%esp) - gap / ip
389 + * 10*4(%esp) - gap / orig_eax
390 + * 9*4(%esp) - gap / gs / function
391 + * 8*4(%esp) - gap / fs
392 + * 7*4(%esp) - ss
393 + * 6*4(%esp) - sp
394 + * 5*4(%esp) - flags
395 + * 4*4(%esp) - cs
396 + * 3*4(%esp) - ip
397 + * 2*4(%esp) - orig_eax
398 + * 1*4(%esp) - gs / function
399 + * 0*4(%esp) - fs
400 */
401
402 pushl %ss # ss
403 pushl %esp # sp (points at ss)
404 - addl $6*4, (%esp) # point sp back at the previous context
405 - pushl 6*4(%esp) # flags
406 - pushl 6*4(%esp) # cs
407 - pushl 6*4(%esp) # ip
408 - pushl 6*4(%esp) # orig_eax
409 - pushl 6*4(%esp) # gs / function
410 + addl $7*4, (%esp) # point sp back at the previous context
411 + pushl 7*4(%esp) # flags
412 + pushl 7*4(%esp) # cs
413 + pushl 7*4(%esp) # ip
414 + pushl 7*4(%esp) # orig_eax
415 + pushl 7*4(%esp) # gs / function
416 + pushl 7*4(%esp) # fs
417 .Lfrom_usermode_no_fixup_\@:
418 .endm
419
420 .macro IRET_FRAME
421 + /*
422 + * We're called with %ds, %es, %fs, and %gs from the interrupted
423 + * frame, so we shouldn't use them. Also, we may be in ESPFIX
424 + * mode and therefore have a nonzero SS base and an offset ESP,
425 + * so any attempt to access the stack needs to use SS. (except for
426 + * accesses through %esp, which automatically use SS.)
427 + */
428 testl $CS_FROM_KERNEL, 1*4(%esp)
429 jz .Lfinished_frame_\@
430
431 @@ -276,31 +288,40 @@
432 movl 5*4(%esp), %eax # (modified) regs->sp
433
434 movl 4*4(%esp), %ecx # flags
435 - movl %ecx, -4(%eax)
436 + movl %ecx, %ss:-1*4(%eax)
437
438 movl 3*4(%esp), %ecx # cs
439 andl $0x0000ffff, %ecx
440 - movl %ecx, -8(%eax)
441 + movl %ecx, %ss:-2*4(%eax)
442
443 movl 2*4(%esp), %ecx # ip
444 - movl %ecx, -12(%eax)
445 + movl %ecx, %ss:-3*4(%eax)
446
447 movl 1*4(%esp), %ecx # eax
448 - movl %ecx, -16(%eax)
449 + movl %ecx, %ss:-4*4(%eax)
450
451 popl %ecx
452 - lea -16(%eax), %esp
453 + lea -4*4(%eax), %esp
454 popl %eax
455 .Lfinished_frame_\@:
456 .endm
457
458 -.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0
459 +.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
460 cld
461 .if \skip_gs == 0
462 PUSH_GS
463 .endif
464 - FIXUP_FRAME
465 pushl %fs
466 +
467 + pushl %eax
468 + movl $(__KERNEL_PERCPU), %eax
469 + movl %eax, %fs
470 +.if \unwind_espfix > 0
471 + UNWIND_ESPFIX_STACK
472 +.endif
473 + popl %eax
474 +
475 + FIXUP_FRAME
476 pushl %es
477 pushl %ds
478 pushl \pt_regs_ax
479 @@ -313,8 +334,6 @@
480 movl $(__USER_DS), %edx
481 movl %edx, %ds
482 movl %edx, %es
483 - movl $(__KERNEL_PERCPU), %edx
484 - movl %edx, %fs
485 .if \skip_gs == 0
486 SET_KERNEL_GS %edx
487 .endif
488 @@ -324,8 +343,8 @@
489 .endif
490 .endm
491
492 -.macro SAVE_ALL_NMI cr3_reg:req
493 - SAVE_ALL
494 +.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
495 + SAVE_ALL unwind_espfix=\unwind_espfix
496
497 BUG_IF_WRONG_CR3
498
499 @@ -357,6 +376,7 @@
500 2: popl %es
501 3: popl %fs
502 POP_GS \pop
503 + IRET_FRAME
504 .pushsection .fixup, "ax"
505 4: movl $0, (%esp)
506 jmp 1b
507 @@ -395,7 +415,8 @@
508
509 .macro CHECK_AND_APPLY_ESPFIX
510 #ifdef CONFIG_X86_ESPFIX32
511 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
512 +#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
513 +#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET
514
515 ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
516
517 @@ -1075,7 +1096,6 @@ restore_all:
518 /* Restore user state */
519 RESTORE_REGS pop=4 # skip orig_eax/error_code
520 .Lirq_return:
521 - IRET_FRAME
522 /*
523 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
524 * when returning from IPI handler and when returning from
525 @@ -1128,30 +1148,43 @@ ENDPROC(entry_INT80_32)
526 * We can't call C functions using the ESPFIX stack. This code reads
527 * the high word of the segment base from the GDT and swiches to the
528 * normal stack and adjusts ESP with the matching offset.
529 + *
530 + * We might be on user CR3 here, so percpu data is not mapped and we can't
531 + * access the GDT through the percpu segment. Instead, use SGDT to find
532 + * the cpu_entry_area alias of the GDT.
533 */
534 #ifdef CONFIG_X86_ESPFIX32
535 /* fixup the stack */
536 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
537 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
538 + pushl %ecx
539 + subl $2*4, %esp
540 + sgdt (%esp)
541 + movl 2(%esp), %ecx /* GDT address */
542 + /*
543 + * Careful: ECX is a linear pointer, so we need to force base
544 + * zero. %cs is the only known-linear segment we have right now.
545 + */
546 + mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */
547 + mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */
548 shl $16, %eax
549 + addl $2*4, %esp
550 + popl %ecx
551 addl %esp, %eax /* the adjusted stack pointer */
552 pushl $__KERNEL_DS
553 pushl %eax
554 lss (%esp), %esp /* switch to the normal stack segment */
555 #endif
556 .endm
557 +
558 .macro UNWIND_ESPFIX_STACK
559 + /* It's safe to clobber %eax, all other regs need to be preserved */
560 #ifdef CONFIG_X86_ESPFIX32
561 movl %ss, %eax
562 /* see if on espfix stack */
563 cmpw $__ESPFIX_SS, %ax
564 - jne 27f
565 - movl $__KERNEL_DS, %eax
566 - movl %eax, %ds
567 - movl %eax, %es
568 + jne .Lno_fixup_\@
569 /* switch to normal stack */
570 FIXUP_ESPFIX_STACK
571 -27:
572 +.Lno_fixup_\@:
573 #endif
574 .endm
575
576 @@ -1341,11 +1374,6 @@ END(spurious_interrupt_bug)
577
578 #ifdef CONFIG_XEN_PV
579 ENTRY(xen_hypervisor_callback)
580 - pushl $-1 /* orig_ax = -1 => not a system call */
581 - SAVE_ALL
582 - ENCODE_FRAME_POINTER
583 - TRACE_IRQS_OFF
584 -
585 /*
586 * Check to see if we got the event in the critical
587 * region in xen_iret_direct, after we've reenabled
588 @@ -1353,16 +1381,17 @@ ENTRY(xen_hypervisor_callback)
589 * iret instruction's behaviour where it delivers a
590 * pending interrupt when enabling interrupts:
591 */
592 - movl PT_EIP(%esp), %eax
593 - cmpl $xen_iret_start_crit, %eax
594 + cmpl $xen_iret_start_crit, (%esp)
595 jb 1f
596 - cmpl $xen_iret_end_crit, %eax
597 + cmpl $xen_iret_end_crit, (%esp)
598 jae 1f
599 -
600 - jmp xen_iret_crit_fixup
601 -
602 -ENTRY(xen_do_upcall)
603 -1: mov %esp, %eax
604 + call xen_iret_crit_fixup
605 +1:
606 + pushl $-1 /* orig_ax = -1 => not a system call */
607 + SAVE_ALL
608 + ENCODE_FRAME_POINTER
609 + TRACE_IRQS_OFF
610 + mov %esp, %eax
611 call xen_evtchn_do_upcall
612 #ifndef CONFIG_PREEMPTION
613 call xen_maybe_preempt_hcall
614 @@ -1449,10 +1478,9 @@ END(page_fault)
615
616 common_exception_read_cr2:
617 /* the function address is in %gs's slot on the stack */
618 - SAVE_ALL switch_stacks=1 skip_gs=1
619 + SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
620
621 ENCODE_FRAME_POINTER
622 - UNWIND_ESPFIX_STACK
623
624 /* fixup %gs */
625 GS_TO_REG %ecx
626 @@ -1474,9 +1502,8 @@ END(common_exception_read_cr2)
627
628 common_exception:
629 /* the function address is in %gs's slot on the stack */
630 - SAVE_ALL switch_stacks=1 skip_gs=1
631 + SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
632 ENCODE_FRAME_POINTER
633 - UNWIND_ESPFIX_STACK
634
635 /* fixup %gs */
636 GS_TO_REG %ecx
637 @@ -1515,6 +1542,10 @@ ENTRY(nmi)
638 ASM_CLAC
639
640 #ifdef CONFIG_X86_ESPFIX32
641 + /*
642 + * ESPFIX_SS is only ever set on the return to user path
643 + * after we've switched to the entry stack.
644 + */
645 pushl %eax
646 movl %ss, %eax
647 cmpw $__ESPFIX_SS, %ax
648 @@ -1550,6 +1581,11 @@ ENTRY(nmi)
649 movl %ebx, %esp
650
651 .Lnmi_return:
652 +#ifdef CONFIG_X86_ESPFIX32
653 + testl $CS_FROM_ESPFIX, PT_CS(%esp)
654 + jnz .Lnmi_from_espfix
655 +#endif
656 +
657 CHECK_AND_APPLY_ESPFIX
658 RESTORE_ALL_NMI cr3_reg=%edi pop=4
659 jmp .Lirq_return
660 @@ -1557,23 +1593,42 @@ ENTRY(nmi)
661 #ifdef CONFIG_X86_ESPFIX32
662 .Lnmi_espfix_stack:
663 /*
664 - * create the pointer to lss back
665 + * Create the pointer to LSS back
666 */
667 pushl %ss
668 pushl %esp
669 addl $4, (%esp)
670 - /* copy the iret frame of 12 bytes */
671 - .rept 3
672 - pushl 16(%esp)
673 - .endr
674 - pushl %eax
675 - SAVE_ALL_NMI cr3_reg=%edi
676 +
677 + /* Copy the (short) IRET frame */
678 + pushl 4*4(%esp) # flags
679 + pushl 4*4(%esp) # cs
680 + pushl 4*4(%esp) # ip
681 +
682 + pushl %eax # orig_ax
683 +
684 + SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
685 ENCODE_FRAME_POINTER
686 - FIXUP_ESPFIX_STACK # %eax == %esp
687 +
688 + /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
689 + xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
690 +
691 xorl %edx, %edx # zero error code
692 - call do_nmi
693 + movl %esp, %eax # pt_regs pointer
694 + jmp .Lnmi_from_sysenter_stack
695 +
696 +.Lnmi_from_espfix:
697 RESTORE_ALL_NMI cr3_reg=%edi
698 - lss 12+4(%esp), %esp # back to espfix stack
699 + /*
700 + * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
701 + * fix up the gap and long frame:
702 + *
703 + * 3 - original frame (exception)
704 + * 2 - ESPFIX block (above)
705 + * 6 - gap (FIXUP_FRAME)
706 + * 5 - long frame (FIXUP_FRAME)
707 + * 1 - orig_ax
708 + */
709 + lss (1+5+6)*4(%esp), %esp # back to espfix stack
710 jmp .Lirq_return
711 #endif
712 END(nmi)
713 diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
714 index 8348f7d69fd5..ea866c7bf31d 100644
715 --- a/arch/x86/include/asm/cpu_entry_area.h
716 +++ b/arch/x86/include/asm/cpu_entry_area.h
717 @@ -78,8 +78,12 @@ struct cpu_entry_area {
718
719 /*
720 * The GDT is just below entry_stack and thus serves (on x86_64) as
721 - * a a read-only guard page.
722 + * a read-only guard page. On 32-bit the GDT must be writeable, so
723 + * it needs an extra guard page.
724 */
725 +#ifdef CONFIG_X86_32
726 + char guard_entry_stack[PAGE_SIZE];
727 +#endif
728 struct entry_stack_page entry_stack_page;
729
730 /*
731 @@ -94,7 +98,6 @@ struct cpu_entry_area {
732 */
733 struct cea_exception_stacks estacks;
734 #endif
735 -#ifdef CONFIG_CPU_SUP_INTEL
736 /*
737 * Per CPU debug store for Intel performance monitoring. Wastes a
738 * full page at the moment.
739 @@ -105,11 +108,13 @@ struct cpu_entry_area {
740 * Reserve enough fixmap PTEs.
741 */
742 struct debug_store_buffers cpu_debug_buffers;
743 -#endif
744 };
745
746 -#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
747 -#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
748 +#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
749 +#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
750 +
751 +/* Total size includes the readonly IDT mapping page as well: */
752 +#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
753
754 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
755 DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
756 @@ -117,13 +122,14 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
757 extern void setup_cpu_entry_areas(void);
758 extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
759
760 +/* Single page reserved for the readonly IDT mapping: */
761 #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE
762 #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
763
764 #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
765
766 #define CPU_ENTRY_AREA_MAP_SIZE \
767 - (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
768 + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
769
770 extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
771
772 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
773 index b0bc0fff5f1f..1636eb8e5a5b 100644
774 --- a/arch/x86/include/asm/pgtable_32_types.h
775 +++ b/arch/x86/include/asm/pgtable_32_types.h
776 @@ -44,11 +44,11 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
777 * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
778 * to avoid include recursion hell
779 */
780 -#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40)
781 +#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 39)
782
783 -#define CPU_ENTRY_AREA_BASE \
784 - ((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \
785 - & PMD_MASK)
786 +/* The +1 is for the readonly IDT page: */
787 +#define CPU_ENTRY_AREA_BASE \
788 + ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
789
790 #define LDT_BASE_ADDR \
791 ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
792 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
793 index ac3892920419..6669164abadc 100644
794 --- a/arch/x86/include/asm/segment.h
795 +++ b/arch/x86/include/asm/segment.h
796 @@ -31,6 +31,18 @@
797 */
798 #define SEGMENT_RPL_MASK 0x3
799
800 +/*
801 + * When running on Xen PV, the actual privilege level of the kernel is 1,
802 + * not 0. Testing the Requested Privilege Level in a segment selector to
803 + * determine whether the context is user mode or kernel mode with
804 + * SEGMENT_RPL_MASK is wrong because the PV kernel's privilege level
805 + * matches the 0x3 mask.
806 + *
807 + * Testing with USER_SEGMENT_RPL_MASK is valid for both native and Xen PV
808 + * kernels because privilege level 2 is never used.
809 + */
810 +#define USER_SEGMENT_RPL_MASK 0x2
811 +
812 /* User mode is privilege level 3: */
813 #define USER_RPL 0x3
814
815 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
816 index 4c7b0fa15a19..8bf64899f56a 100644
817 --- a/arch/x86/kernel/cpu/bugs.c
818 +++ b/arch/x86/kernel/cpu/bugs.c
819 @@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
820 static void __init ssb_select_mitigation(void);
821 static void __init l1tf_select_mitigation(void);
822 static void __init mds_select_mitigation(void);
823 +static void __init mds_print_mitigation(void);
824 static void __init taa_select_mitigation(void);
825
826 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
827 @@ -108,6 +109,12 @@ void __init check_bugs(void)
828 mds_select_mitigation();
829 taa_select_mitigation();
830
831 + /*
832 + * As MDS and TAA mitigations are inter-related, print MDS
833 + * mitigation until after TAA mitigation selection is done.
834 + */
835 + mds_print_mitigation();
836 +
837 arch_smt_update();
838
839 #ifdef CONFIG_X86_32
840 @@ -245,6 +252,12 @@ static void __init mds_select_mitigation(void)
841 (mds_nosmt || cpu_mitigations_auto_nosmt()))
842 cpu_smt_disable(false);
843 }
844 +}
845 +
846 +static void __init mds_print_mitigation(void)
847 +{
848 + if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
849 + return;
850
851 pr_info("%s\n", mds_strings[mds_mitigation]);
852 }
853 @@ -304,8 +317,12 @@ static void __init taa_select_mitigation(void)
854 return;
855 }
856
857 - /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
858 - if (taa_mitigation == TAA_MITIGATION_OFF)
859 + /*
860 + * TAA mitigation via VERW is turned off if both
861 + * tsx_async_abort=off and mds=off are specified.
862 + */
863 + if (taa_mitigation == TAA_MITIGATION_OFF &&
864 + mds_mitigation == MDS_MITIGATION_OFF)
865 goto out;
866
867 if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
868 @@ -339,6 +356,15 @@ static void __init taa_select_mitigation(void)
869 if (taa_nosmt || cpu_mitigations_auto_nosmt())
870 cpu_smt_disable(false);
871
872 + /*
873 + * Update MDS mitigation, if necessary, as the mds_user_clear is
874 + * now enabled for TAA mitigation.
875 + */
876 + if (mds_mitigation == MDS_MITIGATION_OFF &&
877 + boot_cpu_has_bug(X86_BUG_MDS)) {
878 + mds_mitigation = MDS_MITIGATION_FULL;
879 + mds_select_mitigation();
880 + }
881 out:
882 pr_info("%s\n", taa_strings[taa_mitigation]);
883 }
884 diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
885 index 0b8cedb20d6d..d5c9b13bafdf 100644
886 --- a/arch/x86/kernel/doublefault.c
887 +++ b/arch/x86/kernel/doublefault.c
888 @@ -65,6 +65,9 @@ struct x86_hw_tss doublefault_tss __cacheline_aligned = {
889 .ss = __KERNEL_DS,
890 .ds = __USER_DS,
891 .fs = __KERNEL_PERCPU,
892 +#ifndef CONFIG_X86_32_LAZY_GS
893 + .gs = __KERNEL_STACK_CANARY,
894 +#endif
895
896 .__cr3 = __pa_nodebug(swapper_pg_dir),
897 };
898 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
899 index 30f9cb2c0b55..2e6a0676c1f4 100644
900 --- a/arch/x86/kernel/head_32.S
901 +++ b/arch/x86/kernel/head_32.S
902 @@ -571,6 +571,16 @@ ENTRY(initial_page_table)
903 # error "Kernel PMDs should be 1, 2 or 3"
904 # endif
905 .align PAGE_SIZE /* needs to be page-sized too */
906 +
907 +#ifdef CONFIG_PAGE_TABLE_ISOLATION
908 + /*
909 + * PTI needs another page so sync_initial_pagetable() works correctly
910 + * and does not scribble over the data which is placed behind the
911 + * actual initial_page_table. See clone_pgd_range().
912 + */
913 + .fill 1024, 4, 0
914 +#endif
915 +
916 #endif
917
918 .data
919 diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
920 index 752ad11d6868..d9643647a9ce 100644
921 --- a/arch/x86/mm/cpu_entry_area.c
922 +++ b/arch/x86/mm/cpu_entry_area.c
923 @@ -178,7 +178,9 @@ static __init void setup_cpu_entry_area_ptes(void)
924 #ifdef CONFIG_X86_32
925 unsigned long start, end;
926
927 - BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
928 + /* The +1 is for the readonly IDT: */
929 + BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
930 + BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
931 BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
932
933 start = CPU_ENTRY_AREA_BASE;
934 diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
935 index b02a36b2c14f..a42015b305f4 100644
936 --- a/arch/x86/tools/gen-insn-attr-x86.awk
937 +++ b/arch/x86/tools/gen-insn-attr-x86.awk
938 @@ -69,7 +69,7 @@ BEGIN {
939
940 lprefix1_expr = "\\((66|!F3)\\)"
941 lprefix2_expr = "\\(F3\\)"
942 - lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
943 + lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
944 lprefix_expr = "\\((66|F2|F3)\\)"
945 max_lprefix = 4
946
947 @@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
948 return add_flags(imm, mod)
949 }
950
951 -/^[0-9a-f]+\:/ {
952 +/^[0-9a-f]+:/ {
953 if (NR == 1)
954 next
955 # get index
956 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
957 index c15db060a242..cd177772fe4d 100644
958 --- a/arch/x86/xen/xen-asm_32.S
959 +++ b/arch/x86/xen/xen-asm_32.S
960 @@ -126,10 +126,9 @@ hyper_iret:
961 .globl xen_iret_start_crit, xen_iret_end_crit
962
963 /*
964 - * This is called by xen_hypervisor_callback in entry.S when it sees
965 + * This is called by xen_hypervisor_callback in entry_32.S when it sees
966 * that the EIP at the time of interrupt was between
967 - * xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in
968 - * %eax so we can do a more refined determination of what to do.
969 + * xen_iret_start_crit and xen_iret_end_crit.
970 *
971 * The stack format at this point is:
972 * ----------------
973 @@ -138,70 +137,46 @@ hyper_iret:
974 * eflags } outer exception info
975 * cs }
976 * eip }
977 - * ---------------- <- edi (copy dest)
978 - * eax : outer eax if it hasn't been restored
979 * ----------------
980 - * eflags } nested exception info
981 - * cs } (no ss/esp because we're nested
982 - * eip } from the same ring)
983 - * orig_eax }<- esi (copy src)
984 - * - - - - - - - -
985 - * fs }
986 - * es }
987 - * ds } SAVE_ALL state
988 - * eax }
989 - * : :
990 - * ebx }<- esp
991 + * eax : outer eax if it hasn't been restored
992 * ----------------
993 + * eflags }
994 + * cs } nested exception info
995 + * eip }
996 + * return address : (into xen_hypervisor_callback)
997 *
998 - * In order to deliver the nested exception properly, we need to shift
999 - * everything from the return addr up to the error code so it sits
1000 - * just under the outer exception info. This means that when we
1001 - * handle the exception, we do it in the context of the outer
1002 - * exception rather than starting a new one.
1003 + * In order to deliver the nested exception properly, we need to discard the
1004 + * nested exception frame such that when we handle the exception, we do it
1005 + * in the context of the outer exception rather than starting a new one.
1006 *
1007 - * The only caveat is that if the outer eax hasn't been restored yet
1008 - * (ie, it's still on stack), we need to insert its value into the
1009 - * SAVE_ALL state before going on, since it's usermode state which we
1010 - * eventually need to restore.
1011 + * The only caveat is that if the outer eax hasn't been restored yet (i.e.
1012 + * it's still on stack), we need to restore its value here.
1013 */
1014 ENTRY(xen_iret_crit_fixup)
1015 /*
1016 * Paranoia: Make sure we're really coming from kernel space.
1017 * One could imagine a case where userspace jumps into the
1018 * critical range address, but just before the CPU delivers a
1019 - * GP, it decides to deliver an interrupt instead. Unlikely?
1020 - * Definitely. Easy to avoid? Yes. The Intel documents
1021 - * explicitly say that the reported EIP for a bad jump is the
1022 - * jump instruction itself, not the destination, but some
1023 - * virtual environments get this wrong.
1024 + * PF, it decides to deliver an interrupt instead. Unlikely?
1025 + * Definitely. Easy to avoid? Yes.
1026 */
1027 - movl PT_CS(%esp), %ecx
1028 - andl $SEGMENT_RPL_MASK, %ecx
1029 - cmpl $USER_RPL, %ecx
1030 - je 2f
1031 -
1032 - lea PT_ORIG_EAX(%esp), %esi
1033 - lea PT_EFLAGS(%esp), %edi
1034 + testb $2, 2*4(%esp) /* nested CS */
1035 + jnz 2f
1036
1037 /*
1038 * If eip is before iret_restore_end then stack
1039 * hasn't been restored yet.
1040 */
1041 - cmp $iret_restore_end, %eax
1042 + cmpl $iret_restore_end, 1*4(%esp)
1043 jae 1f
1044
1045 - movl 0+4(%edi), %eax /* copy EAX (just above top of frame) */
1046 - movl %eax, PT_EAX(%esp)
1047 + movl 4*4(%esp), %eax /* load outer EAX */
1048 + ret $4*4 /* discard nested EIP, CS, and EFLAGS as
1049 + * well as the just restored EAX */
1050
1051 - lea ESP_OFFSET(%edi), %edi /* move dest up over saved regs */
1052 -
1053 - /* set up the copy */
1054 -1: std
1055 - mov $PT_EIP / 4, %ecx /* saved regs up to orig_eax */
1056 - rep movsl
1057 - cld
1058 -
1059 - lea 4(%edi), %esp /* point esp to new frame */
1060 -2: jmp xen_do_upcall
1061 +1:
1062 + ret $3*4 /* discard nested EIP, CS, and EFLAGS */
1063
1064 +2:
1065 + ret
1066 +END(xen_iret_crit_fixup)
1067 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1068 index 19e75999bb15..57532465fb83 100644
1069 --- a/drivers/block/nbd.c
1070 +++ b/drivers/block/nbd.c
1071 @@ -1032,14 +1032,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1072 sockfd_put(sock);
1073 return -ENOMEM;
1074 }
1075 +
1076 + config->socks = socks;
1077 +
1078 nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
1079 if (!nsock) {
1080 sockfd_put(sock);
1081 return -ENOMEM;
1082 }
1083
1084 - config->socks = socks;
1085 -
1086 nsock->fallback_index = -1;
1087 nsock->dead = false;
1088 mutex_init(&nsock->tx_lock);
1089 diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
1090 index fe2e307009f4..cf4a56095817 100644
1091 --- a/drivers/bluetooth/hci_bcsp.c
1092 +++ b/drivers/bluetooth/hci_bcsp.c
1093 @@ -591,6 +591,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
1094 if (*ptr == 0xc0) {
1095 BT_ERR("Short BCSP packet");
1096 kfree_skb(bcsp->rx_skb);
1097 + bcsp->rx_skb = NULL;
1098 bcsp->rx_state = BCSP_W4_PKT_START;
1099 bcsp->rx_count = 0;
1100 } else
1101 @@ -606,6 +607,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
1102 bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
1103 BT_ERR("Error in BCSP hdr checksum");
1104 kfree_skb(bcsp->rx_skb);
1105 + bcsp->rx_skb = NULL;
1106 bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
1107 bcsp->rx_count = 0;
1108 continue;
1109 @@ -630,6 +632,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
1110 bscp_get_crc(bcsp));
1111
1112 kfree_skb(bcsp->rx_skb);
1113 + bcsp->rx_skb = NULL;
1114 bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
1115 bcsp->rx_count = 0;
1116 continue;
1117 diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
1118 index 285706618f8a..d9a4c6c691e0 100644
1119 --- a/drivers/bluetooth/hci_ll.c
1120 +++ b/drivers/bluetooth/hci_ll.c
1121 @@ -621,13 +621,6 @@ static int ll_setup(struct hci_uart *hu)
1122
1123 serdev_device_set_flow_control(serdev, true);
1124
1125 - if (hu->oper_speed)
1126 - speed = hu->oper_speed;
1127 - else if (hu->proto->oper_speed)
1128 - speed = hu->proto->oper_speed;
1129 - else
1130 - speed = 0;
1131 -
1132 do {
1133 /* Reset the Bluetooth device */
1134 gpiod_set_value_cansleep(lldev->enable_gpio, 0);
1135 @@ -639,20 +632,6 @@ static int ll_setup(struct hci_uart *hu)
1136 return err;
1137 }
1138
1139 - if (speed) {
1140 - __le32 speed_le = cpu_to_le32(speed);
1141 - struct sk_buff *skb;
1142 -
1143 - skb = __hci_cmd_sync(hu->hdev,
1144 - HCI_VS_UPDATE_UART_HCI_BAUDRATE,
1145 - sizeof(speed_le), &speed_le,
1146 - HCI_INIT_TIMEOUT);
1147 - if (!IS_ERR(skb)) {
1148 - kfree_skb(skb);
1149 - serdev_device_set_baudrate(serdev, speed);
1150 - }
1151 - }
1152 -
1153 err = download_firmware(lldev);
1154 if (!err)
1155 break;
1156 @@ -677,7 +656,25 @@ static int ll_setup(struct hci_uart *hu)
1157 }
1158
1159 /* Operational speed if any */
1160 + if (hu->oper_speed)
1161 + speed = hu->oper_speed;
1162 + else if (hu->proto->oper_speed)
1163 + speed = hu->proto->oper_speed;
1164 + else
1165 + speed = 0;
1166 +
1167 + if (speed) {
1168 + __le32 speed_le = cpu_to_le32(speed);
1169 + struct sk_buff *skb;
1170
1171 + skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
1172 + sizeof(speed_le), &speed_le,
1173 + HCI_INIT_TIMEOUT);
1174 + if (!IS_ERR(skb)) {
1175 + kfree_skb(skb);
1176 + serdev_device_set_baudrate(serdev, speed);
1177 + }
1178 + }
1179
1180 return 0;
1181 }
1182 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1183 index 48a224a6b178..bc19d6c16aaa 100644
1184 --- a/drivers/cpufreq/cpufreq.c
1185 +++ b/drivers/cpufreq/cpufreq.c
1186 @@ -933,6 +933,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1187 struct freq_attr *fattr = to_attr(attr);
1188 ssize_t ret;
1189
1190 + if (!fattr->show)
1191 + return -EIO;
1192 +
1193 down_read(&policy->rwsem);
1194 ret = fattr->show(policy, buf);
1195 up_read(&policy->rwsem);
1196 @@ -947,6 +950,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
1197 struct freq_attr *fattr = to_attr(attr);
1198 ssize_t ret = -EINVAL;
1199
1200 + if (!fattr->store)
1201 + return -EIO;
1202 +
1203 /*
1204 * cpus_read_trylock() is used here to work around a circular lock
1205 * dependency problem with respect to the cpufreq_register_driver().
1206 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1207 index f87f6495652f..eb9782fc93fe 100644
1208 --- a/drivers/md/dm-crypt.c
1209 +++ b/drivers/md/dm-crypt.c
1210 @@ -2700,21 +2700,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1211 }
1212
1213 ret = -ENOMEM;
1214 - cc->io_queue = alloc_workqueue("kcryptd_io/%s",
1215 - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1216 - 1, devname);
1217 + cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
1218 if (!cc->io_queue) {
1219 ti->error = "Couldn't create kcryptd io queue";
1220 goto bad;
1221 }
1222
1223 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1224 - cc->crypt_queue = alloc_workqueue("kcryptd/%s",
1225 - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1226 + cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
1227 1, devname);
1228 else
1229 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
1230 - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
1231 + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
1232 num_online_cpus(), devname);
1233 if (!cc->crypt_queue) {
1234 ti->error = "Couldn't create kcryptd queue";
1235 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1236 index 299c7b1c9718..8a62c920bb65 100644
1237 --- a/drivers/md/raid10.c
1238 +++ b/drivers/md/raid10.c
1239 @@ -191,7 +191,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
1240
1241 out_free_pages:
1242 while (--j >= 0)
1243 - resync_free_pages(&rps[j * 2]);
1244 + resync_free_pages(&rps[j]);
1245
1246 j = 0;
1247 out_free_bio:
1248 diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
1249 index 003319d7816d..31f78d6a05a4 100644
1250 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c
1251 +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
1252 @@ -796,7 +796,11 @@ static int vivid_thread_vid_cap(void *data)
1253 if (kthread_should_stop())
1254 break;
1255
1256 - mutex_lock(&dev->mutex);
1257 + if (!mutex_trylock(&dev->mutex)) {
1258 + schedule_timeout_uninterruptible(1);
1259 + continue;
1260 + }
1261 +
1262 cur_jiffies = jiffies;
1263 if (dev->cap_seq_resync) {
1264 dev->jiffies_vid_cap = cur_jiffies;
1265 @@ -956,8 +960,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
1266
1267 /* shutdown control thread */
1268 vivid_grab_controls(dev, false);
1269 - mutex_unlock(&dev->mutex);
1270 kthread_stop(dev->kthread_vid_cap);
1271 dev->kthread_vid_cap = NULL;
1272 - mutex_lock(&dev->mutex);
1273 }
1274 diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
1275 index ce5bcda2348c..1e165a6a2207 100644
1276 --- a/drivers/media/platform/vivid/vivid-kthread-out.c
1277 +++ b/drivers/media/platform/vivid/vivid-kthread-out.c
1278 @@ -143,7 +143,11 @@ static int vivid_thread_vid_out(void *data)
1279 if (kthread_should_stop())
1280 break;
1281
1282 - mutex_lock(&dev->mutex);
1283 + if (!mutex_trylock(&dev->mutex)) {
1284 + schedule_timeout_uninterruptible(1);
1285 + continue;
1286 + }
1287 +
1288 cur_jiffies = jiffies;
1289 if (dev->out_seq_resync) {
1290 dev->jiffies_vid_out = cur_jiffies;
1291 @@ -301,8 +305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
1292
1293 /* shutdown control thread */
1294 vivid_grab_controls(dev, false);
1295 - mutex_unlock(&dev->mutex);
1296 kthread_stop(dev->kthread_vid_out);
1297 dev->kthread_vid_out = NULL;
1298 - mutex_lock(&dev->mutex);
1299 }
1300 diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
1301 index 9acc709b0740..2b7522e16efc 100644
1302 --- a/drivers/media/platform/vivid/vivid-sdr-cap.c
1303 +++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
1304 @@ -141,7 +141,11 @@ static int vivid_thread_sdr_cap(void *data)
1305 if (kthread_should_stop())
1306 break;
1307
1308 - mutex_lock(&dev->mutex);
1309 + if (!mutex_trylock(&dev->mutex)) {
1310 + schedule_timeout_uninterruptible(1);
1311 + continue;
1312 + }
1313 +
1314 cur_jiffies = jiffies;
1315 if (dev->sdr_cap_seq_resync) {
1316 dev->jiffies_sdr_cap = cur_jiffies;
1317 @@ -303,10 +307,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
1318 }
1319
1320 /* shutdown control thread */
1321 - mutex_unlock(&dev->mutex);
1322 kthread_stop(dev->kthread_sdr_cap);
1323 dev->kthread_sdr_cap = NULL;
1324 - mutex_lock(&dev->mutex);
1325 }
1326
1327 static void sdr_cap_buf_request_complete(struct vb2_buffer *vb)
1328 diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
1329 index 8cbaa0c998ed..2d030732feac 100644
1330 --- a/drivers/media/platform/vivid/vivid-vid-cap.c
1331 +++ b/drivers/media/platform/vivid/vivid-vid-cap.c
1332 @@ -223,9 +223,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
1333 if (vb2_is_streaming(&dev->vb_vid_out_q))
1334 dev->can_loop_video = vivid_vid_can_loop(dev);
1335
1336 - if (dev->kthread_vid_cap)
1337 - return 0;
1338 -
1339 dev->vid_cap_seq_count = 0;
1340 dprintk(dev, 1, "%s\n", __func__);
1341 for (i = 0; i < VIDEO_MAX_FRAME; i++)
1342 diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
1343 index 148b663a6075..a0364ac497f9 100644
1344 --- a/drivers/media/platform/vivid/vivid-vid-out.c
1345 +++ b/drivers/media/platform/vivid/vivid-vid-out.c
1346 @@ -161,9 +161,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
1347 if (vb2_is_streaming(&dev->vb_vid_cap_q))
1348 dev->can_loop_video = vivid_vid_can_loop(dev);
1349
1350 - if (dev->kthread_vid_out)
1351 - return 0;
1352 -
1353 dev->vid_out_seq_count = 0;
1354 dprintk(dev, 1, "%s\n", __func__);
1355 if (dev->start_streaming_error) {
1356 diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
1357 index 37a850421fbb..c683a244b9fa 100644
1358 --- a/drivers/media/rc/imon.c
1359 +++ b/drivers/media/rc/imon.c
1360 @@ -1598,8 +1598,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
1361 spin_unlock_irqrestore(&ictx->kc_lock, flags);
1362
1363 /* send touchscreen events through input subsystem if touchpad data */
1364 - if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
1365 - buf[7] == 0x86) {
1366 + if (ictx->touch && len == 8 && buf[7] == 0x86) {
1367 imon_touch_event(ictx, buf);
1368 return;
1369
1370 diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
1371 index 3fc9829a9233..f9616158bcf4 100644
1372 --- a/drivers/media/rc/mceusb.c
1373 +++ b/drivers/media/rc/mceusb.c
1374 @@ -564,7 +564,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
1375 datasize = 4;
1376 break;
1377 case MCE_CMD_G_REVISION:
1378 - datasize = 2;
1379 + datasize = 4;
1380 break;
1381 case MCE_RSP_EQWAKESUPPORT:
1382 case MCE_RSP_GETWAKESOURCE:
1383 @@ -600,14 +600,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
1384 char *inout;
1385 u8 cmd, subcmd, *data;
1386 struct device *dev = ir->dev;
1387 - int start, skip = 0;
1388 u32 carrier, period;
1389
1390 - /* skip meaningless 0xb1 0x60 header bytes on orig receiver */
1391 - if (ir->flags.microsoft_gen1 && !out && !offset)
1392 - skip = 2;
1393 -
1394 - if (len <= skip)
1395 + if (offset < 0 || offset >= buf_len)
1396 return;
1397
1398 dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
1399 @@ -616,11 +611,32 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
1400
1401 inout = out ? "Request" : "Got";
1402
1403 - start = offset + skip;
1404 - cmd = buf[start] & 0xff;
1405 - subcmd = buf[start + 1] & 0xff;
1406 - data = buf + start + 2;
1407 + cmd = buf[offset];
1408 + subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0;
1409 + data = &buf[offset] + 2;
1410 +
1411 + /* Trace meaningless 0xb1 0x60 header bytes on original receiver */
1412 + if (ir->flags.microsoft_gen1 && !out && !offset) {
1413 + dev_dbg(dev, "MCE gen 1 header");
1414 + return;
1415 + }
1416 +
1417 + /* Trace IR data header or trailer */
1418 + if (cmd != MCE_CMD_PORT_IR &&
1419 + (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) {
1420 + if (cmd == MCE_IRDATA_TRAILER)
1421 + dev_dbg(dev, "End of raw IR data");
1422 + else
1423 + dev_dbg(dev, "Raw IR data, %d pulse/space samples",
1424 + cmd & MCE_PACKET_LENGTH_MASK);
1425 + return;
1426 + }
1427 +
1428 + /* Unexpected end of buffer? */
1429 + if (offset + len > buf_len)
1430 + return;
1431
1432 + /* Decode MCE command/response */
1433 switch (cmd) {
1434 case MCE_CMD_NULL:
1435 if (subcmd == MCE_CMD_NULL)
1436 @@ -644,7 +660,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
1437 dev_dbg(dev, "Get hw/sw rev?");
1438 else
1439 dev_dbg(dev, "hw/sw rev %*ph",
1440 - 4, &buf[start + 2]);
1441 + 4, &buf[offset + 2]);
1442 break;
1443 case MCE_CMD_RESUME:
1444 dev_dbg(dev, "Device resume requested");
1445 @@ -746,13 +762,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
1446 default:
1447 break;
1448 }
1449 -
1450 - if (cmd == MCE_IRDATA_TRAILER)
1451 - dev_dbg(dev, "End of raw IR data");
1452 - else if ((cmd != MCE_CMD_PORT_IR) &&
1453 - ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
1454 - dev_dbg(dev, "Raw IR data, %d pulse/space samples",
1455 - cmd & MCE_PACKET_LENGTH_MASK);
1456 #endif
1457 }
1458
1459 @@ -1136,32 +1145,62 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
1460 }
1461
1462 /*
1463 + * Handle PORT_SYS/IR command response received from the MCE device.
1464 + *
1465 + * Assumes single response with all its data (not truncated)
1466 + * in buf_in[]. The response itself determines its total length
1467 + * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[].
1468 + *
1469 * We don't do anything but print debug spew for many of the command bits
1470 * we receive from the hardware, but some of them are useful information
1471 * we want to store so that we can use them.
1472 */
1473 -static void mceusb_handle_command(struct mceusb_dev *ir, int index)
1474 +static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
1475 {
1476 + u8 cmd = buf_in[0];
1477 + u8 subcmd = buf_in[1];
1478 + u8 *hi = &buf_in[2]; /* read only when required */
1479 + u8 *lo = &buf_in[3]; /* read only when required */
1480 struct ir_raw_event rawir = {};
1481 - u8 hi = ir->buf_in[index + 1] & 0xff;
1482 - u8 lo = ir->buf_in[index + 2] & 0xff;
1483 u32 carrier_cycles;
1484 u32 cycles_fix;
1485
1486 - switch (ir->buf_in[index]) {
1487 - /* the one and only 5-byte return value command */
1488 - case MCE_RSP_GETPORTSTATUS:
1489 - if ((ir->buf_in[index + 4] & 0xff) == 0x00)
1490 - ir->txports_cabled |= 1 << hi;
1491 - break;
1492 + if (cmd == MCE_CMD_PORT_SYS) {
1493 + switch (subcmd) {
1494 + /* the one and only 5-byte return value command */
1495 + case MCE_RSP_GETPORTSTATUS:
1496 + if (buf_in[5] == 0)
1497 + ir->txports_cabled |= 1 << *hi;
1498 + break;
1499 +
1500 + /* 1-byte return value commands */
1501 + case MCE_RSP_EQEMVER:
1502 + ir->emver = *hi;
1503 + break;
1504 +
1505 + /* No return value commands */
1506 + case MCE_RSP_CMD_ILLEGAL:
1507 + ir->need_reset = true;
1508 + break;
1509 +
1510 + default:
1511 + break;
1512 + }
1513 +
1514 + return;
1515 + }
1516
1517 + if (cmd != MCE_CMD_PORT_IR)
1518 + return;
1519 +
1520 + switch (subcmd) {
1521 /* 2-byte return value commands */
1522 case MCE_RSP_EQIRTIMEOUT:
1523 - ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
1524 + ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT);
1525 break;
1526 case MCE_RSP_EQIRNUMPORTS:
1527 - ir->num_txports = hi;
1528 - ir->num_rxports = lo;
1529 + ir->num_txports = *hi;
1530 + ir->num_rxports = *lo;
1531 break;
1532 case MCE_RSP_EQIRRXCFCNT:
1533 /*
1534 @@ -1174,7 +1213,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
1535 */
1536 if (ir->carrier_report_enabled && ir->learning_active &&
1537 ir->pulse_tunit > 0) {
1538 - carrier_cycles = (hi << 8 | lo);
1539 + carrier_cycles = (*hi << 8 | *lo);
1540 /*
1541 * Adjust carrier cycle count by adding
1542 * 1 missed count per pulse "on"
1543 @@ -1192,24 +1231,24 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
1544 break;
1545
1546 /* 1-byte return value commands */
1547 - case MCE_RSP_EQEMVER:
1548 - ir->emver = hi;
1549 - break;
1550 case MCE_RSP_EQIRTXPORTS:
1551 - ir->tx_mask = hi;
1552 + ir->tx_mask = *hi;
1553 break;
1554 case MCE_RSP_EQIRRXPORTEN:
1555 - ir->learning_active = ((hi & 0x02) == 0x02);
1556 - if (ir->rxports_active != hi) {
1557 + ir->learning_active = ((*hi & 0x02) == 0x02);
1558 + if (ir->rxports_active != *hi) {
1559 dev_info(ir->dev, "%s-range (0x%x) receiver active",
1560 - ir->learning_active ? "short" : "long", hi);
1561 - ir->rxports_active = hi;
1562 + ir->learning_active ? "short" : "long", *hi);
1563 + ir->rxports_active = *hi;
1564 }
1565 break;
1566 +
1567 + /* No return value commands */
1568 case MCE_RSP_CMD_ILLEGAL:
1569 case MCE_RSP_TX_TIMEOUT:
1570 ir->need_reset = true;
1571 break;
1572 +
1573 default:
1574 break;
1575 }
1576 @@ -1235,7 +1274,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
1577 ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
1578 mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
1579 ir->rem + 2, false);
1580 - mceusb_handle_command(ir, i);
1581 + if (i + ir->rem < buf_len)
1582 + mceusb_handle_command(ir, &ir->buf_in[i - 1]);
1583 ir->parser_state = CMD_DATA;
1584 break;
1585 case PARSE_IRDATA:
1586 @@ -1264,15 +1304,22 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
1587 ir->rem--;
1588 break;
1589 case CMD_HEADER:
1590 - /* decode mce packets of the form (84),AA,BB,CC,DD */
1591 - /* IR data packets can span USB messages - rem */
1592 ir->cmd = ir->buf_in[i];
1593 if ((ir->cmd == MCE_CMD_PORT_IR) ||
1594 ((ir->cmd & MCE_PORT_MASK) !=
1595 MCE_COMMAND_IRDATA)) {
1596 + /*
1597 + * got PORT_SYS, PORT_IR, or unknown
1598 + * command response prefix
1599 + */
1600 ir->parser_state = SUBCMD;
1601 continue;
1602 }
1603 + /*
1604 + * got IR data prefix (0x80 + num_bytes)
1605 + * decode MCE packets of the form {0x83, AA, BB, CC}
1606 + * IR data packets can span USB messages
1607 + */
1608 ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
1609 mceusb_dev_printdata(ir, ir->buf_in, buf_len,
1610 i, ir->rem + 1, false);
1611 @@ -1296,6 +1343,14 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
1612 if (ir->parser_state != CMD_HEADER && !ir->rem)
1613 ir->parser_state = CMD_HEADER;
1614 }
1615 +
1616 + /*
1617 + * Accept IR data spanning multiple rx buffers.
1618 + * Reject MCE command response spanning multiple rx buffers.
1619 + */
1620 + if (ir->parser_state != PARSE_IRDATA || !ir->rem)
1621 + ir->parser_state = CMD_HEADER;
1622 +
1623 if (event) {
1624 dev_dbg(ir->dev, "processed IR data");
1625 ir_raw_event_handle(ir->rc);
1626 diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
1627 index 1826ff825c2e..1a801dc286f8 100644
1628 --- a/drivers/media/usb/b2c2/flexcop-usb.c
1629 +++ b/drivers/media/usb/b2c2/flexcop-usb.c
1630 @@ -538,6 +538,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
1631 struct flexcop_device *fc = NULL;
1632 int ret;
1633
1634 + if (intf->cur_altsetting->desc.bNumEndpoints < 1)
1635 + return -ENODEV;
1636 +
1637 if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
1638 err("out of memory\n");
1639 return -ENOMEM;
1640 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
1641 index f02fa0a67aa4..fac19ec46089 100644
1642 --- a/drivers/media/usb/dvb-usb/cxusb.c
1643 +++ b/drivers/media/usb/dvb-usb/cxusb.c
1644 @@ -521,7 +521,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
1645 {
1646 u8 ircode[4];
1647
1648 - cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
1649 + if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
1650 + return 0;
1651
1652 if (ircode[2] || ircode[3])
1653 rc_keydown(d->rc_dev, RC_PROTO_NEC,
1654 diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
1655 index cdc66adda755..93d36aab824f 100644
1656 --- a/drivers/media/usb/usbvision/usbvision-video.c
1657 +++ b/drivers/media/usb/usbvision/usbvision-video.c
1658 @@ -314,6 +314,10 @@ static int usbvision_v4l2_open(struct file *file)
1659 if (mutex_lock_interruptible(&usbvision->v4l2_lock))
1660 return -ERESTARTSYS;
1661
1662 + if (usbvision->remove_pending) {
1663 + err_code = -ENODEV;
1664 + goto unlock;
1665 + }
1666 if (usbvision->user) {
1667 err_code = -EBUSY;
1668 } else {
1669 @@ -377,6 +381,7 @@ unlock:
1670 static int usbvision_v4l2_close(struct file *file)
1671 {
1672 struct usb_usbvision *usbvision = video_drvdata(file);
1673 + int r;
1674
1675 PDEBUG(DBG_IO, "close");
1676
1677 @@ -391,9 +396,10 @@ static int usbvision_v4l2_close(struct file *file)
1678 usbvision_scratch_free(usbvision);
1679
1680 usbvision->user--;
1681 + r = usbvision->remove_pending;
1682 mutex_unlock(&usbvision->v4l2_lock);
1683
1684 - if (usbvision->remove_pending) {
1685 + if (r) {
1686 printk(KERN_INFO "%s: Final disconnect\n", __func__);
1687 usbvision_release(usbvision);
1688 return 0;
1689 @@ -453,6 +459,9 @@ static int vidioc_querycap(struct file *file, void *priv,
1690 {
1691 struct usb_usbvision *usbvision = video_drvdata(file);
1692
1693 + if (!usbvision->dev)
1694 + return -ENODEV;
1695 +
1696 strscpy(vc->driver, "USBVision", sizeof(vc->driver));
1697 strscpy(vc->card,
1698 usbvision_device_data[usbvision->dev_model].model_string,
1699 @@ -1061,6 +1070,11 @@ static int usbvision_radio_open(struct file *file)
1700
1701 if (mutex_lock_interruptible(&usbvision->v4l2_lock))
1702 return -ERESTARTSYS;
1703 +
1704 + if (usbvision->remove_pending) {
1705 + err_code = -ENODEV;
1706 + goto out;
1707 + }
1708 err_code = v4l2_fh_open(file);
1709 if (err_code)
1710 goto out;
1711 @@ -1093,21 +1107,24 @@ out:
1712 static int usbvision_radio_close(struct file *file)
1713 {
1714 struct usb_usbvision *usbvision = video_drvdata(file);
1715 + int r;
1716
1717 PDEBUG(DBG_IO, "");
1718
1719 mutex_lock(&usbvision->v4l2_lock);
1720 /* Set packet size to 0 */
1721 usbvision->iface_alt = 0;
1722 - usb_set_interface(usbvision->dev, usbvision->iface,
1723 - usbvision->iface_alt);
1724 + if (usbvision->dev)
1725 + usb_set_interface(usbvision->dev, usbvision->iface,
1726 + usbvision->iface_alt);
1727
1728 usbvision_audio_off(usbvision);
1729 usbvision->radio = 0;
1730 usbvision->user--;
1731 + r = usbvision->remove_pending;
1732 mutex_unlock(&usbvision->v4l2_lock);
1733
1734 - if (usbvision->remove_pending) {
1735 + if (r) {
1736 printk(KERN_INFO "%s: Final disconnect\n", __func__);
1737 v4l2_fh_release(file);
1738 usbvision_release(usbvision);
1739 @@ -1539,6 +1556,7 @@ err_usb:
1740 static void usbvision_disconnect(struct usb_interface *intf)
1741 {
1742 struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
1743 + int u;
1744
1745 PDEBUG(DBG_PROBE, "");
1746
1747 @@ -1555,13 +1573,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
1748 v4l2_device_disconnect(&usbvision->v4l2_dev);
1749 usbvision_i2c_unregister(usbvision);
1750 usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
1751 + u = usbvision->user;
1752
1753 usb_put_dev(usbvision->dev);
1754 usbvision->dev = NULL; /* USB device is no more */
1755
1756 mutex_unlock(&usbvision->v4l2_lock);
1757
1758 - if (usbvision->user) {
1759 + if (u) {
1760 printk(KERN_INFO "%s: In use, disconnect pending\n",
1761 __func__);
1762 wake_up_interruptible(&usbvision->wait_frame);
1763 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
1764 index 66ee168ddc7e..428235ca2635 100644
1765 --- a/drivers/media/usb/uvc/uvc_driver.c
1766 +++ b/drivers/media/usb/uvc/uvc_driver.c
1767 @@ -2151,6 +2151,20 @@ static int uvc_probe(struct usb_interface *intf,
1768 sizeof(dev->name) - len);
1769 }
1770
1771 + /* Initialize the media device. */
1772 +#ifdef CONFIG_MEDIA_CONTROLLER
1773 + dev->mdev.dev = &intf->dev;
1774 + strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
1775 + if (udev->serial)
1776 + strscpy(dev->mdev.serial, udev->serial,
1777 + sizeof(dev->mdev.serial));
1778 + usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
1779 + dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
1780 + media_device_init(&dev->mdev);
1781 +
1782 + dev->vdev.mdev = &dev->mdev;
1783 +#endif
1784 +
1785 /* Parse the Video Class control descriptor. */
1786 if (uvc_parse_control(dev) < 0) {
1787 uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
1788 @@ -2171,19 +2185,7 @@ static int uvc_probe(struct usb_interface *intf,
1789 "linux-uvc-devel mailing list.\n");
1790 }
1791
1792 - /* Initialize the media device and register the V4L2 device. */
1793 -#ifdef CONFIG_MEDIA_CONTROLLER
1794 - dev->mdev.dev = &intf->dev;
1795 - strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
1796 - if (udev->serial)
1797 - strscpy(dev->mdev.serial, udev->serial,
1798 - sizeof(dev->mdev.serial));
1799 - usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
1800 - dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
1801 - media_device_init(&dev->mdev);
1802 -
1803 - dev->vdev.mdev = &dev->mdev;
1804 -#endif
1805 + /* Register the V4L2 device. */
1806 if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
1807 goto error;
1808
1809 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
1810 index a0b4d265c6eb..347bb92e4130 100644
1811 --- a/drivers/net/wireless/ath/ath10k/pci.c
1812 +++ b/drivers/net/wireless/ath/ath10k/pci.c
1813 @@ -3490,7 +3490,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1814 struct ath10k_pci *ar_pci;
1815 enum ath10k_hw_rev hw_rev;
1816 struct ath10k_bus_params bus_params = {};
1817 - bool pci_ps;
1818 + bool pci_ps, is_qca988x = false;
1819 int (*pci_soft_reset)(struct ath10k *ar);
1820 int (*pci_hard_reset)(struct ath10k *ar);
1821 u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
1822 @@ -3500,6 +3500,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1823 case QCA988X_2_0_DEVICE_ID:
1824 hw_rev = ATH10K_HW_QCA988X;
1825 pci_ps = false;
1826 + is_qca988x = true;
1827 pci_soft_reset = ath10k_pci_warm_reset;
1828 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
1829 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
1830 @@ -3619,25 +3620,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1831 goto err_deinit_irq;
1832 }
1833
1834 + bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1835 + bus_params.link_can_suspend = true;
1836 + /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
1837 + * fall off the bus during chip_reset. These chips have the same pci
1838 + * device id as the QCA9880 BR4A or 2R4E. So that's why the check.
1839 + */
1840 + if (is_qca988x) {
1841 + bus_params.chip_id =
1842 + ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
1843 + if (bus_params.chip_id != 0xffffffff) {
1844 + if (!ath10k_pci_chip_is_supported(pdev->device,
1845 + bus_params.chip_id))
1846 + goto err_unsupported;
1847 + }
1848 + }
1849 +
1850 ret = ath10k_pci_chip_reset(ar);
1851 if (ret) {
1852 ath10k_err(ar, "failed to reset chip: %d\n", ret);
1853 goto err_free_irq;
1854 }
1855
1856 - bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1857 - bus_params.link_can_suspend = true;
1858 bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
1859 - if (bus_params.chip_id == 0xffffffff) {
1860 - ath10k_err(ar, "failed to get chip id\n");
1861 - goto err_free_irq;
1862 - }
1863 + if (bus_params.chip_id == 0xffffffff)
1864 + goto err_unsupported;
1865
1866 - if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
1867 - ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
1868 - pdev->device, bus_params.chip_id);
1869 + if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
1870 goto err_free_irq;
1871 - }
1872
1873 ret = ath10k_core_register(ar, &bus_params);
1874 if (ret) {
1875 @@ -3647,6 +3657,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1876
1877 return 0;
1878
1879 +err_unsupported:
1880 + ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
1881 + pdev->device, bus_params.chip_id);
1882 +
1883 err_free_irq:
1884 ath10k_pci_free_irq(ar);
1885 ath10k_pci_rx_retry_sync(ar);
1886 diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
1887 index 3b63b6257c43..545ac1f06997 100644
1888 --- a/drivers/net/wireless/ath/ath10k/qmi.c
1889 +++ b/drivers/net/wireless/ath/ath10k/qmi.c
1890 @@ -581,22 +581,29 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
1891 {
1892 struct wlfw_host_cap_resp_msg_v01 resp = {};
1893 struct wlfw_host_cap_req_msg_v01 req = {};
1894 + struct qmi_elem_info *req_ei;
1895 struct ath10k *ar = qmi->ar;
1896 + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1897 struct qmi_txn txn;
1898 int ret;
1899
1900 req.daemon_support_valid = 1;
1901 req.daemon_support = 0;
1902
1903 - ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
1904 - wlfw_host_cap_resp_msg_v01_ei, &resp);
1905 + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
1906 + &resp);
1907 if (ret < 0)
1908 goto out;
1909
1910 + if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
1911 + req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
1912 + else
1913 + req_ei = wlfw_host_cap_req_msg_v01_ei;
1914 +
1915 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
1916 QMI_WLFW_HOST_CAP_REQ_V01,
1917 WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
1918 - wlfw_host_cap_req_msg_v01_ei, &req);
1919 + req_ei, &req);
1920 if (ret < 0) {
1921 qmi_txn_cancel(&txn);
1922 ath10k_err(ar, "failed to send host capability request: %d\n", ret);
1923 diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
1924 index 1fe05c6218c3..86fcf4e1de5f 100644
1925 --- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
1926 +++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
1927 @@ -1988,6 +1988,28 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
1928 {}
1929 };
1930
1931 +struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
1932 + {
1933 + .data_type = QMI_OPT_FLAG,
1934 + .elem_len = 1,
1935 + .elem_size = sizeof(u8),
1936 + .array_type = NO_ARRAY,
1937 + .tlv_type = 0x10,
1938 + .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
1939 + daemon_support_valid),
1940 + },
1941 + {
1942 + .data_type = QMI_UNSIGNED_1_BYTE,
1943 + .elem_len = 1,
1944 + .elem_size = sizeof(u8),
1945 + .array_type = NO_ARRAY,
1946 + .tlv_type = 0x10,
1947 + .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
1948 + daemon_support),
1949 + },
1950 + {}
1951 +};
1952 +
1953 struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
1954 {
1955 .data_type = QMI_STRUCT,
1956 diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
1957 index bca1186e1560..4d107e1364a8 100644
1958 --- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
1959 +++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
1960 @@ -575,6 +575,7 @@ struct wlfw_host_cap_req_msg_v01 {
1961
1962 #define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
1963 extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
1964 +extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
1965
1966 struct wlfw_host_cap_resp_msg_v01 {
1967 struct qmi_response_type_v01 resp;
1968 diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
1969 index b491361e6ed4..fc15a0037f0e 100644
1970 --- a/drivers/net/wireless/ath/ath10k/snoc.c
1971 +++ b/drivers/net/wireless/ath/ath10k/snoc.c
1972 @@ -1261,6 +1261,15 @@ out:
1973 return ret;
1974 }
1975
1976 +static void ath10k_snoc_quirks_init(struct ath10k *ar)
1977 +{
1978 + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1979 + struct device *dev = &ar_snoc->dev->dev;
1980 +
1981 + if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
1982 + set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
1983 +}
1984 +
1985 int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
1986 {
1987 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1988 @@ -1678,6 +1687,8 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
1989 ar->ce_priv = &ar_snoc->ce;
1990 msa_size = drv_data->msa_size;
1991
1992 + ath10k_snoc_quirks_init(ar);
1993 +
1994 ret = ath10k_snoc_resource_init(ar);
1995 if (ret) {
1996 ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1997 diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
1998 index d62f53501fbb..9db823e46314 100644
1999 --- a/drivers/net/wireless/ath/ath10k/snoc.h
2000 +++ b/drivers/net/wireless/ath/ath10k/snoc.h
2001 @@ -63,6 +63,7 @@ enum ath10k_snoc_flags {
2002 ATH10K_SNOC_FLAG_REGISTERED,
2003 ATH10K_SNOC_FLAG_UNREGISTERING,
2004 ATH10K_SNOC_FLAG_RECOVERY,
2005 + ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK,
2006 };
2007
2008 struct ath10k_snoc {
2009 diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
2010 index e1420f67f776..9ebe74ee4aef 100644
2011 --- a/drivers/net/wireless/ath/ath10k/usb.c
2012 +++ b/drivers/net/wireless/ath/ath10k/usb.c
2013 @@ -38,6 +38,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe)
2014 struct ath10k_urb_context *urb_context = NULL;
2015 unsigned long flags;
2016
2017 + /* bail if this pipe is not initialized */
2018 + if (!pipe->ar_usb)
2019 + return NULL;
2020 +
2021 spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
2022 if (!list_empty(&pipe->urb_list_head)) {
2023 urb_context = list_first_entry(&pipe->urb_list_head,
2024 @@ -55,6 +59,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe,
2025 {
2026 unsigned long flags;
2027
2028 + /* bail if this pipe is not initialized */
2029 + if (!pipe->ar_usb)
2030 + return;
2031 +
2032 spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
2033
2034 pipe->urb_cnt++;
2035 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2036 index 2b29bf4730f6..b4885a700296 100644
2037 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2038 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2039 @@ -4183,7 +4183,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
2040
2041 static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
2042 {
2043 - u32 data, ko, kg;
2044 + u32 data = 0, ko, kg;
2045
2046 if (!AR_SREV_9462_20_OR_LATER(ah))
2047 return;
2048 diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
2049 index 04bc488385e6..4af012968cb6 100644
2050 --- a/drivers/staging/comedi/drivers/usbduxfast.c
2051 +++ b/drivers/staging/comedi/drivers/usbduxfast.c
2052 @@ -1,6 +1,6 @@
2053 // SPDX-License-Identifier: GPL-2.0+
2054 /*
2055 - * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
2056 + * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk
2057 */
2058
2059 /*
2060 @@ -8,7 +8,7 @@
2061 * Description: University of Stirling USB DAQ & INCITE Technology Limited
2062 * Devices: [ITL] USB-DUX-FAST (usbduxfast)
2063 * Author: Bernd Porr <mail@berndporr.me.uk>
2064 - * Updated: 10 Oct 2014
2065 + * Updated: 16 Nov 2019
2066 * Status: stable
2067 */
2068
2069 @@ -22,6 +22,7 @@
2070 *
2071 *
2072 * Revision history:
2073 + * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest
2074 * 0.9: Dropping the first data packet which seems to be from the last transfer.
2075 * Buffer overflows in the FX2 are handed over to comedi.
2076 * 0.92: Dropping now 4 packets. The quad buffer has to be emptied.
2077 @@ -350,6 +351,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
2078 struct comedi_cmd *cmd)
2079 {
2080 int err = 0;
2081 + int err2 = 0;
2082 unsigned int steps;
2083 unsigned int arg;
2084
2085 @@ -399,11 +401,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
2086 */
2087 steps = (cmd->convert_arg * 30) / 1000;
2088 if (cmd->chanlist_len != 1)
2089 - err |= comedi_check_trigger_arg_min(&steps,
2090 - MIN_SAMPLING_PERIOD);
2091 - err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
2092 - arg = (steps * 1000) / 30;
2093 - err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
2094 + err2 |= comedi_check_trigger_arg_min(&steps,
2095 + MIN_SAMPLING_PERIOD);
2096 + else
2097 + err2 |= comedi_check_trigger_arg_min(&steps, 1);
2098 + err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
2099 + if (err2) {
2100 + err |= err2;
2101 + arg = (steps * 1000) / 30;
2102 + err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
2103 + }
2104
2105 if (cmd->stop_src == TRIG_COUNT)
2106 err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
2107 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
2108 index ac92725458b5..ba1eaabc7796 100644
2109 --- a/drivers/usb/misc/appledisplay.c
2110 +++ b/drivers/usb/misc/appledisplay.c
2111 @@ -164,7 +164,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
2112 0,
2113 pdata->msgdata, 2,
2114 ACD_USB_TIMEOUT);
2115 - brightness = pdata->msgdata[1];
2116 + if (retval < 2) {
2117 + if (retval >= 0)
2118 + retval = -EMSGSIZE;
2119 + } else {
2120 + brightness = pdata->msgdata[1];
2121 + }
2122 mutex_unlock(&pdata->sysfslock);
2123
2124 if (retval < 0)
2125 @@ -299,6 +304,7 @@ error:
2126 if (pdata) {
2127 if (pdata->urb) {
2128 usb_kill_urb(pdata->urb);
2129 + cancel_delayed_work_sync(&pdata->work);
2130 if (pdata->urbdata)
2131 usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
2132 pdata->urbdata, pdata->urb->transfer_dma);
2133 diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
2134 index 34e6cd6f40d3..87067c3d6109 100644
2135 --- a/drivers/usb/misc/chaoskey.c
2136 +++ b/drivers/usb/misc/chaoskey.c
2137 @@ -384,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
2138 !dev->reading,
2139 (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
2140
2141 - if (result < 0)
2142 + if (result < 0) {
2143 + usb_kill_urb(dev->urb);
2144 goto out;
2145 + }
2146
2147 - if (result == 0)
2148 + if (result == 0) {
2149 result = -ETIMEDOUT;
2150 - else
2151 + usb_kill_urb(dev->urb);
2152 + } else {
2153 result = dev->valid;
2154 + }
2155 out:
2156 /* Let the device go back to sleep eventually */
2157 usb_autopm_put_interface(dev->interface);
2158 @@ -526,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface,
2159
2160 static int chaoskey_resume(struct usb_interface *interface)
2161 {
2162 + struct chaoskey *dev;
2163 + struct usb_device *udev = interface_to_usbdev(interface);
2164 +
2165 usb_dbg(interface, "resume");
2166 + dev = usb_get_intfdata(interface);
2167 +
2168 + /*
2169 + * We may have lost power.
2170 + * In that case the device that needs a long time
2171 + * for the first requests needs an extended timeout
2172 + * again
2173 + */
2174 + if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
2175 + dev->reads_started = false;
2176 +
2177 return 0;
2178 }
2179 #else
2180 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2181 index 979bef9bfb6b..f5143eedbc48 100644
2182 --- a/drivers/usb/serial/cp210x.c
2183 +++ b/drivers/usb/serial/cp210x.c
2184 @@ -125,6 +125,7 @@ static const struct usb_device_id id_table[] = {
2185 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
2186 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
2187 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
2188 + { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
2189 { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
2190 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
2191 { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
2192 diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
2193 index 18110225d506..2ec4eeacebc7 100644
2194 --- a/drivers/usb/serial/mos7720.c
2195 +++ b/drivers/usb/serial/mos7720.c
2196 @@ -1833,10 +1833,6 @@ static int mos7720_startup(struct usb_serial *serial)
2197 product = le16_to_cpu(serial->dev->descriptor.idProduct);
2198 dev = serial->dev;
2199
2200 - /* setting configuration feature to one */
2201 - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2202 - (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
2203 -
2204 if (product == MOSCHIP_DEVICE_ID_7715) {
2205 struct urb *urb = serial->port[0]->interrupt_in_urb;
2206
2207 diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
2208 index a698d46ba773..ab4bf8d6d7df 100644
2209 --- a/drivers/usb/serial/mos7840.c
2210 +++ b/drivers/usb/serial/mos7840.c
2211 @@ -119,11 +119,15 @@
2212 /* This driver also supports
2213 * ATEN UC2324 device using Moschip MCS7840
2214 * ATEN UC2322 device using Moschip MCS7820
2215 + * MOXA UPort 2210 device using Moschip MCS7820
2216 */
2217 #define USB_VENDOR_ID_ATENINTL 0x0557
2218 #define ATENINTL_DEVICE_ID_UC2324 0x2011
2219 #define ATENINTL_DEVICE_ID_UC2322 0x7820
2220
2221 +#define USB_VENDOR_ID_MOXA 0x110a
2222 +#define MOXA_DEVICE_ID_2210 0x2210
2223 +
2224 /* Interrupt Routine Defines */
2225
2226 #define SERIAL_IIR_RLS 0x06
2227 @@ -195,6 +199,7 @@ static const struct usb_device_id id_table[] = {
2228 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
2229 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
2230 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
2231 + {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)},
2232 {} /* terminating entry */
2233 };
2234 MODULE_DEVICE_TABLE(usb, id_table);
2235 @@ -2020,6 +2025,7 @@ static int mos7840_probe(struct usb_serial *serial,
2236 const struct usb_device_id *id)
2237 {
2238 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
2239 + u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor);
2240 u8 *buf;
2241 int device_type;
2242
2243 @@ -2030,6 +2036,11 @@ static int mos7840_probe(struct usb_serial *serial,
2244 goto out;
2245 }
2246
2247 + if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) {
2248 + device_type = MOSCHIP_DEVICE_ID_7820;
2249 + goto out;
2250 + }
2251 +
2252 buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
2253 if (!buf)
2254 return -ENOMEM;
2255 @@ -2279,11 +2290,6 @@ out:
2256 goto error;
2257 } else
2258 dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
2259 -
2260 - /* setting configuration feature to one */
2261 - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2262 - 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
2263 - MOS_WDR_TIMEOUT);
2264 }
2265 return 0;
2266 error:
2267 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2268 index 06ab016be0b6..e9491d400a24 100644
2269 --- a/drivers/usb/serial/option.c
2270 +++ b/drivers/usb/serial/option.c
2271 @@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb);
2272 #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
2273
2274 #define DELL_PRODUCT_5821E 0x81d7
2275 +#define DELL_PRODUCT_5821E_ESIM 0x81e0
2276
2277 #define KYOCERA_VENDOR_ID 0x0c88
2278 #define KYOCERA_PRODUCT_KPC650 0x17da
2279 @@ -1044,6 +1045,8 @@ static const struct usb_device_id option_ids[] = {
2280 { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
2281 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
2282 .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
2283 + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
2284 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
2285 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
2286 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
2287 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
2288 @@ -1990,6 +1993,10 @@ static const struct usb_device_id option_ids[] = {
2289 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
2290 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
2291 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
2292 + { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
2293 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
2294 + { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
2295 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
2296 { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
2297 .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
2298 { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
2299 diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
2300 index 2f86b28fa3da..7bbae7a08642 100644
2301 --- a/drivers/usb/usbip/Kconfig
2302 +++ b/drivers/usb/usbip/Kconfig
2303 @@ -4,6 +4,7 @@ config USBIP_CORE
2304 tristate "USB/IP support"
2305 depends on NET
2306 select USB_COMMON
2307 + select SGL_ALLOC
2308 ---help---
2309 This enables pushing USB packets over IP to allow remote
2310 machines direct access to USB devices. It provides the
2311 diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
2312 index 66edfeea68fe..e2b019532234 100644
2313 --- a/drivers/usb/usbip/stub_rx.c
2314 +++ b/drivers/usb/usbip/stub_rx.c
2315 @@ -470,18 +470,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
2316 if (pipe == -1)
2317 return;
2318
2319 + /*
2320 + * Smatch reported the error case where use_sg is true and buf_len is 0.
2321 + * In this case, it adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be
2322 + * released by stub event handler and connection will be shut down.
2323 + */
2324 priv = stub_priv_alloc(sdev, pdu);
2325 if (!priv)
2326 return;
2327
2328 buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
2329
2330 + if (use_sg && !buf_len) {
2331 + dev_err(&udev->dev, "sg buffer with zero length\n");
2332 + goto err_malloc;
2333 + }
2334 +
2335 /* allocate urb transfer buffer, if needed */
2336 if (buf_len) {
2337 if (use_sg) {
2338 sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
2339 if (!sgl)
2340 goto err_malloc;
2341 +
2342 + /* Check if the server's HCD supports SG */
2343 + if (!udev->bus->sg_tablesize) {
2344 + /*
2345 + * If the server's HCD doesn't support SG, break
2346 + * a single SG request into several URBs and map
2347 + * each SG list entry to corresponding URB
2348 + * buffer. The previously allocated SG list is
2349 + * stored in priv->sgl (If the server's HCD
2350 + * support SG, SG list is stored only in
2351 + * urb->sg) and it is used as an indicator that
2352 + * the server split single SG request into
2353 + * several URBs. Later, priv->sgl is used by
2354 + * stub_complete() and stub_send_ret_submit() to
2355 + * reassemble the divided URBs.
2356 + */
2357 + support_sg = 0;
2358 + num_urbs = nents;
2359 + priv->completed_urbs = 0;
2360 + pdu->u.cmd_submit.transfer_flags &=
2361 + ~URB_DMA_MAP_SG;
2362 + }
2363 } else {
2364 buffer = kzalloc(buf_len, GFP_KERNEL);
2365 if (!buffer)
2366 @@ -489,24 +521,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
2367 }
2368 }
2369
2370 - /* Check if the server's HCD supports SG */
2371 - if (use_sg && !udev->bus->sg_tablesize) {
2372 - /*
2373 - * If the server's HCD doesn't support SG, break a single SG
2374 - * request into several URBs and map each SG list entry to
2375 - * corresponding URB buffer. The previously allocated SG
2376 - * list is stored in priv->sgl (If the server's HCD support SG,
2377 - * SG list is stored only in urb->sg) and it is used as an
2378 - * indicator that the server split single SG request into
2379 - * several URBs. Later, priv->sgl is used by stub_complete() and
2380 - * stub_send_ret_submit() to reassemble the divied URBs.
2381 - */
2382 - support_sg = 0;
2383 - num_urbs = nents;
2384 - priv->completed_urbs = 0;
2385 - pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
2386 - }
2387 -
2388 /* allocate urb array */
2389 priv->num_urbs = num_urbs;
2390 priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
2391 diff --git a/fs/exec.c b/fs/exec.c
2392 index 555e93c7dec8..c27231234764 100644
2393 --- a/fs/exec.c
2394 +++ b/fs/exec.c
2395 @@ -1015,7 +1015,7 @@ static int exec_mmap(struct mm_struct *mm)
2396 /* Notify parent that we're no longer interested in the old VM */
2397 tsk = current;
2398 old_mm = current->mm;
2399 - mm_release(tsk, old_mm);
2400 + exec_mm_release(tsk, old_mm);
2401
2402 if (old_mm) {
2403 sync_mm_rss(old_mm);
2404 diff --git a/include/linux/compat.h b/include/linux/compat.h
2405 index 16dafd9f4b86..c4c389c7e1b4 100644
2406 --- a/include/linux/compat.h
2407 +++ b/include/linux/compat.h
2408 @@ -410,8 +410,6 @@ struct compat_kexec_segment;
2409 struct compat_mq_attr;
2410 struct compat_msgbuf;
2411
2412 -extern void compat_exit_robust_list(struct task_struct *curr);
2413 -
2414 #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
2415
2416 #define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
2417 diff --git a/include/linux/futex.h b/include/linux/futex.h
2418 index ccaef0097785..5cc3fed27d4c 100644
2419 --- a/include/linux/futex.h
2420 +++ b/include/linux/futex.h
2421 @@ -2,7 +2,9 @@
2422 #ifndef _LINUX_FUTEX_H
2423 #define _LINUX_FUTEX_H
2424
2425 +#include <linux/sched.h>
2426 #include <linux/ktime.h>
2427 +
2428 #include <uapi/linux/futex.h>
2429
2430 struct inode;
2431 @@ -48,15 +50,35 @@ union futex_key {
2432 #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
2433
2434 #ifdef CONFIG_FUTEX
2435 -extern void exit_robust_list(struct task_struct *curr);
2436 +enum {
2437 + FUTEX_STATE_OK,
2438 + FUTEX_STATE_EXITING,
2439 + FUTEX_STATE_DEAD,
2440 +};
2441
2442 -long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2443 - u32 __user *uaddr2, u32 val2, u32 val3);
2444 -#else
2445 -static inline void exit_robust_list(struct task_struct *curr)
2446 +static inline void futex_init_task(struct task_struct *tsk)
2447 {
2448 + tsk->robust_list = NULL;
2449 +#ifdef CONFIG_COMPAT
2450 + tsk->compat_robust_list = NULL;
2451 +#endif
2452 + INIT_LIST_HEAD(&tsk->pi_state_list);
2453 + tsk->pi_state_cache = NULL;
2454 + tsk->futex_state = FUTEX_STATE_OK;
2455 + mutex_init(&tsk->futex_exit_mutex);
2456 }
2457
2458 +void futex_exit_recursive(struct task_struct *tsk);
2459 +void futex_exit_release(struct task_struct *tsk);
2460 +void futex_exec_release(struct task_struct *tsk);
2461 +
2462 +long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2463 + u32 __user *uaddr2, u32 val2, u32 val3);
2464 +#else
2465 +static inline void futex_init_task(struct task_struct *tsk) { }
2466 +static inline void futex_exit_recursive(struct task_struct *tsk) { }
2467 +static inline void futex_exit_release(struct task_struct *tsk) { }
2468 +static inline void futex_exec_release(struct task_struct *tsk) { }
2469 static inline long do_futex(u32 __user *uaddr, int op, u32 val,
2470 ktime_t *timeout, u32 __user *uaddr2,
2471 u32 val2, u32 val3)
2472 @@ -65,12 +87,4 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
2473 }
2474 #endif
2475
2476 -#ifdef CONFIG_FUTEX_PI
2477 -extern void exit_pi_state_list(struct task_struct *curr);
2478 -#else
2479 -static inline void exit_pi_state_list(struct task_struct *curr)
2480 -{
2481 -}
2482 -#endif
2483 -
2484 #endif
2485 diff --git a/include/linux/sched.h b/include/linux/sched.h
2486 index 67a1d86981a9..775503573ed7 100644
2487 --- a/include/linux/sched.h
2488 +++ b/include/linux/sched.h
2489 @@ -1054,6 +1054,8 @@ struct task_struct {
2490 #endif
2491 struct list_head pi_state_list;
2492 struct futex_pi_state *pi_state_cache;
2493 + struct mutex futex_exit_mutex;
2494 + unsigned int futex_state;
2495 #endif
2496 #ifdef CONFIG_PERF_EVENTS
2497 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
2498 @@ -1442,7 +1444,6 @@ extern struct pid *cad_pid;
2499 */
2500 #define PF_IDLE 0x00000002 /* I am an IDLE thread */
2501 #define PF_EXITING 0x00000004 /* Getting shut down */
2502 -#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
2503 #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
2504 #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
2505 #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
2506 diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
2507 index e6770012db18..c49257a3b510 100644
2508 --- a/include/linux/sched/mm.h
2509 +++ b/include/linux/sched/mm.h
2510 @@ -117,8 +117,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
2511 * succeeds.
2512 */
2513 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2514 -/* Remove the current tasks stale references to the old mm_struct */
2515 -extern void mm_release(struct task_struct *, struct mm_struct *);
2516 +/* Remove the current tasks stale references to the old mm_struct on exit() */
2517 +extern void exit_mm_release(struct task_struct *, struct mm_struct *);
2518 +/* Remove the current tasks stale references to the old mm_struct on exec() */
2519 +extern void exec_mm_release(struct task_struct *, struct mm_struct *);
2520
2521 #ifdef CONFIG_MEMCG
2522 extern void mm_update_next_owner(struct mm_struct *mm);
2523 diff --git a/kernel/exit.c b/kernel/exit.c
2524 index a46a50d67002..d351fd09e739 100644
2525 --- a/kernel/exit.c
2526 +++ b/kernel/exit.c
2527 @@ -437,7 +437,7 @@ static void exit_mm(void)
2528 struct mm_struct *mm = current->mm;
2529 struct core_state *core_state;
2530
2531 - mm_release(current, mm);
2532 + exit_mm_release(current, mm);
2533 if (!mm)
2534 return;
2535 sync_mm_rss(mm);
2536 @@ -746,32 +746,12 @@ void __noreturn do_exit(long code)
2537 */
2538 if (unlikely(tsk->flags & PF_EXITING)) {
2539 pr_alert("Fixing recursive fault but reboot is needed!\n");
2540 - /*
2541 - * We can do this unlocked here. The futex code uses
2542 - * this flag just to verify whether the pi state
2543 - * cleanup has been done or not. In the worst case it
2544 - * loops once more. We pretend that the cleanup was
2545 - * done as there is no way to return. Either the
2546 - * OWNER_DIED bit is set by now or we push the blocked
2547 - * task into the wait for ever nirwana as well.
2548 - */
2549 - tsk->flags |= PF_EXITPIDONE;
2550 + futex_exit_recursive(tsk);
2551 set_current_state(TASK_UNINTERRUPTIBLE);
2552 schedule();
2553 }
2554
2555 exit_signals(tsk); /* sets PF_EXITING */
2556 - /*
2557 - * Ensure that all new tsk->pi_lock acquisitions must observe
2558 - * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
2559 - */
2560 - smp_mb();
2561 - /*
2562 - * Ensure that we must observe the pi_state in exit_mm() ->
2563 - * mm_release() -> exit_pi_state_list().
2564 - */
2565 - raw_spin_lock_irq(&tsk->pi_lock);
2566 - raw_spin_unlock_irq(&tsk->pi_lock);
2567
2568 if (unlikely(in_atomic())) {
2569 pr_info("note: %s[%d] exited with preempt_count %d\n",
2570 @@ -846,12 +826,6 @@ void __noreturn do_exit(long code)
2571 * Make sure we are holding no locks:
2572 */
2573 debug_check_no_locks_held();
2574 - /*
2575 - * We can do this unlocked here. The futex code uses this flag
2576 - * just to verify whether the pi state cleanup has been done
2577 - * or not. In the worst case it loops once more.
2578 - */
2579 - tsk->flags |= PF_EXITPIDONE;
2580
2581 if (tsk->io_context)
2582 exit_io_context(tsk);
2583 diff --git a/kernel/fork.c b/kernel/fork.c
2584 index 13b38794efb5..6cabc124378c 100644
2585 --- a/kernel/fork.c
2586 +++ b/kernel/fork.c
2587 @@ -1283,24 +1283,8 @@ static int wait_for_vfork_done(struct task_struct *child,
2588 * restoring the old one. . .
2589 * Eric Biederman 10 January 1998
2590 */
2591 -void mm_release(struct task_struct *tsk, struct mm_struct *mm)
2592 +static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
2593 {
2594 - /* Get rid of any futexes when releasing the mm */
2595 -#ifdef CONFIG_FUTEX
2596 - if (unlikely(tsk->robust_list)) {
2597 - exit_robust_list(tsk);
2598 - tsk->robust_list = NULL;
2599 - }
2600 -#ifdef CONFIG_COMPAT
2601 - if (unlikely(tsk->compat_robust_list)) {
2602 - compat_exit_robust_list(tsk);
2603 - tsk->compat_robust_list = NULL;
2604 - }
2605 -#endif
2606 - if (unlikely(!list_empty(&tsk->pi_state_list)))
2607 - exit_pi_state_list(tsk);
2608 -#endif
2609 -
2610 uprobe_free_utask(tsk);
2611
2612 /* Get rid of any cached register state */
2613 @@ -1333,6 +1317,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
2614 complete_vfork_done(tsk);
2615 }
2616
2617 +void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
2618 +{
2619 + futex_exit_release(tsk);
2620 + mm_release(tsk, mm);
2621 +}
2622 +
2623 +void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
2624 +{
2625 + futex_exec_release(tsk);
2626 + mm_release(tsk, mm);
2627 +}
2628 +
2629 /**
2630 * dup_mm() - duplicates an existing mm structure
2631 * @tsk: the task_struct with which the new mm will be associated.
2632 @@ -2062,14 +2058,8 @@ static __latent_entropy struct task_struct *copy_process(
2633 #ifdef CONFIG_BLOCK
2634 p->plug = NULL;
2635 #endif
2636 -#ifdef CONFIG_FUTEX
2637 - p->robust_list = NULL;
2638 -#ifdef CONFIG_COMPAT
2639 - p->compat_robust_list = NULL;
2640 -#endif
2641 - INIT_LIST_HEAD(&p->pi_state_list);
2642 - p->pi_state_cache = NULL;
2643 -#endif
2644 + futex_init_task(p);
2645 +
2646 /*
2647 * sigaltstack should be cleared when sharing the same VM
2648 */
2649 diff --git a/kernel/futex.c b/kernel/futex.c
2650 index bd18f60e4c6c..afbf928d6a6b 100644
2651 --- a/kernel/futex.c
2652 +++ b/kernel/futex.c
2653 @@ -325,6 +325,12 @@ static inline bool should_fail_futex(bool fshared)
2654 }
2655 #endif /* CONFIG_FAIL_FUTEX */
2656
2657 +#ifdef CONFIG_COMPAT
2658 +static void compat_exit_robust_list(struct task_struct *curr);
2659 +#else
2660 +static inline void compat_exit_robust_list(struct task_struct *curr) { }
2661 +#endif
2662 +
2663 static inline void futex_get_mm(union futex_key *key)
2664 {
2665 mmgrab(key->private.mm);
2666 @@ -890,7 +896,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
2667 * Kernel cleans up PI-state, but userspace is likely hosed.
2668 * (Robust-futex cleanup is separate and might save the day for userspace.)
2669 */
2670 -void exit_pi_state_list(struct task_struct *curr)
2671 +static void exit_pi_state_list(struct task_struct *curr)
2672 {
2673 struct list_head *next, *head = &curr->pi_state_list;
2674 struct futex_pi_state *pi_state;
2675 @@ -960,7 +966,8 @@ void exit_pi_state_list(struct task_struct *curr)
2676 }
2677 raw_spin_unlock_irq(&curr->pi_lock);
2678 }
2679 -
2680 +#else
2681 +static inline void exit_pi_state_list(struct task_struct *curr) { }
2682 #endif
2683
2684 /*
2685 @@ -1169,16 +1176,47 @@ out_error:
2686 return ret;
2687 }
2688
2689 +/**
2690 + * wait_for_owner_exiting - Block until the owner has exited
2691 + * @exiting: Pointer to the exiting task
2692 + *
2693 + * Caller must hold a refcount on @exiting.
2694 + */
2695 +static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
2696 +{
2697 + if (ret != -EBUSY) {
2698 + WARN_ON_ONCE(exiting);
2699 + return;
2700 + }
2701 +
2702 + if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
2703 + return;
2704 +
2705 + mutex_lock(&exiting->futex_exit_mutex);
2706 + /*
2707 + * No point in doing state checking here. If the waiter got here
2708 + * while the task was in exec()->exec_futex_release() then it can
2709 + * have any FUTEX_STATE_* value when the waiter has acquired the
2710 + * mutex. OK, if running, EXITING or DEAD if it reached exit()
2711 + * already. Highly unlikely and not a problem. Just one more round
2712 + * through the futex maze.
2713 + */
2714 + mutex_unlock(&exiting->futex_exit_mutex);
2715 +
2716 + put_task_struct(exiting);
2717 +}
2718 +
2719 static int handle_exit_race(u32 __user *uaddr, u32 uval,
2720 struct task_struct *tsk)
2721 {
2722 u32 uval2;
2723
2724 /*
2725 - * If PF_EXITPIDONE is not yet set, then try again.
2726 + * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
2727 + * caller that the alleged owner is busy.
2728 */
2729 - if (tsk && !(tsk->flags & PF_EXITPIDONE))
2730 - return -EAGAIN;
2731 + if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
2732 + return -EBUSY;
2733
2734 /*
2735 * Reread the user space value to handle the following situation:
2736 @@ -1196,8 +1234,9 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
2737 * *uaddr = 0xC0000000; tsk = get_task(PID);
2738 * } if (!tsk->flags & PF_EXITING) {
2739 * ... attach();
2740 - * tsk->flags |= PF_EXITPIDONE; } else {
2741 - * if (!(tsk->flags & PF_EXITPIDONE))
2742 + * tsk->futex_state = } else {
2743 + * FUTEX_STATE_DEAD; if (tsk->futex_state !=
2744 + * FUTEX_STATE_DEAD)
2745 * return -EAGAIN;
2746 * return -ESRCH; <--- FAIL
2747 * }
2748 @@ -1228,7 +1267,8 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
2749 * it after doing proper sanity checks.
2750 */
2751 static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
2752 - struct futex_pi_state **ps)
2753 + struct futex_pi_state **ps,
2754 + struct task_struct **exiting)
2755 {
2756 pid_t pid = uval & FUTEX_TID_MASK;
2757 struct futex_pi_state *pi_state;
2758 @@ -1253,22 +1293,33 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
2759 }
2760
2761 /*
2762 - * We need to look at the task state flags to figure out,
2763 - * whether the task is exiting. To protect against the do_exit
2764 - * change of the task flags, we do this protected by
2765 - * p->pi_lock:
2766 + * We need to look at the task state to figure out, whether the
2767 + * task is exiting. To protect against the change of the task state
2768 + * in futex_exit_release(), we do this protected by p->pi_lock:
2769 */
2770 raw_spin_lock_irq(&p->pi_lock);
2771 - if (unlikely(p->flags & PF_EXITING)) {
2772 + if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
2773 /*
2774 - * The task is on the way out. When PF_EXITPIDONE is
2775 - * set, we know that the task has finished the
2776 - * cleanup:
2777 + * The task is on the way out. When the futex state is
2778 + * FUTEX_STATE_DEAD, we know that the task has finished
2779 + * the cleanup:
2780 */
2781 int ret = handle_exit_race(uaddr, uval, p);
2782
2783 raw_spin_unlock_irq(&p->pi_lock);
2784 - put_task_struct(p);
2785 + /*
2786 + * If the owner task is between FUTEX_STATE_EXITING and
2787 + * FUTEX_STATE_DEAD then store the task pointer and keep
2788 + * the reference on the task struct. The calling code will
2789 + * drop all locks, wait for the task to reach
2790 + * FUTEX_STATE_DEAD and then drop the refcount. This is
2791 + * required to prevent a live lock when the current task
2792 + * preempted the exiting task between the two states.
2793 + */
2794 + if (ret == -EBUSY)
2795 + *exiting = p;
2796 + else
2797 + put_task_struct(p);
2798 return ret;
2799 }
2800
2801 @@ -1307,7 +1358,8 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
2802
2803 static int lookup_pi_state(u32 __user *uaddr, u32 uval,
2804 struct futex_hash_bucket *hb,
2805 - union futex_key *key, struct futex_pi_state **ps)
2806 + union futex_key *key, struct futex_pi_state **ps,
2807 + struct task_struct **exiting)
2808 {
2809 struct futex_q *top_waiter = futex_top_waiter(hb, key);
2810
2811 @@ -1322,7 +1374,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
2812 * We are the first waiter - try to look up the owner based on
2813 * @uval and attach to it.
2814 */
2815 - return attach_to_pi_owner(uaddr, uval, key, ps);
2816 + return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
2817 }
2818
2819 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
2820 @@ -1350,6 +1402,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
2821 * lookup
2822 * @task: the task to perform the atomic lock work for. This will
2823 * be "current" except in the case of requeue pi.
2824 + * @exiting: Pointer to store the task pointer of the owner task
2825 + * which is in the middle of exiting
2826 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
2827 *
2828 * Return:
2829 @@ -1358,11 +1412,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
2830 * - <0 - error
2831 *
2832 * The hb->lock and futex_key refs shall be held by the caller.
2833 + *
2834 + * @exiting is only set when the return value is -EBUSY. If so, this holds
2835 + * a refcount on the exiting task on return and the caller needs to drop it
2836 + * after waiting for the exit to complete.
2837 */
2838 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
2839 union futex_key *key,
2840 struct futex_pi_state **ps,
2841 - struct task_struct *task, int set_waiters)
2842 + struct task_struct *task,
2843 + struct task_struct **exiting,
2844 + int set_waiters)
2845 {
2846 u32 uval, newval, vpid = task_pid_vnr(task);
2847 struct futex_q *top_waiter;
2848 @@ -1432,7 +1492,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
2849 * attach to the owner. If that fails, no harm done, we only
2850 * set the FUTEX_WAITERS bit in the user space variable.
2851 */
2852 - return attach_to_pi_owner(uaddr, newval, key, ps);
2853 + return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
2854 }
2855
2856 /**
2857 @@ -1850,6 +1910,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
2858 * @key1: the from futex key
2859 * @key2: the to futex key
2860 * @ps: address to store the pi_state pointer
2861 + * @exiting: Pointer to store the task pointer of the owner task
2862 + * which is in the middle of exiting
2863 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
2864 *
2865 * Try and get the lock on behalf of the top waiter if we can do it atomically.
2866 @@ -1857,16 +1919,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
2867 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
2868 * hb1 and hb2 must be held by the caller.
2869 *
2870 + * @exiting is only set when the return value is -EBUSY. If so, this holds
2871 + * a refcount on the exiting task on return and the caller needs to drop it
2872 + * after waiting for the exit to complete.
2873 + *
2874 * Return:
2875 * - 0 - failed to acquire the lock atomically;
2876 * - >0 - acquired the lock, return value is vpid of the top_waiter
2877 * - <0 - error
2878 */
2879 -static int futex_proxy_trylock_atomic(u32 __user *pifutex,
2880 - struct futex_hash_bucket *hb1,
2881 - struct futex_hash_bucket *hb2,
2882 - union futex_key *key1, union futex_key *key2,
2883 - struct futex_pi_state **ps, int set_waiters)
2884 +static int
2885 +futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
2886 + struct futex_hash_bucket *hb2, union futex_key *key1,
2887 + union futex_key *key2, struct futex_pi_state **ps,
2888 + struct task_struct **exiting, int set_waiters)
2889 {
2890 struct futex_q *top_waiter = NULL;
2891 u32 curval;
2892 @@ -1903,7 +1969,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
2893 */
2894 vpid = task_pid_vnr(top_waiter->task);
2895 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
2896 - set_waiters);
2897 + exiting, set_waiters);
2898 if (ret == 1) {
2899 requeue_pi_wake_futex(top_waiter, key2, hb2);
2900 return vpid;
2901 @@ -2032,6 +2098,8 @@ retry_private:
2902 }
2903
2904 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
2905 + struct task_struct *exiting = NULL;
2906 +
2907 /*
2908 * Attempt to acquire uaddr2 and wake the top waiter. If we
2909 * intend to requeue waiters, force setting the FUTEX_WAITERS
2910 @@ -2039,7 +2107,8 @@ retry_private:
2911 * faults rather in the requeue loop below.
2912 */
2913 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
2914 - &key2, &pi_state, nr_requeue);
2915 + &key2, &pi_state,
2916 + &exiting, nr_requeue);
2917
2918 /*
2919 * At this point the top_waiter has either taken uaddr2 or is
2920 @@ -2066,7 +2135,8 @@ retry_private:
2921 * If that call succeeds then we have pi_state and an
2922 * initial refcount on it.
2923 */
2924 - ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
2925 + ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
2926 + &pi_state, &exiting);
2927 }
2928
2929 switch (ret) {
2930 @@ -2084,17 +2154,24 @@ retry_private:
2931 if (!ret)
2932 goto retry;
2933 goto out;
2934 + case -EBUSY:
2935 case -EAGAIN:
2936 /*
2937 * Two reasons for this:
2938 - * - Owner is exiting and we just wait for the
2939 + * - EBUSY: Owner is exiting and we just wait for the
2940 * exit to complete.
2941 - * - The user space value changed.
2942 + * - EAGAIN: The user space value changed.
2943 */
2944 double_unlock_hb(hb1, hb2);
2945 hb_waiters_dec(hb2);
2946 put_futex_key(&key2);
2947 put_futex_key(&key1);
2948 + /*
2949 + * Handle the case where the owner is in the middle of
2950 + * exiting. Wait for the exit to complete otherwise
2951 + * this task might loop forever, aka. live lock.
2952 + */
2953 + wait_for_owner_exiting(ret, exiting);
2954 cond_resched();
2955 goto retry;
2956 default:
2957 @@ -2801,6 +2878,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2958 {
2959 struct hrtimer_sleeper timeout, *to;
2960 struct futex_pi_state *pi_state = NULL;
2961 + struct task_struct *exiting = NULL;
2962 struct rt_mutex_waiter rt_waiter;
2963 struct futex_hash_bucket *hb;
2964 struct futex_q q = futex_q_init;
2965 @@ -2822,7 +2900,8 @@ retry:
2966 retry_private:
2967 hb = queue_lock(&q);
2968
2969 - ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2970 + ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
2971 + &exiting, 0);
2972 if (unlikely(ret)) {
2973 /*
2974 * Atomic work succeeded and we got the lock,
2975 @@ -2835,15 +2914,22 @@ retry_private:
2976 goto out_unlock_put_key;
2977 case -EFAULT:
2978 goto uaddr_faulted;
2979 + case -EBUSY:
2980 case -EAGAIN:
2981 /*
2982 * Two reasons for this:
2983 - * - Task is exiting and we just wait for the
2984 + * - EBUSY: Task is exiting and we just wait for the
2985 * exit to complete.
2986 - * - The user space value changed.
2987 + * - EAGAIN: The user space value changed.
2988 */
2989 queue_unlock(hb);
2990 put_futex_key(&q.key);
2991 + /*
2992 + * Handle the case where the owner is in the middle of
2993 + * exiting. Wait for the exit to complete otherwise
2994 + * this task might loop forever, aka. live lock.
2995 + */
2996 + wait_for_owner_exiting(ret, exiting);
2997 cond_resched();
2998 goto retry;
2999 default:
3000 @@ -3452,11 +3538,16 @@ err_unlock:
3001 return ret;
3002 }
3003
3004 +/* Constants for the pending_op argument of handle_futex_death */
3005 +#define HANDLE_DEATH_PENDING true
3006 +#define HANDLE_DEATH_LIST false
3007 +
3008 /*
3009 * Process a futex-list entry, check whether it's owned by the
3010 * dying task, and do notification if so:
3011 */
3012 -static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3013 +static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
3014 + bool pi, bool pending_op)
3015 {
3016 u32 uval, uninitialized_var(nval), mval;
3017 int err;
3018 @@ -3469,6 +3560,42 @@ retry:
3019 if (get_user(uval, uaddr))
3020 return -1;
3021
3022 + /*
3023 + * Special case for regular (non PI) futexes. The unlock path in
3024 + * user space has two race scenarios:
3025 + *
3026 + * 1. The unlock path releases the user space futex value and
3027 + * before it can execute the futex() syscall to wake up
3028 + * waiters it is killed.
3029 + *
3030 + * 2. A woken up waiter is killed before it can acquire the
3031 + * futex in user space.
3032 + *
3033 + * In both cases the TID validation below prevents a wakeup of
3034 + * potential waiters which can cause these waiters to block
3035 + * forever.
3036 + *
3037 + * In both cases the following conditions are met:
3038 + *
3039 + * 1) task->robust_list->list_op_pending != NULL
3040 + * @pending_op == true
3041 + * 2) User space futex value == 0
3042 + * 3) Regular futex: @pi == false
3043 + *
3044 + * If these conditions are met, it is safe to attempt waking up a
3045 + * potential waiter without touching the user space futex value and
3046 + * trying to set the OWNER_DIED bit. The user space futex value is
3047 + * uncontended and the rest of the user space mutex state is
3048 + * consistent, so a woken waiter will just take over the
3049 + * uncontended futex. Setting the OWNER_DIED bit would create
3050 + * inconsistent state and malfunction of the user space owner died
3051 + * handling.
3052 + */
3053 + if (pending_op && !pi && !uval) {
3054 + futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3055 + return 0;
3056 + }
3057 +
3058 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
3059 return 0;
3060
3061 @@ -3547,7 +3674,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
3062 *
3063 * We silently return on any sign of list-walking problem.
3064 */
3065 -void exit_robust_list(struct task_struct *curr)
3066 +static void exit_robust_list(struct task_struct *curr)
3067 {
3068 struct robust_list_head __user *head = curr->robust_list;
3069 struct robust_list __user *entry, *next_entry, *pending;
3070 @@ -3588,10 +3715,11 @@ void exit_robust_list(struct task_struct *curr)
3071 * A pending lock might already be on the list, so
3072 * don't process it twice:
3073 */
3074 - if (entry != pending)
3075 + if (entry != pending) {
3076 if (handle_futex_death((void __user *)entry + futex_offset,
3077 - curr, pi))
3078 + curr, pi, HANDLE_DEATH_LIST))
3079 return;
3080 + }
3081 if (rc)
3082 return;
3083 entry = next_entry;
3084 @@ -3605,9 +3733,118 @@ void exit_robust_list(struct task_struct *curr)
3085 cond_resched();
3086 }
3087
3088 - if (pending)
3089 + if (pending) {
3090 handle_futex_death((void __user *)pending + futex_offset,
3091 - curr, pip);
3092 + curr, pip, HANDLE_DEATH_PENDING);
3093 + }
3094 +}
3095 +
3096 +static void futex_cleanup(struct task_struct *tsk)
3097 +{
3098 + if (unlikely(tsk->robust_list)) {
3099 + exit_robust_list(tsk);
3100 + tsk->robust_list = NULL;
3101 + }
3102 +
3103 +#ifdef CONFIG_COMPAT
3104 + if (unlikely(tsk->compat_robust_list)) {
3105 + compat_exit_robust_list(tsk);
3106 + tsk->compat_robust_list = NULL;
3107 + }
3108 +#endif
3109 +
3110 + if (unlikely(!list_empty(&tsk->pi_state_list)))
3111 + exit_pi_state_list(tsk);
3112 +}
3113 +
3114 +/**
3115 + * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
3116 + * @tsk: task to set the state on
3117 + *
3118 + * Set the futex exit state of the task lockless. The futex waiter code
3119 + * observes that state when a task is exiting and loops until the task has
3120 + * actually finished the futex cleanup. The worst case for this is that the
3121 + * waiter runs through the wait loop until the state becomes visible.
3122 + *
3123 + * This is called from the recursive fault handling path in do_exit().
3124 + *
3125 + * This is best effort. Either the futex exit code has run already or
3126 + * not. If the OWNER_DIED bit has been set on the futex then the waiter can
3127 + * take it over. If not, the problem is pushed back to user space. If the
3128 + * futex exit code did not run yet, then an already queued waiter might
3129 + * block forever, but there is nothing which can be done about that.
3130 + */
3131 +void futex_exit_recursive(struct task_struct *tsk)
3132 +{
3133 + /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
3134 + if (tsk->futex_state == FUTEX_STATE_EXITING)
3135 + mutex_unlock(&tsk->futex_exit_mutex);
3136 + tsk->futex_state = FUTEX_STATE_DEAD;
3137 +}
3138 +
3139 +static void futex_cleanup_begin(struct task_struct *tsk)
3140 +{
3141 + /*
3142 + * Prevent various race issues against a concurrent incoming waiter
3143 + * including live locks by forcing the waiter to block on
3144 + * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
3145 + * attach_to_pi_owner().
3146 + */
3147 + mutex_lock(&tsk->futex_exit_mutex);
3148 +
3149 + /*
3150 + * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
3151 + *
3152 + * This ensures that all subsequent checks of tsk->futex_state in
3153 + * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
3154 + * tsk->pi_lock held.
3155 + *
3156 + * It guarantees also that a pi_state which was queued right before
3157 + * the state change under tsk->pi_lock by a concurrent waiter must
3158 + * be observed in exit_pi_state_list().
3159 + */
3160 + raw_spin_lock_irq(&tsk->pi_lock);
3161 + tsk->futex_state = FUTEX_STATE_EXITING;
3162 + raw_spin_unlock_irq(&tsk->pi_lock);
3163 +}
3164 +
3165 +static void futex_cleanup_end(struct task_struct *tsk, int state)
3166 +{
3167 + /*
3168 + * Lockless store. The only side effect is that an observer might
3169 + * take another loop until it becomes visible.
3170 + */
3171 + tsk->futex_state = state;
3172 + /*
3173 + * Drop the exit protection. This unblocks waiters which observed
3174 + * FUTEX_STATE_EXITING to reevaluate the state.
3175 + */
3176 + mutex_unlock(&tsk->futex_exit_mutex);
3177 +}
3178 +
3179 +void futex_exec_release(struct task_struct *tsk)
3180 +{
3181 + /*
3182 + * The state handling is done for consistency, but in the case of
3183 + * exec() there is no way to prevent futher damage as the PID stays
3184 + * the same. But for the unlikely and arguably buggy case that a
3185 + * futex is held on exec(), this provides at least as much state
3186 + * consistency protection which is possible.
3187 + */
3188 + futex_cleanup_begin(tsk);
3189 + futex_cleanup(tsk);
3190 + /*
3191 + * Reset the state to FUTEX_STATE_OK. The task is alive and about
3192 + * exec a new binary.
3193 + */
3194 + futex_cleanup_end(tsk, FUTEX_STATE_OK);
3195 +}
3196 +
3197 +void futex_exit_release(struct task_struct *tsk)
3198 +{
3199 + futex_cleanup_begin(tsk);
3200 + futex_cleanup(tsk);
3201 + futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
3202 }
3203
3204 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3205 @@ -3737,7 +3974,7 @@ static void __user *futex_uaddr(struct robust_list __user *entry,
3206 *
3207 * We silently return on any sign of list-walking problem.
3208 */
3209 -void compat_exit_robust_list(struct task_struct *curr)
3210 +static void compat_exit_robust_list(struct task_struct *curr)
3211 {
3212 struct compat_robust_list_head __user *head = curr->compat_robust_list;
3213 struct robust_list __user *entry, *next_entry, *pending;
3214 @@ -3784,7 +4021,8 @@ void compat_exit_robust_list(struct task_struct *curr)
3215 if (entry != pending) {
3216 void __user *uaddr = futex_uaddr(entry, futex_offset);
3217
3218 - if (handle_futex_death(uaddr, curr, pi))
3219 + if (handle_futex_death(uaddr, curr, pi,
3220 + HANDLE_DEATH_LIST))
3221 return;
3222 }
3223 if (rc)
3224 @@ -3803,7 +4041,7 @@ void compat_exit_robust_list(struct task_struct *curr)
3225 if (pending) {
3226 void __user *uaddr = futex_uaddr(pending, futex_offset);
3227
3228 - handle_futex_death(uaddr, curr, pip);
3229 + handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
3230 }
3231 }
3232
3233 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
3234 index 78bd2e3722c7..d14f6684737d 100644
3235 --- a/sound/pci/hda/patch_hdmi.c
3236 +++ b/sound/pci/hda/patch_hdmi.c
3237 @@ -3454,26 +3454,6 @@ static int nvhdmi_chmap_validate(struct hdac_chmap *chmap,
3238 return 0;
3239 }
3240
3241 -/* map from pin NID to port; port is 0-based */
3242 -/* for Nvidia: assume widget NID starting from 4, with step 1 (4, 5, 6, ...) */
3243 -static int nvhdmi_pin2port(void *audio_ptr, int pin_nid)
3244 -{
3245 - return pin_nid - 4;
3246 -}
3247 -
3248 -/* reverse-map from port to pin NID: see above */
3249 -static int nvhdmi_port2pin(struct hda_codec *codec, int port)
3250 -{
3251 - return port + 4;
3252 -}
3253 -
3254 -static const struct drm_audio_component_audio_ops nvhdmi_audio_ops = {
3255 - .pin2port = nvhdmi_pin2port,
3256 - .pin_eld_notify = generic_acomp_pin_eld_notify,
3257 - .master_bind = generic_acomp_master_bind,
3258 - .master_unbind = generic_acomp_master_unbind,
3259 -};
3260 -
3261 static int patch_nvhdmi(struct hda_codec *codec)
3262 {
3263 struct hdmi_spec *spec;
3264 @@ -3492,8 +3472,6 @@ static int patch_nvhdmi(struct hda_codec *codec)
3265
3266 codec->link_down_at_suspend = 1;
3267
3268 - generic_acomp_init(codec, &nvhdmi_audio_ops, nvhdmi_port2pin);
3269 -
3270 return 0;
3271 }
3272
3273 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3274 index 45eee5cc312e..6cd4ff09c5ee 100644
3275 --- a/sound/usb/mixer.c
3276 +++ b/sound/usb/mixer.c
3277 @@ -2930,6 +2930,9 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
3278 continue;
3279
3280 iface = usb_ifnum_to_if(dev, intf);
3281 + if (!iface)
3282 + continue;
3283 +
3284 num = iface->num_altsetting;
3285
3286 if (num < 2)
3287 diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
3288 index 7d460b1f1735..94b903d95afa 100644
3289 --- a/sound/usb/mixer_scarlett_gen2.c
3290 +++ b/sound/usb/mixer_scarlett_gen2.c
3291 @@ -261,34 +261,34 @@ static const struct scarlett2_device_info s6i6_gen2_info = {
3292 },
3293
3294 .ports = {
3295 - {
3296 + [SCARLETT2_PORT_TYPE_NONE] = {
3297 .id = 0x000,
3298 .num = { 1, 0, 8, 8, 8 },
3299 .src_descr = "Off",
3300 .src_num_offset = 0,
3301 },
3302 - {
3303 + [SCARLETT2_PORT_TYPE_ANALOGUE] = {
3304 .id = 0x080,
3305 .num = { 4, 4, 4, 4, 4 },
3306 .src_descr = "Analogue %d",
3307 .src_num_offset = 1,
3308 .dst_descr = "Analogue Output %02d Playback"
3309 },
3310 - {
3311 + [SCARLETT2_PORT_TYPE_SPDIF] = {
3312 .id = 0x180,
3313 .num = { 2, 2, 2, 2, 2 },
3314 .src_descr = "S/PDIF %d",
3315 .src_num_offset = 1,
3316 .dst_descr = "S/PDIF Output %d Playback"
3317 },
3318 - {
3319 + [SCARLETT2_PORT_TYPE_MIX] = {
3320 .id = 0x300,
3321 .num = { 10, 18, 18, 18, 18 },
3322 .src_descr = "Mix %c",
3323 .src_num_offset = 65,
3324 .dst_descr = "Mixer Input %02d Capture"
3325 },
3326 - {
3327 + [SCARLETT2_PORT_TYPE_PCM] = {
3328 .id = 0x600,
3329 .num = { 6, 6, 6, 6, 6 },
3330 .src_descr = "PCM %d",
3331 @@ -317,44 +317,44 @@ static const struct scarlett2_device_info s18i8_gen2_info = {
3332 },
3333
3334 .ports = {
3335 - {
3336 + [SCARLETT2_PORT_TYPE_NONE] = {
3337 .id = 0x000,
3338 .num = { 1, 0, 8, 8, 4 },
3339 .src_descr = "Off",
3340 .src_num_offset = 0,
3341 },
3342 - {
3343 + [SCARLETT2_PORT_TYPE_ANALOGUE] = {
3344 .id = 0x080,
3345 .num = { 8, 6, 6, 6, 6 },
3346 .src_descr = "Analogue %d",
3347 .src_num_offset = 1,
3348 .dst_descr = "Analogue Output %02d Playback"
3349 },
3350 - {
3351 + [SCARLETT2_PORT_TYPE_SPDIF] = {
3352 + .id = 0x180,
3353 /* S/PDIF outputs aren't available at 192KHz
3354 * but are included in the USB mux I/O
3355 * assignment message anyway
3356 */
3357 - .id = 0x180,
3358 .num = { 2, 2, 2, 2, 2 },
3359 .src_descr = "S/PDIF %d",
3360 .src_num_offset = 1,
3361 .dst_descr = "S/PDIF Output %d Playback"
3362 },
3363 - {
3364 + [SCARLETT2_PORT_TYPE_ADAT] = {
3365 .id = 0x200,
3366 .num = { 8, 0, 0, 0, 0 },
3367 .src_descr = "ADAT %d",
3368 .src_num_offset = 1,
3369 },
3370 - {
3371 + [SCARLETT2_PORT_TYPE_MIX] = {
3372 .id = 0x300,
3373 .num = { 10, 18, 18, 18, 18 },
3374 .src_descr = "Mix %c",
3375 .src_num_offset = 65,
3376 .dst_descr = "Mixer Input %02d Capture"
3377 },
3378 - {
3379 + [SCARLETT2_PORT_TYPE_PCM] = {
3380 .id = 0x600,
3381 .num = { 20, 18, 18, 14, 10 },
3382 .src_descr = "PCM %d",
3383 @@ -387,20 +387,20 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
3384 },
3385
3386 .ports = {
3387 - {
3388 + [SCARLETT2_PORT_TYPE_NONE] = {
3389 .id = 0x000,
3390 .num = { 1, 0, 8, 8, 6 },
3391 .src_descr = "Off",
3392 .src_num_offset = 0,
3393 },
3394 - {
3395 + [SCARLETT2_PORT_TYPE_ANALOGUE] = {
3396 .id = 0x080,
3397 .num = { 8, 10, 10, 10, 10 },
3398 .src_descr = "Analogue %d",
3399 .src_num_offset = 1,
3400 .dst_descr = "Analogue Output %02d Playback"
3401 },
3402 - {
3403 + [SCARLETT2_PORT_TYPE_SPDIF] = {
3404 /* S/PDIF outputs aren't available at 192KHz
3405 * but are included in the USB mux I/O
3406 * assignment message anyway
3407 @@ -411,21 +411,21 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
3408 .src_num_offset = 1,
3409 .dst_descr = "S/PDIF Output %d Playback"
3410 },
3411 - {
3412 + [SCARLETT2_PORT_TYPE_ADAT] = {
3413 .id = 0x200,
3414 .num = { 8, 8, 8, 4, 0 },
3415 .src_descr = "ADAT %d",
3416 .src_num_offset = 1,
3417 .dst_descr = "ADAT Output %d Playback"
3418 },
3419 - {
3420 + [SCARLETT2_PORT_TYPE_MIX] = {
3421 .id = 0x300,
3422 .num = { 10, 18, 18, 18, 18 },
3423 .src_descr = "Mix %c",
3424 .src_num_offset = 65,
3425 .dst_descr = "Mixer Input %02d Capture"
3426 },
3427 - {
3428 + [SCARLETT2_PORT_TYPE_PCM] = {
3429 .id = 0x600,
3430 .num = { 20, 18, 18, 14, 10 },
3431 .src_descr = "PCM %d",
3432 diff --git a/tools/arch/x86/tools/gen-insn-attr-x86.awk b/tools/arch/x86/tools/gen-insn-attr-x86.awk
3433 index b02a36b2c14f..a42015b305f4 100644
3434 --- a/tools/arch/x86/tools/gen-insn-attr-x86.awk
3435 +++ b/tools/arch/x86/tools/gen-insn-attr-x86.awk
3436 @@ -69,7 +69,7 @@ BEGIN {
3437
3438 lprefix1_expr = "\\((66|!F3)\\)"
3439 lprefix2_expr = "\\(F3\\)"
3440 - lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
3441 + lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
3442 lprefix_expr = "\\((66|F2|F3)\\)"
3443 max_lprefix = 4
3444
3445 @@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
3446 return add_flags(imm, mod)
3447 }
3448
3449 -/^[0-9a-f]+\:/ {
3450 +/^[0-9a-f]+:/ {
3451 if (NR == 1)
3452 next
3453 # get index
3454 diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c
3455 index 3c3a022654f3..6da0ac3f0135 100644
3456 --- a/tools/testing/selftests/x86/mov_ss_trap.c
3457 +++ b/tools/testing/selftests/x86/mov_ss_trap.c
3458 @@ -257,7 +257,8 @@ int main()
3459 err(1, "sigaltstack");
3460 sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK);
3461 nr = SYS_getpid;
3462 - asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr)
3463 + /* Clear EBP first to make sure we segfault cleanly. */
3464 + asm volatile ("xorl %%ebp, %%ebp; mov %[ss], %%ss; SYSENTER" : "+a" (nr)
3465 : [ss] "m" (ss) : "flags", "rcx"
3466 #ifdef __x86_64__
3467 , "r11"
3468 diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
3469 index 3e49a7873f3e..57c4f67f16ef 100644
3470 --- a/tools/testing/selftests/x86/sigreturn.c
3471 +++ b/tools/testing/selftests/x86/sigreturn.c
3472 @@ -451,6 +451,19 @@ static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
3473 ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL;
3474 ctx->uc_mcontext.gregs[REG_CX] = 0;
3475
3476 +#ifdef __i386__
3477 + /*
3478 + * Make sure the kernel doesn't inadvertently use DS or ES-relative
3479 + * accesses in a region where user DS or ES is loaded.
3480 + *
3481 + * Skip this for 64-bit builds because long mode doesn't care about
3482 + * DS and ES and skipping it increases test coverage a little bit,
3483 + * since 64-bit kernels can still run the 32-bit build.
3484 + */
3485 + ctx->uc_mcontext.gregs[REG_DS] = 0;
3486 + ctx->uc_mcontext.gregs[REG_ES] = 0;
3487 +#endif
3488 +
3489 memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
3490 requested_regs[REG_CX] = *ssptr(ctx); /* The asm code does this. */
3491
3492 diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
3493 index 2813aa821c82..d1d8ba2a4a40 100644
3494 --- a/tools/usb/usbip/libsrc/usbip_host_common.c
3495 +++ b/tools/usb/usbip/libsrc/usbip_host_common.c
3496 @@ -57,7 +57,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
3497 }
3498
3499 value = atoi(status);
3500 -
3501 + close(fd);
3502 return value;
3503 }
3504