Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0177-4.9.78-all-fixes.patch

Parent Directory | Revision Log


Revision 3608 - (show annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 64270 byte(s)
-added kernel-alx-legacy pkg
1 diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
2 index d11eff61fc9a..5cd58439ad2d 100644
3 --- a/Documentation/x86/pti.txt
4 +++ b/Documentation/x86/pti.txt
5 @@ -78,7 +78,7 @@ this protection comes at a cost:
6 non-PTI SYSCALL entry code, so requires mapping fewer
7 things into the userspace page tables. The downside is
8 that stacks must be switched at entry time.
9 - d. Global pages are disabled for all kernel structures not
10 + c. Global pages are disabled for all kernel structures not
11 mapped into both kernel and userspace page tables. This
12 feature of the MMU allows different processes to share TLB
13 entries mapping the kernel. Losing the feature means more
14 diff --git a/Makefile b/Makefile
15 index aba553531d6a..8a6f158a1176 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,6 +1,6 @@
19 VERSION = 4
20 PATCHLEVEL = 9
21 -SUBLEVEL = 77
22 +SUBLEVEL = 78
23 EXTRAVERSION =
24 NAME = Roaring Lionus
25
26 diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
27 index cf2f5240e176..27cc913ca0f5 100644
28 --- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
29 +++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
30 @@ -53,7 +53,8 @@
31 };
32
33 pinctrl: pin-controller@10000 {
34 - pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
35 + pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
36 + &pmx_gpio_header_gpo>;
37 pinctrl-names = "default";
38
39 pmx_uart0: pmx-uart0 {
40 @@ -85,11 +86,16 @@
41 * ground.
42 */
43 pmx_gpio_header: pmx-gpio-header {
44 - marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
45 + marvell,pins = "mpp17", "mpp29", "mpp28",
46 "mpp35", "mpp34", "mpp40";
47 marvell,function = "gpio";
48 };
49
50 + pmx_gpio_header_gpo: pxm-gpio-header-gpo {
51 + marvell,pins = "mpp7";
52 + marvell,function = "gpo";
53 + };
54 +
55 pmx_gpio_init: pmx-init {
56 marvell,pins = "mpp38";
57 marvell,function = "gpio";
58 diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
59 index 714da336ec86..5d49b60841e3 100644
60 --- a/arch/arm/configs/sunxi_defconfig
61 +++ b/arch/arm/configs/sunxi_defconfig
62 @@ -11,6 +11,7 @@ CONFIG_SMP=y
63 CONFIG_NR_CPUS=8
64 CONFIG_AEABI=y
65 CONFIG_HIGHMEM=y
66 +CONFIG_CMA=y
67 CONFIG_ARM_APPENDED_DTB=y
68 CONFIG_ARM_ATAG_DTB_COMPAT=y
69 CONFIG_CPU_FREQ=y
70 @@ -35,6 +36,7 @@ CONFIG_CAN_SUN4I=y
71 # CONFIG_WIRELESS is not set
72 CONFIG_DEVTMPFS=y
73 CONFIG_DEVTMPFS_MOUNT=y
74 +CONFIG_DMA_CMA=y
75 CONFIG_BLK_DEV_SD=y
76 CONFIG_ATA=y
77 CONFIG_AHCI_SUNXI=y
78 diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
79 index 85baadab02d3..2e6e9e99977b 100644
80 --- a/arch/arm64/kvm/handle_exit.c
81 +++ b/arch/arm64/kvm/handle_exit.c
82 @@ -44,7 +44,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
83
84 ret = kvm_psci_call(vcpu);
85 if (ret < 0) {
86 - kvm_inject_undefined(vcpu);
87 + vcpu_set_reg(vcpu, 0, ~0UL);
88 return 1;
89 }
90
91 @@ -53,7 +53,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
92
93 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
94 {
95 - kvm_inject_undefined(vcpu);
96 + vcpu_set_reg(vcpu, 0, ~0UL);
97 return 1;
98 }
99
100 diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
101 index 3446b6fb3acb..9da4e2292fc7 100644
102 --- a/arch/mips/ar7/platform.c
103 +++ b/arch/mips/ar7/platform.c
104 @@ -576,7 +576,7 @@ static int __init ar7_register_uarts(void)
105 uart_port.type = PORT_AR7;
106 uart_port.uartclk = clk_get_rate(bus_clk) / 2;
107 uart_port.iotype = UPIO_MEM32;
108 - uart_port.flags = UPF_FIXED_TYPE;
109 + uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
110 uart_port.regshift = 2;
111
112 uart_port.line = 0;
113 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
114 index bdc9aeaf2e45..a76dc738ec61 100644
115 --- a/arch/x86/entry/entry_32.S
116 +++ b/arch/x86/entry/entry_32.S
117 @@ -229,6 +229,17 @@ ENTRY(__switch_to_asm)
118 movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
119 #endif
120
121 +#ifdef CONFIG_RETPOLINE
122 + /*
123 + * When switching from a shallower to a deeper call stack
124 + * the RSB may either underflow or use entries populated
125 + * with userspace addresses. On CPUs where those concerns
126 + * exist, overwrite the RSB with entries which capture
127 + * speculative execution to prevent attack.
128 + */
129 + FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
130 +#endif
131 +
132 /* restore callee-saved registers */
133 popl %esi
134 popl %edi
135 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
136 index b9c901ce6582..e729e1528584 100644
137 --- a/arch/x86/entry/entry_64.S
138 +++ b/arch/x86/entry/entry_64.S
139 @@ -427,6 +427,17 @@ ENTRY(__switch_to_asm)
140 movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
141 #endif
142
143 +#ifdef CONFIG_RETPOLINE
144 + /*
145 + * When switching from a shallower to a deeper call stack
146 + * the RSB may either underflow or use entries populated
147 + * with userspace addresses. On CPUs where those concerns
148 + * exist, overwrite the RSB with entries which capture
149 + * speculative execution to prevent attack.
150 + */
151 + FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
152 +#endif
153 +
154 /* restore callee-saved registers */
155 popq %r15
156 popq %r14
157 @@ -1053,7 +1064,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
158 #endif
159
160 #ifdef CONFIG_X86_MCE
161 -idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
162 +idtentry machine_check do_mce has_error_code=0 paranoid=1
163 #endif
164
165 /*
166 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
167 index 4467568a531b..8537a21acd8b 100644
168 --- a/arch/x86/include/asm/cpufeatures.h
169 +++ b/arch/x86/include/asm/cpufeatures.h
170 @@ -197,9 +197,9 @@
171 #define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
172 #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
173
174 -#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
175 #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
176 #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
177 +#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
178
179 /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
180 #define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
181 @@ -235,6 +235,7 @@
182 #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
183 #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
184 #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
185 +#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
186 #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
187 #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
188 #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
189 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
190 index 402a11c803c3..4ad41087ce0e 100644
191 --- a/arch/x86/include/asm/nospec-branch.h
192 +++ b/arch/x86/include/asm/nospec-branch.h
193 @@ -11,7 +11,7 @@
194 * Fill the CPU return stack buffer.
195 *
196 * Each entry in the RSB, if used for a speculative 'ret', contains an
197 - * infinite 'pause; jmp' loop to capture speculative execution.
198 + * infinite 'pause; lfence; jmp' loop to capture speculative execution.
199 *
200 * This is required in various cases for retpoline and IBRS-based
201 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
202 @@ -38,11 +38,13 @@
203 call 772f; \
204 773: /* speculation trap */ \
205 pause; \
206 + lfence; \
207 jmp 773b; \
208 772: \
209 call 774f; \
210 775: /* speculation trap */ \
211 pause; \
212 + lfence; \
213 jmp 775b; \
214 774: \
215 dec reg; \
216 @@ -73,6 +75,7 @@
217 call .Ldo_rop_\@
218 .Lspec_trap_\@:
219 pause
220 + lfence
221 jmp .Lspec_trap_\@
222 .Ldo_rop_\@:
223 mov \reg, (%_ASM_SP)
224 @@ -165,6 +168,7 @@
225 " .align 16\n" \
226 "901: call 903f;\n" \
227 "902: pause;\n" \
228 + " lfence;\n" \
229 " jmp 902b;\n" \
230 " .align 16\n" \
231 "903: addl $4, %%esp;\n" \
232 @@ -190,6 +194,9 @@ enum spectre_v2_mitigation {
233 SPECTRE_V2_IBRS,
234 };
235
236 +extern char __indirect_thunk_start[];
237 +extern char __indirect_thunk_end[];
238 +
239 /*
240 * On VMEXIT we must ensure that no RSB predictions learned in the guest
241 * can be followed in the host, by overwriting the RSB completely. Both
242 @@ -199,16 +206,17 @@ enum spectre_v2_mitigation {
243 static inline void vmexit_fill_RSB(void)
244 {
245 #ifdef CONFIG_RETPOLINE
246 - unsigned long loops = RSB_CLEAR_LOOPS / 2;
247 + unsigned long loops;
248
249 asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
250 ALTERNATIVE("jmp 910f",
251 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
252 X86_FEATURE_RETPOLINE)
253 "910:"
254 - : "=&r" (loops), ASM_CALL_CONSTRAINT
255 - : "r" (loops) : "memory" );
256 + : "=r" (loops), ASM_CALL_CONSTRAINT
257 + : : "memory" );
258 #endif
259 }
260 +
261 #endif /* __ASSEMBLY__ */
262 #endif /* __NOSPEC_BRANCH_H__ */
263 diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
264 index 01fd0a7f48cd..688315b7a922 100644
265 --- a/arch/x86/include/asm/traps.h
266 +++ b/arch/x86/include/asm/traps.h
267 @@ -92,6 +92,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
268 #ifdef CONFIG_X86_32
269 dotraplinkage void do_iret_error(struct pt_regs *, long);
270 #endif
271 +dotraplinkage void do_mce(struct pt_regs *, long);
272
273 static inline int get_si_code(unsigned long condition)
274 {
275 diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
276 index f3557a1eb562..b5229abd1629 100644
277 --- a/arch/x86/kernel/apic/vector.c
278 +++ b/arch/x86/kernel/apic/vector.c
279 @@ -361,14 +361,17 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
280 irq_data->chip_data = data;
281 irq_data->hwirq = virq + i;
282 err = assign_irq_vector_policy(virq + i, node, data, info);
283 - if (err)
284 + if (err) {
285 + irq_data->chip_data = NULL;
286 + free_apic_chip_data(data);
287 goto error;
288 + }
289 }
290
291 return 0;
292
293 error:
294 - x86_vector_free_irqs(domain, virq, i + 1);
295 + x86_vector_free_irqs(domain, virq, i);
296 return err;
297 }
298
299 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
300 index 49d25ddf0e9f..8cacf62ec458 100644
301 --- a/arch/x86/kernel/cpu/bugs.c
302 +++ b/arch/x86/kernel/cpu/bugs.c
303 @@ -22,6 +22,7 @@
304 #include <asm/alternative.h>
305 #include <asm/pgtable.h>
306 #include <asm/cacheflush.h>
307 +#include <asm/intel-family.h>
308
309 static void __init spectre_v2_select_mitigation(void);
310
311 @@ -154,6 +155,23 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
312 return SPECTRE_V2_CMD_NONE;
313 }
314
315 +/* Check for Skylake-like CPUs (for RSB handling) */
316 +static bool __init is_skylake_era(void)
317 +{
318 + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
319 + boot_cpu_data.x86 == 6) {
320 + switch (boot_cpu_data.x86_model) {
321 + case INTEL_FAM6_SKYLAKE_MOBILE:
322 + case INTEL_FAM6_SKYLAKE_DESKTOP:
323 + case INTEL_FAM6_SKYLAKE_X:
324 + case INTEL_FAM6_KABYLAKE_MOBILE:
325 + case INTEL_FAM6_KABYLAKE_DESKTOP:
326 + return true;
327 + }
328 + }
329 + return false;
330 +}
331 +
332 static void __init spectre_v2_select_mitigation(void)
333 {
334 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
335 @@ -212,6 +230,24 @@ static void __init spectre_v2_select_mitigation(void)
336
337 spectre_v2_enabled = mode;
338 pr_info("%s\n", spectre_v2_strings[mode]);
339 +
340 + /*
341 + * If neither SMEP or KPTI are available, there is a risk of
342 + * hitting userspace addresses in the RSB after a context switch
343 + * from a shallow call stack to a deeper one. To prevent this fill
344 + * the entire RSB, even when using IBRS.
345 + *
346 + * Skylake era CPUs have a separate issue with *underflow* of the
347 + * RSB, when they will predict 'ret' targets from the generic BTB.
348 + * The proper mitigation for this is IBRS. If IBRS is not supported
349 + * or deactivated in favour of retpolines the RSB fill on context
350 + * switch is required.
351 + */
352 + if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
353 + !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
354 + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
355 + pr_info("Filling RSB on context switch\n");
356 + }
357 }
358
359 #undef pr_fmt
360 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
361 index 7b9ae04ddf5d..d198ae02f2b7 100644
362 --- a/arch/x86/kernel/cpu/common.c
363 +++ b/arch/x86/kernel/cpu/common.c
364 @@ -883,8 +883,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
365
366 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
367
368 - /* Assume for now that ALL x86 CPUs are insecure */
369 - setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
370 + if (c->x86_vendor != X86_VENDOR_AMD)
371 + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
372
373 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
374 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
375 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
376 index 8ca5f8ad008e..fe5cd6ea1f0e 100644
377 --- a/arch/x86/kernel/cpu/mcheck/mce.c
378 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
379 @@ -1754,6 +1754,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
380 void (*machine_check_vector)(struct pt_regs *, long error_code) =
381 unexpected_machine_check;
382
383 +dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
384 +{
385 + machine_check_vector(regs, error_code);
386 +}
387 +
388 /*
389 * Called for each booted CPU to set up machine checks.
390 * Must be called with preempt off:
391 diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
392 index 1db8dc490b66..b0dd9aec183d 100644
393 --- a/arch/x86/kernel/cpu/scattered.c
394 +++ b/arch/x86/kernel/cpu/scattered.c
395 @@ -31,7 +31,6 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
396 const struct cpuid_bit *cb;
397
398 static const struct cpuid_bit cpuid_bits[] = {
399 - { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
400 { X86_FEATURE_AVX512_4VNNIW, CR_EDX, 2, 0x00000007, 0 },
401 { X86_FEATURE_AVX512_4FMAPS, CR_EDX, 3, 0x00000007, 0 },
402 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
403 diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
404 index 4d74f7386a61..dc20da1c78f0 100644
405 --- a/arch/x86/kernel/kprobes/opt.c
406 +++ b/arch/x86/kernel/kprobes/opt.c
407 @@ -37,6 +37,7 @@
408 #include <asm/alternative.h>
409 #include <asm/insn.h>
410 #include <asm/debugreg.h>
411 +#include <asm/nospec-branch.h>
412
413 #include "common.h"
414
415 @@ -192,7 +193,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
416 }
417
418 /* Check whether insn is indirect jump */
419 -static int insn_is_indirect_jump(struct insn *insn)
420 +static int __insn_is_indirect_jump(struct insn *insn)
421 {
422 return ((insn->opcode.bytes[0] == 0xff &&
423 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
424 @@ -226,6 +227,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
425 return (start <= target && target <= start + len);
426 }
427
428 +static int insn_is_indirect_jump(struct insn *insn)
429 +{
430 + int ret = __insn_is_indirect_jump(insn);
431 +
432 +#ifdef CONFIG_RETPOLINE
433 + /*
434 + * Jump to x86_indirect_thunk_* is treated as an indirect jump.
435 + * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
436 + * older gcc may use indirect jump. So we add this check instead of
437 + * replace indirect-jump check.
438 + */
439 + if (!ret)
440 + ret = insn_jump_into_range(insn,
441 + (unsigned long)__indirect_thunk_start,
442 + (unsigned long)__indirect_thunk_end -
443 + (unsigned long)__indirect_thunk_start);
444 +#endif
445 + return ret;
446 +}
447 +
448 /* Decode whole function to ensure any instructions don't jump into target */
449 static int can_optimize(unsigned long paddr)
450 {
451 diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
452 index 44bf5cf417d3..d07a9390023e 100644
453 --- a/arch/x86/kernel/tsc.c
454 +++ b/arch/x86/kernel/tsc.c
455 @@ -693,7 +693,6 @@ unsigned long native_calibrate_tsc(void)
456 case INTEL_FAM6_KABYLAKE_DESKTOP:
457 crystal_khz = 24000; /* 24.0 MHz */
458 break;
459 - case INTEL_FAM6_SKYLAKE_X:
460 case INTEL_FAM6_ATOM_DENVERTON:
461 crystal_khz = 25000; /* 25.0 MHz */
462 break;
463 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
464 index dbf67f64d5ec..c7194e97c3d4 100644
465 --- a/arch/x86/kernel/vmlinux.lds.S
466 +++ b/arch/x86/kernel/vmlinux.lds.S
467 @@ -105,6 +105,13 @@ SECTIONS
468 SOFTIRQENTRY_TEXT
469 *(.fixup)
470 *(.gnu.warning)
471 +
472 +#ifdef CONFIG_RETPOLINE
473 + __indirect_thunk_start = .;
474 + *(.text.__x86.indirect_thunk)
475 + __indirect_thunk_end = .;
476 +#endif
477 +
478 /* End of text section */
479 _etext = .;
480 } :text = 0x9090
481 diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
482 index cb45c6cb465f..dfb2ba91b670 100644
483 --- a/arch/x86/lib/retpoline.S
484 +++ b/arch/x86/lib/retpoline.S
485 @@ -9,7 +9,7 @@
486 #include <asm/nospec-branch.h>
487
488 .macro THUNK reg
489 - .section .text.__x86.indirect_thunk.\reg
490 + .section .text.__x86.indirect_thunk
491
492 ENTRY(__x86_indirect_thunk_\reg)
493 CFI_STARTPROC
494 @@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
495 * than one per register with the correct names. So we do it
496 * the simple and nasty way...
497 */
498 -#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
499 +#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
500 +#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
501 #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
502
503 GENERATE_THUNK(_ASM_AX)
504 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
505 index 8b5ff88aa4f8..74dea7f14c20 100644
506 --- a/arch/x86/mm/fault.c
507 +++ b/arch/x86/mm/fault.c
508 @@ -191,14 +191,15 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
509 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
510 * faulted on a pte with its pkey=4.
511 */
512 -static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
513 +static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
514 + u32 *pkey)
515 {
516 /* This is effectively an #ifdef */
517 if (!boot_cpu_has(X86_FEATURE_OSPKE))
518 return;
519
520 /* Fault not from Protection Keys: nothing to do */
521 - if (si_code != SEGV_PKUERR)
522 + if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
523 return;
524 /*
525 * force_sig_info_fault() is called from a number of
526 @@ -237,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
527 lsb = PAGE_SHIFT;
528 info.si_addr_lsb = lsb;
529
530 - fill_sig_info_pkey(si_code, &info, pkey);
531 + fill_sig_info_pkey(si_signo, si_code, &info, pkey);
532
533 force_sig_info(si_signo, &info, tsk);
534 }
535 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
536 index 33e363dcc63b..aee39524375c 100644
537 --- a/drivers/ata/libata-core.c
538 +++ b/drivers/ata/libata-core.c
539 @@ -4322,6 +4322,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
540 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
541 */
542 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
543 + { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
544
545 /* Devices we expect to fail diagnostics */
546
547 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
548 index 39d28375aa37..0983470929bd 100644
549 --- a/drivers/infiniband/ulp/isert/ib_isert.c
550 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
551 @@ -747,6 +747,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
552 {
553 struct isert_conn *isert_conn = cma_id->qp->qp_context;
554
555 + ib_drain_qp(isert_conn->qp);
556 list_del_init(&isert_conn->node);
557 isert_conn->cm_id = NULL;
558 isert_put_conn(isert_conn);
559 diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
560 index caa5a62c42fb..15929d862459 100644
561 --- a/drivers/input/misc/twl4030-vibra.c
562 +++ b/drivers/input/misc/twl4030-vibra.c
563 @@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
564 twl4030_vibra_suspend, twl4030_vibra_resume);
565
566 static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
567 - struct device_node *node)
568 + struct device_node *parent)
569 {
570 + struct device_node *node;
571 +
572 if (pdata && pdata->coexist)
573 return true;
574
575 - node = of_find_node_by_name(node, "codec");
576 + node = of_get_child_by_name(parent, "codec");
577 if (node) {
578 of_node_put(node);
579 return true;
580 diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
581 index 5690eb7ff954..15e0d352c4cc 100644
582 --- a/drivers/input/misc/twl6040-vibra.c
583 +++ b/drivers/input/misc/twl6040-vibra.c
584 @@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
585 int vddvibr_uV = 0;
586 int error;
587
588 - of_node_get(twl6040_core_dev->of_node);
589 - twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
590 + twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
591 "vibra");
592 if (!twl6040_core_node) {
593 dev_err(&pdev->dev, "parent of node is missing?\n");
594 diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
595 index f26807c75be4..af83d2e34913 100644
596 --- a/drivers/input/mouse/alps.c
597 +++ b/drivers/input/mouse/alps.c
598 @@ -1247,29 +1247,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
599 case SS4_PACKET_ID_MULTI:
600 if (priv->flags & ALPS_BUTTONPAD) {
601 if (IS_SS4PLUS_DEV(priv->dev_id)) {
602 - f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
603 - f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
604 + f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
605 + f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
606 + no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
607 } else {
608 f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
609 f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
610 + no_data_x = SS4_MFPACKET_NO_AX_BL;
611 }
612 + no_data_y = SS4_MFPACKET_NO_AY_BL;
613
614 f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
615 f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
616 - no_data_x = SS4_MFPACKET_NO_AX_BL;
617 - no_data_y = SS4_MFPACKET_NO_AY_BL;
618 } else {
619 if (IS_SS4PLUS_DEV(priv->dev_id)) {
620 - f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
621 - f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
622 + f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
623 + f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
624 + no_data_x = SS4_PLUS_MFPACKET_NO_AX;
625 } else {
626 - f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
627 - f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
628 + f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
629 + f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
630 + no_data_x = SS4_MFPACKET_NO_AX;
631 }
632 + no_data_y = SS4_MFPACKET_NO_AY;
633 +
634 f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
635 f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
636 - no_data_x = SS4_MFPACKET_NO_AX;
637 - no_data_y = SS4_MFPACKET_NO_AY;
638 }
639
640 f->first_mp = 0;
641 diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
642 index 793123717145..9bc2babd9256 100644
643 --- a/drivers/input/mouse/alps.h
644 +++ b/drivers/input/mouse/alps.h
645 @@ -120,10 +120,12 @@ enum SS4_PACKET_ID {
646 #define SS4_IS_5F_DETECTED(_b) ((_b[2] & 0x10) == 0x10)
647
648
649 -#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
650 -#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
651 -#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */
652 -#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */
653 +#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
654 +#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
655 +#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */
656 +#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */
657 +#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */
658 +#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */
659
660 /*
661 * enum V7_PACKET_ID - defines the packet type for V7
662 diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
663 index 251ff2aa0633..7a0dbce4dae9 100644
664 --- a/drivers/input/touchscreen/88pm860x-ts.c
665 +++ b/drivers/input/touchscreen/88pm860x-ts.c
666 @@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
667 int data, n, ret;
668 if (!np)
669 return -ENODEV;
670 - np = of_find_node_by_name(np, "touch");
671 + np = of_get_child_by_name(np, "touch");
672 if (!np) {
673 dev_err(&pdev->dev, "Can't find touch node\n");
674 return -EINVAL;
675 @@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
676 if (data) {
677 ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
678 if (ret < 0)
679 - return -EINVAL;
680 + goto err_put_node;
681 }
682 /* set tsi prebias time */
683 if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
684 ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
685 if (ret < 0)
686 - return -EINVAL;
687 + goto err_put_node;
688 }
689 /* set prebias & prechg time of pen detect */
690 data = 0;
691 @@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
692 if (data) {
693 ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
694 if (ret < 0)
695 - return -EINVAL;
696 + goto err_put_node;
697 }
698 of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
699 +
700 + of_node_put(np);
701 +
702 return 0;
703 +
704 +err_put_node:
705 + of_node_put(np);
706 +
707 + return -EINVAL;
708 }
709 #else
710 #define pm860x_touch_dt_init(x, y, z) (-1)
711 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
712 index 4477bf930cf4..e976f4f39334 100644
713 --- a/drivers/md/dm-thin-metadata.c
714 +++ b/drivers/md/dm-thin-metadata.c
715 @@ -81,10 +81,14 @@
716 #define SECTOR_TO_BLOCK_SHIFT 3
717
718 /*
719 + * For btree insert:
720 * 3 for btree insert +
721 * 2 for btree lookup used within space map
722 + * For btree remove:
723 + * 2 for shadow spine +
724 + * 4 for rebalance 3 child node
725 */
726 -#define THIN_MAX_CONCURRENT_LOCKS 5
727 +#define THIN_MAX_CONCURRENT_LOCKS 6
728
729 /* This should be plenty */
730 #define SPACE_MAP_ROOT_SIZE 128
731 diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
732 index 7a75b5010f73..e4ececd3df00 100644
733 --- a/drivers/md/persistent-data/dm-btree.c
734 +++ b/drivers/md/persistent-data/dm-btree.c
735 @@ -678,23 +678,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
736 pn->keys[1] = rn->keys[0];
737 memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
738
739 - /*
740 - * rejig the spine. This is ugly, since it knows too
741 - * much about the spine
742 - */
743 - if (s->nodes[0] != new_parent) {
744 - unlock_block(s->info, s->nodes[0]);
745 - s->nodes[0] = new_parent;
746 - }
747 - if (key < le64_to_cpu(rn->keys[0])) {
748 - unlock_block(s->info, right);
749 - s->nodes[1] = left;
750 - } else {
751 - unlock_block(s->info, left);
752 - s->nodes[1] = right;
753 - }
754 - s->count = 2;
755 -
756 + unlock_block(s->info, left);
757 + unlock_block(s->info, right);
758 return 0;
759 }
760
761 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
762 index 304732550f0a..7f5ec40e2b4d 100644
763 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
764 +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
765 @@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
766 void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
767 int err = 0;
768 u8 *packet_ptr;
769 - int i, n = 1, packet_len;
770 + int packet_len;
771 ptrdiff_t cmd_len;
772
773 /* usb device unregistered? */
774 @@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
775 }
776
777 packet_ptr = cmd_head;
778 + packet_len = cmd_len;
779
780 /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
781 - if ((dev->udev->speed != USB_SPEED_HIGH) &&
782 - (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
783 - packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
784 - n += cmd_len / packet_len;
785 - } else {
786 - packet_len = cmd_len;
787 - }
788 + if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
789 + packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
790
791 - for (i = 0; i < n; i++) {
792 + do {
793 err = usb_bulk_msg(dev->udev,
794 usb_sndbulkpipe(dev->udev,
795 PCAN_USBPRO_EP_CMDOUT),
796 @@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
797 }
798
799 packet_ptr += packet_len;
800 - }
801 + cmd_len -= packet_len;
802 +
803 + if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
804 + packet_len = cmd_len;
805 +
806 + } while (packet_len > 0);
807
808 return err;
809 }
810 diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
811 index 94733f73d37f..7121453ec047 100644
812 --- a/drivers/nvdimm/btt.c
813 +++ b/drivers/nvdimm/btt.c
814 @@ -183,13 +183,13 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
815 return ret;
816 }
817
818 -static int btt_log_read_pair(struct arena_info *arena, u32 lane,
819 - struct log_entry *ent)
820 +static int btt_log_group_read(struct arena_info *arena, u32 lane,
821 + struct log_group *log)
822 {
823 - WARN_ON(!ent);
824 + WARN_ON(!log);
825 return arena_read_bytes(arena,
826 - arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
827 - 2 * LOG_ENT_SIZE);
828 + arena->logoff + (lane * LOG_GRP_SIZE), log,
829 + LOG_GRP_SIZE);
830 }
831
832 static struct dentry *debugfs_root;
833 @@ -229,6 +229,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
834 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
835 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
836 debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
837 + debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
838 + debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
839 }
840
841 static void btt_debugfs_init(struct btt *btt)
842 @@ -247,6 +249,11 @@ static void btt_debugfs_init(struct btt *btt)
843 }
844 }
845
846 +static u32 log_seq(struct log_group *log, int log_idx)
847 +{
848 + return le32_to_cpu(log->ent[log_idx].seq);
849 +}
850 +
851 /*
852 * This function accepts two log entries, and uses the
853 * sequence number to find the 'older' entry.
854 @@ -256,8 +263,10 @@ static void btt_debugfs_init(struct btt *btt)
855 *
856 * TODO The logic feels a bit kludge-y. make it better..
857 */
858 -static int btt_log_get_old(struct log_entry *ent)
859 +static int btt_log_get_old(struct arena_info *a, struct log_group *log)
860 {
861 + int idx0 = a->log_index[0];
862 + int idx1 = a->log_index[1];
863 int old;
864
865 /*
866 @@ -265,23 +274,23 @@ static int btt_log_get_old(struct log_entry *ent)
867 * the next time, the following logic works out to put this
868 * (next) entry into [1]
869 */
870 - if (ent[0].seq == 0) {
871 - ent[0].seq = cpu_to_le32(1);
872 + if (log_seq(log, idx0) == 0) {
873 + log->ent[idx0].seq = cpu_to_le32(1);
874 return 0;
875 }
876
877 - if (ent[0].seq == ent[1].seq)
878 + if (log_seq(log, idx0) == log_seq(log, idx1))
879 return -EINVAL;
880 - if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
881 + if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
882 return -EINVAL;
883
884 - if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
885 - if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
886 + if (log_seq(log, idx0) < log_seq(log, idx1)) {
887 + if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
888 old = 0;
889 else
890 old = 1;
891 } else {
892 - if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
893 + if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
894 old = 1;
895 else
896 old = 0;
897 @@ -306,17 +315,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
898 {
899 int ret;
900 int old_ent, ret_ent;
901 - struct log_entry log[2];
902 + struct log_group log;
903
904 - ret = btt_log_read_pair(arena, lane, log);
905 + ret = btt_log_group_read(arena, lane, &log);
906 if (ret)
907 return -EIO;
908
909 - old_ent = btt_log_get_old(log);
910 + old_ent = btt_log_get_old(arena, &log);
911 if (old_ent < 0 || old_ent > 1) {
912 dev_info(to_dev(arena),
913 "log corruption (%d): lane %d seq [%d, %d]\n",
914 - old_ent, lane, log[0].seq, log[1].seq);
915 + old_ent, lane, log.ent[arena->log_index[0]].seq,
916 + log.ent[arena->log_index[1]].seq);
917 /* TODO set error state? */
918 return -EIO;
919 }
920 @@ -324,7 +334,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
921 ret_ent = (old_flag ? old_ent : (1 - old_ent));
922
923 if (ent != NULL)
924 - memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
925 + memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
926
927 return ret_ent;
928 }
929 @@ -338,17 +348,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
930 u32 sub, struct log_entry *ent)
931 {
932 int ret;
933 - /*
934 - * Ignore the padding in log_entry for calculating log_half.
935 - * The entry is 'committed' when we write the sequence number,
936 - * and we want to ensure that that is the last thing written.
937 - * We don't bother writing the padding as that would be extra
938 - * media wear and write amplification
939 - */
940 - unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
941 - u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
942 + u32 group_slot = arena->log_index[sub];
943 + unsigned int log_half = LOG_ENT_SIZE / 2;
944 void *src = ent;
945 + u64 ns_off;
946
947 + ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
948 + (group_slot * LOG_ENT_SIZE);
949 /* split the 16B write into atomic, durable halves */
950 ret = arena_write_bytes(arena, ns_off, src, log_half);
951 if (ret)
952 @@ -419,16 +425,16 @@ static int btt_log_init(struct arena_info *arena)
953 {
954 int ret;
955 u32 i;
956 - struct log_entry log, zerolog;
957 + struct log_entry ent, zerolog;
958
959 memset(&zerolog, 0, sizeof(zerolog));
960
961 for (i = 0; i < arena->nfree; i++) {
962 - log.lba = cpu_to_le32(i);
963 - log.old_map = cpu_to_le32(arena->external_nlba + i);
964 - log.new_map = cpu_to_le32(arena->external_nlba + i);
965 - log.seq = cpu_to_le32(LOG_SEQ_INIT);
966 - ret = __btt_log_write(arena, i, 0, &log);
967 + ent.lba = cpu_to_le32(i);
968 + ent.old_map = cpu_to_le32(arena->external_nlba + i);
969 + ent.new_map = cpu_to_le32(arena->external_nlba + i);
970 + ent.seq = cpu_to_le32(LOG_SEQ_INIT);
971 + ret = __btt_log_write(arena, i, 0, &ent);
972 if (ret)
973 return ret;
974 ret = __btt_log_write(arena, i, 1, &zerolog);
975 @@ -490,6 +496,123 @@ static int btt_freelist_init(struct arena_info *arena)
976 return 0;
977 }
978
979 +static bool ent_is_padding(struct log_entry *ent)
980 +{
981 + return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
982 + && (ent->seq == 0);
983 +}
984 +
985 +/*
986 + * Detecting valid log indices: We read a log group (see the comments in btt.h
987 + * for a description of a 'log_group' and its 'slots'), and iterate over its
988 + * four slots. We expect that a padding slot will be all-zeroes, and use this
989 + * to detect a padding slot vs. an actual entry.
990 + *
991 + * If a log_group is in the initial state, i.e. hasn't been used since the
992 + * creation of this BTT layout, it will have three of the four slots with
993 + * zeroes. We skip over these log_groups for the detection of log_index. If
994 + * all log_groups are in the initial state (i.e. the BTT has never been
995 + * written to), it is safe to assume the 'new format' of log entries in slots
996 + * (0, 1).
997 + */
998 +static int log_set_indices(struct arena_info *arena)
999 +{
1000 + bool idx_set = false, initial_state = true;
1001 + int ret, log_index[2] = {-1, -1};
1002 + u32 i, j, next_idx = 0;
1003 + struct log_group log;
1004 + u32 pad_count = 0;
1005 +
1006 + for (i = 0; i < arena->nfree; i++) {
1007 + ret = btt_log_group_read(arena, i, &log);
1008 + if (ret < 0)
1009 + return ret;
1010 +
1011 + for (j = 0; j < 4; j++) {
1012 + if (!idx_set) {
1013 + if (ent_is_padding(&log.ent[j])) {
1014 + pad_count++;
1015 + continue;
1016 + } else {
1017 + /* Skip if index has been recorded */
1018 + if ((next_idx == 1) &&
1019 + (j == log_index[0]))
1020 + continue;
1021 + /* valid entry, record index */
1022 + log_index[next_idx] = j;
1023 + next_idx++;
1024 + }
1025 + if (next_idx == 2) {
1026 + /* two valid entries found */
1027 + idx_set = true;
1028 + } else if (next_idx > 2) {
1029 + /* too many valid indices */
1030 + return -ENXIO;
1031 + }
1032 + } else {
1033 + /*
1034 + * once the indices have been set, just verify
1035 + * that all subsequent log groups are either in
1036 + * their initial state or follow the same
1037 + * indices.
1038 + */
1039 + if (j == log_index[0]) {
1040 + /* entry must be 'valid' */
1041 + if (ent_is_padding(&log.ent[j]))
1042 + return -ENXIO;
1043 + } else if (j == log_index[1]) {
1044 + ;
1045 + /*
1046 + * log_index[1] can be padding if the
1047 + * lane never got used and it is still
1048 + * in the initial state (three 'padding'
1049 + * entries)
1050 + */
1051 + } else {
1052 + /* entry must be invalid (padding) */
1053 + if (!ent_is_padding(&log.ent[j]))
1054 + return -ENXIO;
1055 + }
1056 + }
1057 + }
1058 + /*
1059 + * If any of the log_groups have more than one valid,
1060 + * non-padding entry, then the we are no longer in the
1061 + * initial_state
1062 + */
1063 + if (pad_count < 3)
1064 + initial_state = false;
1065 + pad_count = 0;
1066 + }
1067 +
1068 + if (!initial_state && !idx_set)
1069 + return -ENXIO;
1070 +
1071 + /*
1072 + * If all the entries in the log were in the initial state,
1073 + * assume new padding scheme
1074 + */
1075 + if (initial_state)
1076 + log_index[1] = 1;
1077 +
1078 + /*
1079 + * Only allow the known permutations of log/padding indices,
1080 + * i.e. (0, 1), and (0, 2)
1081 + */
1082 + if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
1083 + ; /* known index possibilities */
1084 + else {
1085 + dev_err(to_dev(arena), "Found an unknown padding scheme\n");
1086 + return -ENXIO;
1087 + }
1088 +
1089 + arena->log_index[0] = log_index[0];
1090 + arena->log_index[1] = log_index[1];
1091 + dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
1092 + dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
1093 + return 0;
1094 +}
1095 +
1096 static int btt_rtt_init(struct arena_info *arena)
1097 {
1098 arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
1099 @@ -545,8 +668,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
1100 available -= 2 * BTT_PG_SIZE;
1101
1102 /* The log takes a fixed amount of space based on nfree */
1103 - logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
1104 - BTT_PG_SIZE);
1105 + logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
1106 available -= logsize;
1107
1108 /* Calculate optimal split between map and data area */
1109 @@ -563,6 +685,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
1110 arena->mapoff = arena->dataoff + datasize;
1111 arena->logoff = arena->mapoff + mapsize;
1112 arena->info2off = arena->logoff + logsize;
1113 +
1114 + /* Default log indices are (0,1) */
1115 + arena->log_index[0] = 0;
1116 + arena->log_index[1] = 1;
1117 return arena;
1118 }
1119
1120 @@ -653,6 +779,13 @@ static int discover_arenas(struct btt *btt)
1121 arena->external_lba_start = cur_nlba;
1122 parse_arena_meta(arena, super, cur_off);
1123
1124 + ret = log_set_indices(arena);
1125 + if (ret) {
1126 + dev_err(to_dev(arena),
1127 + "Unable to deduce log/padding indices\n");
1128 + goto out;
1129 + }
1130 +
1131 ret = btt_freelist_init(arena);
1132 if (ret)
1133 goto out;
1134 diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
1135 index b2f8651e5395..0f80b6b3d4a3 100644
1136 --- a/drivers/nvdimm/btt.h
1137 +++ b/drivers/nvdimm/btt.h
1138 @@ -26,6 +26,7 @@
1139 #define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
1140 #define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
1141 #define MAP_ENT_NORMAL 0xC0000000
1142 +#define LOG_GRP_SIZE sizeof(struct log_group)
1143 #define LOG_ENT_SIZE sizeof(struct log_entry)
1144 #define ARENA_MIN_SIZE (1UL << 24) /* 16 MB */
1145 #define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */
1146 @@ -44,12 +45,52 @@ enum btt_init_state {
1147 INIT_READY
1148 };
1149
1150 +/*
1151 + * A log group represents one log 'lane', and consists of four log entries.
1152 + * Two of the four entries are valid entries, and the remaining two are
1153 + * padding. Due to an old bug in the padding location, we need to perform a
1154 + * test to determine the padding scheme being used, and use that scheme
1155 + * thereafter.
1156 + *
1157 + * In kernels prior to 4.15, 'log group' would have actual log entries at
1158 + * indices (0, 2) and padding at indices (1, 3), where as the correct/updated
1159 + * format has log entries at indices (0, 1) and padding at indices (2, 3).
1160 + *
1161 + * Old (pre 4.15) format:
1162 + * +-----------------+-----------------+
1163 + * | ent[0] | ent[1] |
1164 + * | 16B | 16B |
1165 + * | lba/old/new/seq | pad |
1166 + * +-----------------------------------+
1167 + * | ent[2] | ent[3] |
1168 + * | 16B | 16B |
1169 + * | lba/old/new/seq | pad |
1170 + * +-----------------+-----------------+
1171 + *
1172 + * New format:
1173 + * +-----------------+-----------------+
1174 + * | ent[0] | ent[1] |
1175 + * | 16B | 16B |
1176 + * | lba/old/new/seq | lba/old/new/seq |
1177 + * +-----------------------------------+
1178 + * | ent[2] | ent[3] |
1179 + * | 16B | 16B |
1180 + * | pad | pad |
1181 + * +-----------------+-----------------+
1182 + *
1183 + * We detect during start-up which format is in use, and set
1184 + * arena->log_index[(0, 1)] with the detected format.
1185 + */
1186 +
1187 struct log_entry {
1188 __le32 lba;
1189 __le32 old_map;
1190 __le32 new_map;
1191 __le32 seq;
1192 - __le64 padding[2];
1193 +};
1194 +
1195 +struct log_group {
1196 + struct log_entry ent[4];
1197 };
1198
1199 struct btt_sb {
1200 @@ -117,6 +158,7 @@ struct aligned_lock {
1201 * @list: List head for list of arenas
1202 * @debugfs_dir: Debugfs dentry
1203 * @flags: Arena flags - may signify error states.
1204 + * @log_index: Indices of the valid log entries in a log_group
1205 *
1206 * arena_info is a per-arena handle. Once an arena is narrowed down for an
1207 * IO, this struct is passed around for the duration of the IO.
1208 @@ -147,6 +189,7 @@ struct arena_info {
1209 struct dentry *debugfs_dir;
1210 /* Arena flags */
1211 u32 flags;
1212 + int log_index[2];
1213 };
1214
1215 /**
1216 diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
1217 index a268f4d6f3e9..48a365e303e5 100644
1218 --- a/drivers/phy/phy-core.c
1219 +++ b/drivers/phy/phy-core.c
1220 @@ -395,6 +395,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
1221 if (ret)
1222 return ERR_PTR(-ENODEV);
1223
1224 + /* This phy type handled by the usb-phy subsystem for now */
1225 + if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
1226 + return ERR_PTR(-ENODEV);
1227 +
1228 mutex_lock(&phy_provider_mutex);
1229 phy_provider = of_phy_provider_lookup(args.np);
1230 if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
1231 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
1232 index 99623701fc3d..0b8db8a74d50 100644
1233 --- a/drivers/scsi/hpsa.c
1234 +++ b/drivers/scsi/hpsa.c
1235 @@ -3857,6 +3857,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
1236 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
1237 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
1238 volume_offline = hpsa_volume_offline(h, scsi3addr);
1239 + this_device->volume_offline = volume_offline;
1240 if (volume_offline == HPSA_LV_FAILED) {
1241 rc = HPSA_LV_FAILED;
1242 dev_err(&h->pdev->dev,
1243 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
1244 index 184c7db1e0ca..cd9537ddc19f 100644
1245 --- a/drivers/scsi/sg.c
1246 +++ b/drivers/scsi/sg.c
1247 @@ -149,7 +149,6 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
1248 struct list_head rq_list; /* head of request list */
1249 struct fasync_struct *async_qp; /* used by asynchronous notification */
1250 Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
1251 - char low_dma; /* as in parent but possibly overridden to 1 */
1252 char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
1253 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
1254 unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
1255 @@ -922,24 +921,14 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1256 /* strange ..., for backward compatibility */
1257 return sfp->timeout_user;
1258 case SG_SET_FORCE_LOW_DMA:
1259 - result = get_user(val, ip);
1260 - if (result)
1261 - return result;
1262 - if (val) {
1263 - sfp->low_dma = 1;
1264 - if ((0 == sfp->low_dma) && !sfp->res_in_use) {
1265 - val = (int) sfp->reserve.bufflen;
1266 - sg_remove_scat(sfp, &sfp->reserve);
1267 - sg_build_reserve(sfp, val);
1268 - }
1269 - } else {
1270 - if (atomic_read(&sdp->detaching))
1271 - return -ENODEV;
1272 - sfp->low_dma = sdp->device->host->unchecked_isa_dma;
1273 - }
1274 + /*
1275 + * N.B. This ioctl never worked properly, but failed to
1276 + * return an error value. So returning '0' to keep compability
1277 + * with legacy applications.
1278 + */
1279 return 0;
1280 case SG_GET_LOW_DMA:
1281 - return put_user((int) sfp->low_dma, ip);
1282 + return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
1283 case SG_GET_SCSI_ID:
1284 if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
1285 return -EFAULT;
1286 @@ -1860,6 +1849,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1287 int sg_tablesize = sfp->parentdp->sg_tablesize;
1288 int blk_size = buff_size, order;
1289 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1290 + struct sg_device *sdp = sfp->parentdp;
1291
1292 if (blk_size < 0)
1293 return -EFAULT;
1294 @@ -1885,7 +1875,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1295 scatter_elem_sz_prev = num;
1296 }
1297
1298 - if (sfp->low_dma)
1299 + if (sdp->device->host->unchecked_isa_dma)
1300 gfp_mask |= GFP_DMA;
1301
1302 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1303 @@ -2148,8 +2138,6 @@ sg_add_sfp(Sg_device * sdp)
1304 sfp->timeout = SG_DEFAULT_TIMEOUT;
1305 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
1306 sfp->force_packid = SG_DEF_FORCE_PACK_ID;
1307 - sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
1308 - sdp->device->host->unchecked_isa_dma : 1;
1309 sfp->cmd_q = SG_DEF_COMMAND_Q;
1310 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
1311 sfp->parentdp = sdp;
1312 @@ -2608,7 +2596,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
1313 jiffies_to_msecs(fp->timeout),
1314 fp->reserve.bufflen,
1315 (int) fp->reserve.k_use_sg,
1316 - (int) fp->low_dma);
1317 + (int) sdp->device->host->unchecked_isa_dma);
1318 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
1319 (int) fp->cmd_q, (int) fp->force_packid,
1320 (int) fp->keep_orphan);
1321 diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
1322 index c404017c1b5a..b96e5b189269 100644
1323 --- a/drivers/usb/usbip/vhci_sysfs.c
1324 +++ b/drivers/usb/usbip/vhci_sysfs.c
1325 @@ -361,6 +361,7 @@ static void set_status_attr(int id)
1326 status->attr.attr.name = status->name;
1327 status->attr.attr.mode = S_IRUGO;
1328 status->attr.show = status_show;
1329 + sysfs_attr_init(&status->attr.attr);
1330 }
1331
1332 static int init_status_attrs(void)
1333 diff --git a/fs/pipe.c b/fs/pipe.c
1334 index 8e0d9f26dfad..9faecf1b4a27 100644
1335 --- a/fs/pipe.c
1336 +++ b/fs/pipe.c
1337 @@ -1018,13 +1018,19 @@ const struct file_operations pipefifo_fops = {
1338
1339 /*
1340 * Currently we rely on the pipe array holding a power-of-2 number
1341 - * of pages.
1342 + * of pages. Returns 0 on error.
1343 */
1344 static inline unsigned int round_pipe_size(unsigned int size)
1345 {
1346 unsigned long nr_pages;
1347
1348 + if (size < pipe_min_size)
1349 + size = pipe_min_size;
1350 +
1351 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1352 + if (nr_pages == 0)
1353 + return 0;
1354 +
1355 return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
1356 }
1357
1358 @@ -1040,6 +1046,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
1359 long ret = 0;
1360
1361 size = round_pipe_size(arg);
1362 + if (size == 0)
1363 + return -EINVAL;
1364 nr_pages = size >> PAGE_SHIFT;
1365
1366 if (!nr_pages)
1367 @@ -1123,13 +1131,18 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
1368 int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
1369 size_t *lenp, loff_t *ppos)
1370 {
1371 + unsigned int rounded_pipe_max_size;
1372 int ret;
1373
1374 ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
1375 if (ret < 0 || !write)
1376 return ret;
1377
1378 - pipe_max_size = round_pipe_size(pipe_max_size);
1379 + rounded_pipe_max_size = round_pipe_size(pipe_max_size);
1380 + if (rounded_pipe_max_size == 0)
1381 + return -EINVAL;
1382 +
1383 + pipe_max_size = rounded_pipe_max_size;
1384 return ret;
1385 }
1386
1387 diff --git a/fs/proc/array.c b/fs/proc/array.c
1388 index c932ec454625..794b52a6c20d 100644
1389 --- a/fs/proc/array.c
1390 +++ b/fs/proc/array.c
1391 @@ -423,8 +423,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
1392 * safe because the task has stopped executing permanently.
1393 */
1394 if (permitted && (task->flags & PF_DUMPCORE)) {
1395 - eip = KSTK_EIP(task);
1396 - esp = KSTK_ESP(task);
1397 + if (try_get_task_stack(task)) {
1398 + eip = KSTK_EIP(task);
1399 + esp = KSTK_ESP(task);
1400 + put_task_stack(task);
1401 + }
1402 }
1403 }
1404
1405 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
1406 index 6f8fbcf10dfb..a3d04934aa96 100644
1407 --- a/include/linux/vermagic.h
1408 +++ b/include/linux/vermagic.h
1409 @@ -24,10 +24,16 @@
1410 #ifndef MODULE_ARCH_VERMAGIC
1411 #define MODULE_ARCH_VERMAGIC ""
1412 #endif
1413 +#ifdef RETPOLINE
1414 +#define MODULE_VERMAGIC_RETPOLINE "retpoline "
1415 +#else
1416 +#define MODULE_VERMAGIC_RETPOLINE ""
1417 +#endif
1418
1419 #define VERMAGIC_STRING \
1420 UTS_RELEASE " " \
1421 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
1422 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
1423 - MODULE_ARCH_VERMAGIC
1424 + MODULE_ARCH_VERMAGIC \
1425 + MODULE_VERMAGIC_RETPOLINE
1426
1427 diff --git a/include/scsi/sg.h b/include/scsi/sg.h
1428 index 3afec7032448..20bc71c3e0b8 100644
1429 --- a/include/scsi/sg.h
1430 +++ b/include/scsi/sg.h
1431 @@ -197,7 +197,6 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
1432 #define SG_DEFAULT_RETRIES 0
1433
1434 /* Defaults, commented if they differ from original sg driver */
1435 -#define SG_DEF_FORCE_LOW_DMA 0 /* was 1 -> memory below 16MB on i386 */
1436 #define SG_DEF_FORCE_PACK_ID 0
1437 #define SG_DEF_KEEP_ORPHAN 0
1438 #define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */
1439 diff --git a/kernel/futex.c b/kernel/futex.c
1440 index 88bad86180ac..bb2265ae5cbc 100644
1441 --- a/kernel/futex.c
1442 +++ b/kernel/futex.c
1443 @@ -1711,6 +1711,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1444 struct futex_q *this, *next;
1445 WAKE_Q(wake_q);
1446
1447 + if (nr_wake < 0 || nr_requeue < 0)
1448 + return -EINVAL;
1449 +
1450 if (requeue_pi) {
1451 /*
1452 * Requeue PI only works on two distinct uaddrs. This
1453 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
1454 index df5c32a0c6ed..3042881169b4 100644
1455 --- a/kernel/sched/deadline.c
1456 +++ b/kernel/sched/deadline.c
1457 @@ -723,6 +723,8 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1458 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1459 return;
1460 dl_se->dl_throttled = 1;
1461 + if (dl_se->runtime > 0)
1462 + dl_se->runtime = 0;
1463 }
1464 }
1465
1466 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
1467 index e872f7f05e8a..2d5cc7dfee14 100644
1468 --- a/kernel/time/timer.c
1469 +++ b/kernel/time/timer.c
1470 @@ -1696,7 +1696,7 @@ void run_local_timers(void)
1471 hrtimer_run_queues();
1472 /* Raise the softirq only if required. */
1473 if (time_before(jiffies, base->clk)) {
1474 - if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
1475 + if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
1476 return;
1477 /* CPU is awake, so check the deferrable base. */
1478 base++;
1479 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
1480 index 03c0a48c3ac4..9549ed120556 100644
1481 --- a/kernel/trace/trace_events.c
1482 +++ b/kernel/trace/trace_events.c
1483 @@ -2200,6 +2200,7 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
1484 {
1485 struct trace_event_call *call, *p;
1486 const char *last_system = NULL;
1487 + bool first = false;
1488 int last_i;
1489 int i;
1490
1491 @@ -2207,15 +2208,28 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
1492 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1493 /* events are usually grouped together with systems */
1494 if (!last_system || call->class->system != last_system) {
1495 + first = true;
1496 last_i = 0;
1497 last_system = call->class->system;
1498 }
1499
1500 + /*
1501 + * Since calls are grouped by systems, the likelyhood that the
1502 + * next call in the iteration belongs to the same system as the
1503 + * previous call is high. As an optimization, we skip seaching
1504 + * for a map[] that matches the call's system if the last call
1505 + * was from the same system. That's what last_i is for. If the
1506 + * call has the same system as the previous call, then last_i
1507 + * will be the index of the first map[] that has a matching
1508 + * system.
1509 + */
1510 for (i = last_i; i < len; i++) {
1511 if (call->class->system == map[i]->system) {
1512 /* Save the first system if need be */
1513 - if (!last_i)
1514 + if (first) {
1515 last_i = i;
1516 + first = false;
1517 + }
1518 update_event_printk(call, map[i]);
1519 }
1520 }
1521 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
1522 index 181c2ad0cb54..ebfea5f94b66 100644
1523 --- a/kernel/workqueue.c
1524 +++ b/kernel/workqueue.c
1525 @@ -48,6 +48,7 @@
1526 #include <linux/nodemask.h>
1527 #include <linux/moduleparam.h>
1528 #include <linux/uaccess.h>
1529 +#include <linux/nmi.h>
1530
1531 #include "workqueue_internal.h"
1532
1533 @@ -4424,6 +4425,12 @@ void show_workqueue_state(void)
1534 if (pwq->nr_active || !list_empty(&pwq->delayed_works))
1535 show_pwq(pwq);
1536 spin_unlock_irqrestore(&pwq->pool->lock, flags);
1537 + /*
1538 + * We could be printing a lot from atomic context, e.g.
1539 + * sysrq-t -> show_workqueue_state(). Avoid triggering
1540 + * hard lockup.
1541 + */
1542 + touch_nmi_watchdog();
1543 }
1544 }
1545
1546 @@ -4451,6 +4458,12 @@ void show_workqueue_state(void)
1547 pr_cont("\n");
1548 next_pool:
1549 spin_unlock_irqrestore(&pool->lock, flags);
1550 + /*
1551 + * We could be printing a lot from atomic context, e.g.
1552 + * sysrq-t -> show_workqueue_state(). Avoid triggering
1553 + * hard lockup.
1554 + */
1555 + touch_nmi_watchdog();
1556 }
1557
1558 rcu_read_unlock_sched();
1559 diff --git a/net/key/af_key.c b/net/key/af_key.c
1560 index 94bf810ad242..6482b001f19a 100644
1561 --- a/net/key/af_key.c
1562 +++ b/net/key/af_key.c
1563 @@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
1564 #endif
1565 int len;
1566
1567 + if (sp->sadb_address_len <
1568 + DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
1569 + sizeof(uint64_t)))
1570 + return -EINVAL;
1571 +
1572 switch (addr->sa_family) {
1573 case AF_INET:
1574 len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
1575 @@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
1576 uint16_t ext_type;
1577 int ext_len;
1578
1579 + if (len < sizeof(*ehdr))
1580 + return -EINVAL;
1581 +
1582 ext_len = ehdr->sadb_ext_len;
1583 ext_len *= sizeof(uint64_t);
1584 ext_type = ehdr->sadb_ext_type;
1585 diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
1586 index 1bf949c43b76..f6ab3ccf698f 100644
1587 --- a/scripts/gdb/linux/tasks.py
1588 +++ b/scripts/gdb/linux/tasks.py
1589 @@ -96,6 +96,8 @@ def get_thread_info(task):
1590 thread_info_addr = task.address + ia64_task_size
1591 thread_info = thread_info_addr.cast(thread_info_ptr_type)
1592 else:
1593 + if task.type.fields()[0].type == thread_info_type.get_type():
1594 + return task['thread_info']
1595 thread_info = task['stack'].cast(thread_info_ptr_type)
1596 return thread_info.dereference()
1597
1598 diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
1599 index e685e779a4b8..9306604f5070 100644
1600 --- a/sound/core/pcm_lib.c
1601 +++ b/sound/core/pcm_lib.c
1602 @@ -578,7 +578,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
1603 {
1604 u_int64_t n = (u_int64_t) a * b;
1605 if (c == 0) {
1606 - snd_BUG_ON(!n);
1607 *r = 0;
1608 return UINT_MAX;
1609 }
1610 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
1611 index 45ef5915462c..16580a82e1c8 100644
1612 --- a/sound/core/seq/seq_clientmgr.c
1613 +++ b/sound/core/seq/seq_clientmgr.c
1614 @@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
1615 rwlock_init(&client->ports_lock);
1616 mutex_init(&client->ports_mutex);
1617 INIT_LIST_HEAD(&client->ports_list_head);
1618 + mutex_init(&client->ioctl_mutex);
1619
1620 /* find free slot in the client table */
1621 spin_lock_irqsave(&clients_lock, flags);
1622 @@ -2127,7 +2128,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd,
1623 return -EFAULT;
1624 }
1625
1626 + mutex_lock(&client->ioctl_mutex);
1627 err = handler->func(client, &buf);
1628 + mutex_unlock(&client->ioctl_mutex);
1629 if (err >= 0) {
1630 /* Some commands includes a bug in 'dir' field. */
1631 if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
1632 diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h
1633 index c6614254ef8a..0611e1e0ed5b 100644
1634 --- a/sound/core/seq/seq_clientmgr.h
1635 +++ b/sound/core/seq/seq_clientmgr.h
1636 @@ -61,6 +61,7 @@ struct snd_seq_client {
1637 struct list_head ports_list_head;
1638 rwlock_t ports_lock;
1639 struct mutex ports_mutex;
1640 + struct mutex ioctl_mutex;
1641 int convert32; /* convert 32->64bit */
1642
1643 /* output pool */
1644 diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
1645 index 80bbadc83721..d6e079f4ec09 100644
1646 --- a/sound/pci/hda/patch_cirrus.c
1647 +++ b/sound/pci/hda/patch_cirrus.c
1648 @@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
1649 /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
1650
1651 /* codec SSID */
1652 + SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
1653 SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
1654 SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
1655 SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
1656 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1657 index 4ef3b0067876..71a058fcf884 100644
1658 --- a/sound/pci/hda/patch_realtek.c
1659 +++ b/sound/pci/hda/patch_realtek.c
1660 @@ -5617,6 +5617,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1661 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
1662 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
1663 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
1664 + SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
1665 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
1666 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
1667 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
1668 diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
1669 index d897702ce742..faacf0c89976 100644
1670 --- a/tools/objtool/elf.c
1671 +++ b/tools/objtool/elf.c
1672 @@ -26,6 +26,7 @@
1673 #include <stdlib.h>
1674 #include <string.h>
1675 #include <unistd.h>
1676 +#include <errno.h>
1677
1678 #include "elf.h"
1679 #include "warn.h"
1680 @@ -370,7 +371,8 @@ struct elf *elf_open(const char *name)
1681
1682 elf->fd = open(name, O_RDONLY);
1683 if (elf->fd == -1) {
1684 - perror("open");
1685 + fprintf(stderr, "objtool: Can't open '%s': %s\n",
1686 + name, strerror(errno));
1687 goto err;
1688 }
1689
1690 diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
1691 index cffdd9cf3ebf..ff375310efe4 100644
1692 --- a/tools/perf/Makefile.config
1693 +++ b/tools/perf/Makefile.config
1694 @@ -19,18 +19,18 @@ CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
1695
1696 include $(srctree)/tools/scripts/Makefile.arch
1697
1698 -$(call detected_var,ARCH)
1699 +$(call detected_var,SRCARCH)
1700
1701 NO_PERF_REGS := 1
1702
1703 # Additional ARCH settings for ppc
1704 -ifeq ($(ARCH),powerpc)
1705 +ifeq ($(SRCARCH),powerpc)
1706 NO_PERF_REGS := 0
1707 LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
1708 endif
1709
1710 # Additional ARCH settings for x86
1711 -ifeq ($(ARCH),x86)
1712 +ifeq ($(SRCARCH),x86)
1713 $(call detected,CONFIG_X86)
1714 ifeq (${IS_64_BIT}, 1)
1715 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
1716 @@ -43,12 +43,12 @@ ifeq ($(ARCH),x86)
1717 NO_PERF_REGS := 0
1718 endif
1719
1720 -ifeq ($(ARCH),arm)
1721 +ifeq ($(SRCARCH),arm)
1722 NO_PERF_REGS := 0
1723 LIBUNWIND_LIBS = -lunwind -lunwind-arm
1724 endif
1725
1726 -ifeq ($(ARCH),arm64)
1727 +ifeq ($(SRCARCH),arm64)
1728 NO_PERF_REGS := 0
1729 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
1730 endif
1731 @@ -61,7 +61,7 @@ endif
1732 # Disable it on all other architectures in case libdw unwind
1733 # support is detected in system. Add supported architectures
1734 # to the check.
1735 -ifneq ($(ARCH),$(filter $(ARCH),x86 arm))
1736 +ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm))
1737 NO_LIBDW_DWARF_UNWIND := 1
1738 endif
1739
1740 @@ -115,9 +115,9 @@ endif
1741 FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
1742 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
1743
1744 -FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
1745 +FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
1746 # include ARCH specific config
1747 --include $(src-perf)/arch/$(ARCH)/Makefile
1748 +-include $(src-perf)/arch/$(SRCARCH)/Makefile
1749
1750 ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
1751 CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
1752 @@ -205,12 +205,12 @@ ifeq ($(DEBUG),0)
1753 endif
1754
1755 CFLAGS += -I$(src-perf)/util/include
1756 -CFLAGS += -I$(src-perf)/arch/$(ARCH)/include
1757 +CFLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
1758 CFLAGS += -I$(srctree)/tools/include/uapi
1759 CFLAGS += -I$(srctree)/tools/include/
1760 -CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi
1761 -CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/
1762 -CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/
1763 +CFLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi
1764 +CFLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/
1765 +CFLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/
1766
1767 # $(obj-perf) for generated common-cmds.h
1768 # $(obj-perf)/util for generated bison/flex headers
1769 @@ -321,7 +321,7 @@ ifndef NO_LIBELF
1770
1771 ifndef NO_DWARF
1772 ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
1773 - msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
1774 + msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
1775 NO_DWARF := 1
1776 else
1777 CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
1778 @@ -346,7 +346,7 @@ ifndef NO_LIBELF
1779 CFLAGS += -DHAVE_BPF_PROLOGUE
1780 $(call detected,CONFIG_BPF_PROLOGUE)
1781 else
1782 - msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset());
1783 + msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset());
1784 endif
1785 else
1786 msg := $(warning DWARF support is off, BPF prologue is disabled);
1787 @@ -372,7 +372,7 @@ ifdef PERF_HAVE_JITDUMP
1788 endif
1789 endif
1790
1791 -ifeq ($(ARCH),powerpc)
1792 +ifeq ($(SRCARCH),powerpc)
1793 ifndef NO_DWARF
1794 CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
1795 endif
1796 @@ -453,7 +453,7 @@ else
1797 endif
1798
1799 ifndef NO_LOCAL_LIBUNWIND
1800 - ifeq ($(ARCH),$(filter $(ARCH),arm arm64))
1801 + ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
1802 $(call feature_check,libunwind-debug-frame)
1803 ifneq ($(feature-libunwind-debug-frame), 1)
1804 msg := $(warning No debug_frame support found in libunwind);
1805 @@ -717,7 +717,7 @@ ifeq (${IS_64_BIT}, 1)
1806 NO_PERF_READ_VDSO32 := 1
1807 endif
1808 endif
1809 - ifneq ($(ARCH), x86)
1810 + ifneq ($(SRCARCH), x86)
1811 NO_PERF_READ_VDSOX32 := 1
1812 endif
1813 ifndef NO_PERF_READ_VDSOX32
1814 @@ -746,7 +746,7 @@ ifdef LIBBABELTRACE
1815 endif
1816
1817 ifndef NO_AUXTRACE
1818 - ifeq ($(ARCH),x86)
1819 + ifeq ($(SRCARCH),x86)
1820 ifeq ($(feature-get_cpuid), 0)
1821 msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
1822 NO_AUXTRACE := 1
1823 @@ -793,7 +793,7 @@ sysconfdir = $(prefix)/etc
1824 ETC_PERFCONFIG = etc/perfconfig
1825 endif
1826 ifndef lib
1827 -ifeq ($(ARCH)$(IS_64_BIT), x861)
1828 +ifeq ($(SRCARCH)$(IS_64_BIT), x861)
1829 lib = lib64
1830 else
1831 lib = lib
1832 diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
1833 index ef52d1e3d431..2b92ffef554b 100644
1834 --- a/tools/perf/Makefile.perf
1835 +++ b/tools/perf/Makefile.perf
1836 @@ -192,7 +192,7 @@ endif
1837
1838 ifeq ($(config),0)
1839 include $(srctree)/tools/scripts/Makefile.arch
1840 --include arch/$(ARCH)/Makefile
1841 +-include arch/$(SRCARCH)/Makefile
1842 endif
1843
1844 # The FEATURE_DUMP_EXPORT holds location of the actual
1845 diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
1846 index 109eb75cf7de..d9b6af837c7d 100644
1847 --- a/tools/perf/arch/Build
1848 +++ b/tools/perf/arch/Build
1849 @@ -1,2 +1,2 @@
1850 libperf-y += common.o
1851 -libperf-y += $(ARCH)/
1852 +libperf-y += $(SRCARCH)/
1853 diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
1854 index 9213a1273697..999a4e878162 100644
1855 --- a/tools/perf/pmu-events/Build
1856 +++ b/tools/perf/pmu-events/Build
1857 @@ -2,7 +2,7 @@ hostprogs := jevents
1858
1859 jevents-y += json.o jsmn.o jevents.o
1860 pmu-events-y += pmu-events.o
1861 -JDIR = pmu-events/arch/$(ARCH)
1862 +JDIR = pmu-events/arch/$(SRCARCH)
1863 JSON = $(shell [ -d $(JDIR) ] && \
1864 find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
1865 #
1866 @@ -10,4 +10,4 @@ JSON = $(shell [ -d $(JDIR) ] && \
1867 # directory and create tables in pmu-events.c.
1868 #
1869 $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
1870 - $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
1871 + $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
1872 diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
1873 index 8a4ce492f7b2..546250a273e7 100644
1874 --- a/tools/perf/tests/Build
1875 +++ b/tools/perf/tests/Build
1876 @@ -71,7 +71,7 @@ $(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/B
1877 $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
1878 $(Q)echo ';' >> $@
1879
1880 -ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc))
1881 +ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
1882 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
1883 endif
1884
1885 diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
1886 index 5337f49db361..28bdb48357f0 100644
1887 --- a/tools/perf/util/header.c
1888 +++ b/tools/perf/util/header.c
1889 @@ -826,7 +826,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
1890
1891 /*
1892 * default get_cpuid(): nothing gets recorded
1893 - * actual implementation must be in arch/$(ARCH)/util/header.c
1894 + * actual implementation must be in arch/$(SRCARCH)/util/header.c
1895 */
1896 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
1897 {