Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0196-4.9.97-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3608 - (show annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 113649 byte(s)
-added kernel-alx-legacy pkg
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index 466c039c622b..5f9e51436a99 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -2640,6 +2640,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6
7 noalign [KNL,ARM]
8
9 + noaltinstr [S390] Disables alternative instructions patching
10 + (CPU alternatives feature).
11 +
12 noapic [SMP,APIC] Tells the kernel to not make use of any
13 IOAPICs that may be present in the system.
14
15 diff --git a/Makefile b/Makefile
16 index 50ae573e8951..ee3e943c3bd9 100644
17 --- a/Makefile
18 +++ b/Makefile
19 @@ -1,6 +1,6 @@
20 VERSION = 4
21 PATCHLEVEL = 9
22 -SUBLEVEL = 96
23 +SUBLEVEL = 97
24 EXTRAVERSION =
25 NAME = Roaring Lionus
26
27 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
28 index 2d2fd79ced9d..34fbbf8fdeaa 100644
29 --- a/arch/mips/Kconfig
30 +++ b/arch/mips/Kconfig
31 @@ -95,6 +95,7 @@ config MIPS_GENERIC
32 select PCI_DRIVERS_GENERIC
33 select PINCTRL
34 select SMP_UP if SMP
35 + select SWAP_IO_SPACE
36 select SYS_HAS_CPU_MIPS32_R1
37 select SYS_HAS_CPU_MIPS32_R2
38 select SYS_HAS_CPU_MIPS32_R6
39 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
40 index 9aa0d04c9dcc..1c4a595e8224 100644
41 --- a/arch/s390/Kconfig
42 +++ b/arch/s390/Kconfig
43 @@ -118,6 +118,7 @@ config S390
44 select GENERIC_CLOCKEVENTS
45 select GENERIC_CPU_AUTOPROBE
46 select GENERIC_CPU_DEVICES if !SMP
47 + select GENERIC_CPU_VULNERABILITIES
48 select GENERIC_FIND_FIRST_BIT
49 select GENERIC_SMP_IDLE_THREAD
50 select GENERIC_TIME_VSYSCALL
51 @@ -704,6 +705,51 @@ config SECCOMP
52
53 If unsure, say Y.
54
55 +config KERNEL_NOBP
56 + def_bool n
57 + prompt "Enable modified branch prediction for the kernel by default"
58 + help
59 + If this option is selected the kernel will switch to a modified
60 + branch prediction mode if the firmware interface is available.
61 + The modified branch prediction mode improves the behaviour in
62 + regard to speculative execution.
63 +
64 + With the option enabled the kernel parameter "nobp=0" or "nospec"
65 + can be used to run the kernel in the normal branch prediction mode.
66 +
67 + With the option disabled the modified branch prediction mode is
68 + enabled with the "nobp=1" kernel parameter.
69 +
70 + If unsure, say N.
71 +
72 +config EXPOLINE
73 + def_bool n
74 + prompt "Avoid speculative indirect branches in the kernel"
75 + help
76 + Compile the kernel with the expoline compiler options to guard
77 + against kernel-to-user data leaks by avoiding speculative indirect
78 + branches.
79 + Requires a compiler with -mindirect-branch=thunk support for full
80 + protection. The kernel may run slower.
81 +
82 + If unsure, say N.
83 +
84 +choice
85 + prompt "Expoline default"
86 + depends on EXPOLINE
87 + default EXPOLINE_FULL
88 +
89 +config EXPOLINE_OFF
90 + bool "spectre_v2=off"
91 +
92 +config EXPOLINE_AUTO
93 + bool "spectre_v2=auto"
94 +
95 +config EXPOLINE_FULL
96 + bool "spectre_v2=on"
97 +
98 +endchoice
99 +
100 endmenu
101
102 menu "Power Management"
103 @@ -753,6 +799,7 @@ config PFAULT
104 config SHARED_KERNEL
105 bool "VM shared kernel support"
106 depends on !JUMP_LABEL
107 + depends on !ALTERNATIVES
108 help
109 Select this option, if you want to share the text segment of the
110 Linux kernel between different VM guests. This reduces memory
111 diff --git a/arch/s390/Makefile b/arch/s390/Makefile
112 index 54e00526b8df..bef67c0f63e2 100644
113 --- a/arch/s390/Makefile
114 +++ b/arch/s390/Makefile
115 @@ -79,6 +79,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
116 cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
117 endif
118
119 +ifdef CONFIG_EXPOLINE
120 + ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
121 + CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
122 + CC_FLAGS_EXPOLINE += -mfunction-return=thunk
123 + CC_FLAGS_EXPOLINE += -mindirect-branch-table
124 + export CC_FLAGS_EXPOLINE
125 + cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
126 + endif
127 +endif
128 +
129 ifdef CONFIG_FUNCTION_TRACER
130 # make use of hotpatch feature if the compiler supports it
131 cc_hotpatch := -mhotpatch=0,3
132 diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
133 new file mode 100644
134 index 000000000000..a72002056b54
135 --- /dev/null
136 +++ b/arch/s390/include/asm/alternative.h
137 @@ -0,0 +1,149 @@
138 +#ifndef _ASM_S390_ALTERNATIVE_H
139 +#define _ASM_S390_ALTERNATIVE_H
140 +
141 +#ifndef __ASSEMBLY__
142 +
143 +#include <linux/types.h>
144 +#include <linux/stddef.h>
145 +#include <linux/stringify.h>
146 +
147 +struct alt_instr {
148 + s32 instr_offset; /* original instruction */
149 + s32 repl_offset; /* offset to replacement instruction */
150 + u16 facility; /* facility bit set for replacement */
151 + u8 instrlen; /* length of original instruction */
152 + u8 replacementlen; /* length of new instruction */
153 +} __packed;
154 +
155 +void apply_alternative_instructions(void);
156 +void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
157 +
158 +/*
159 + * |661: |662: |6620 |663:
160 + * +-----------+---------------------+
161 + * | oldinstr | oldinstr_padding |
162 + * | +----------+----------+
163 + * | | | |
164 + * | | >6 bytes |6/4/2 nops|
165 + * | |6 bytes jg----------->
166 + * +-----------+---------------------+
167 + * ^^ static padding ^^
168 + *
169 + * .altinstr_replacement section
170 + * +---------------------+-----------+
171 + * |6641: |6651:
172 + * | alternative instr 1 |
173 + * +-----------+---------+- - - - - -+
174 + * |6642: |6652: |
175 + * | alternative instr 2 | padding
176 + * +---------------------+- - - - - -+
177 + * ^ runtime ^
178 + *
179 + * .altinstructions section
180 + * +---------------------------------+
181 + * | alt_instr entries for each |
182 + * | alternative instr |
183 + * +---------------------------------+
184 + */
185 +
186 +#define b_altinstr(num) "664"#num
187 +#define e_altinstr(num) "665"#num
188 +
189 +#define e_oldinstr_pad_end "663"
190 +#define oldinstr_len "662b-661b"
191 +#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
192 +#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
193 +#define oldinstr_pad_len(num) \
194 + "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
195 + "((" altinstr_len(num) ")-(" oldinstr_len "))"
196 +
197 +#define INSTR_LEN_SANITY_CHECK(len) \
198 + ".if " len " > 254\n" \
199 + "\t.error \"cpu alternatives does not support instructions " \
200 + "blocks > 254 bytes\"\n" \
201 + ".endif\n" \
202 + ".if (" len ") %% 2\n" \
203 + "\t.error \"cpu alternatives instructions length is odd\"\n" \
204 + ".endif\n"
205 +
206 +#define OLDINSTR_PADDING(oldinstr, num) \
207 + ".if " oldinstr_pad_len(num) " > 6\n" \
208 + "\tjg " e_oldinstr_pad_end "f\n" \
209 + "6620:\n" \
210 + "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
211 + ".else\n" \
212 + "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
213 + "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
214 + "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
215 + ".endif\n"
216 +
217 +#define OLDINSTR(oldinstr, num) \
218 + "661:\n\t" oldinstr "\n662:\n" \
219 + OLDINSTR_PADDING(oldinstr, num) \
220 + e_oldinstr_pad_end ":\n" \
221 + INSTR_LEN_SANITY_CHECK(oldinstr_len)
222 +
223 +#define OLDINSTR_2(oldinstr, num1, num2) \
224 + "661:\n\t" oldinstr "\n662:\n" \
225 + ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
226 + OLDINSTR_PADDING(oldinstr, num2) \
227 + ".else\n" \
228 + OLDINSTR_PADDING(oldinstr, num1) \
229 + ".endif\n" \
230 + e_oldinstr_pad_end ":\n" \
231 + INSTR_LEN_SANITY_CHECK(oldinstr_len)
232 +
233 +#define ALTINSTR_ENTRY(facility, num) \
234 + "\t.long 661b - .\n" /* old instruction */ \
235 + "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
236 + "\t.word " __stringify(facility) "\n" /* facility bit */ \
237 + "\t.byte " oldinstr_total_len "\n" /* source len */ \
238 + "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
239 +
240 +#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
241 + b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
242 + INSTR_LEN_SANITY_CHECK(altinstr_len(num))
243 +
244 +/* alternative assembly primitive: */
245 +#define ALTERNATIVE(oldinstr, altinstr, facility) \
246 + ".pushsection .altinstr_replacement, \"ax\"\n" \
247 + ALTINSTR_REPLACEMENT(altinstr, 1) \
248 + ".popsection\n" \
249 + OLDINSTR(oldinstr, 1) \
250 + ".pushsection .altinstructions,\"a\"\n" \
251 + ALTINSTR_ENTRY(facility, 1) \
252 + ".popsection\n"
253 +
254 +#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
255 + ".pushsection .altinstr_replacement, \"ax\"\n" \
256 + ALTINSTR_REPLACEMENT(altinstr1, 1) \
257 + ALTINSTR_REPLACEMENT(altinstr2, 2) \
258 + ".popsection\n" \
259 + OLDINSTR_2(oldinstr, 1, 2) \
260 + ".pushsection .altinstructions,\"a\"\n" \
261 + ALTINSTR_ENTRY(facility1, 1) \
262 + ALTINSTR_ENTRY(facility2, 2) \
263 + ".popsection\n"
264 +
265 +/*
266 + * Alternative instructions for different CPU types or capabilities.
267 + *
268 + * This allows to use optimized instructions even on generic binary
269 + * kernels.
270 + *
271 + * oldinstr is padded with jump and nops at compile time if altinstr is
272 + * longer. altinstr is padded with jump and nops at run-time during patching.
273 + *
274 + * For non barrier like inlines please define new variants
275 + * without volatile and memory clobber.
276 + */
277 +#define alternative(oldinstr, altinstr, facility) \
278 + asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
279 +
280 +#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
281 + asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
282 + altinstr2, facility2) ::: "memory")
283 +
284 +#endif /* __ASSEMBLY__ */
285 +
286 +#endif /* _ASM_S390_ALTERNATIVE_H */
287 diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
288 index 5c8db3ce61c8..03b2e5bf1206 100644
289 --- a/arch/s390/include/asm/barrier.h
290 +++ b/arch/s390/include/asm/barrier.h
291 @@ -48,6 +48,30 @@ do { \
292 #define __smp_mb__before_atomic() barrier()
293 #define __smp_mb__after_atomic() barrier()
294
295 +/**
296 + * array_index_mask_nospec - generate a mask for array_idx() that is
297 + * ~0UL when the bounds check succeeds and 0 otherwise
298 + * @index: array element index
299 + * @size: number of elements in array
300 + */
301 +#define array_index_mask_nospec array_index_mask_nospec
302 +static inline unsigned long array_index_mask_nospec(unsigned long index,
303 + unsigned long size)
304 +{
305 + unsigned long mask;
306 +
307 + if (__builtin_constant_p(size) && size > 0) {
308 + asm(" clgr %2,%1\n"
309 + " slbgr %0,%0\n"
310 + :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
311 + return mask;
312 + }
313 + asm(" clgr %1,%2\n"
314 + " slbgr %0,%0\n"
315 + :"=d" (mask) : "d" (size), "d" (index) :"cc");
316 + return ~mask;
317 +}
318 +
319 #include <asm-generic/barrier.h>
320
321 #endif /* __ASM_BARRIER_H */
322 diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
323 index 09b406db7529..7a8a1457dbb8 100644
324 --- a/arch/s390/include/asm/facility.h
325 +++ b/arch/s390/include/asm/facility.h
326 @@ -17,6 +17,24 @@
327
328 #define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
329
330 +static inline void __set_facility(unsigned long nr, void *facilities)
331 +{
332 + unsigned char *ptr = (unsigned char *) facilities;
333 +
334 + if (nr >= MAX_FACILITY_BIT)
335 + return;
336 + ptr[nr >> 3] |= 0x80 >> (nr & 7);
337 +}
338 +
339 +static inline void __clear_facility(unsigned long nr, void *facilities)
340 +{
341 + unsigned char *ptr = (unsigned char *) facilities;
342 +
343 + if (nr >= MAX_FACILITY_BIT)
344 + return;
345 + ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
346 +}
347 +
348 static inline int __test_facility(unsigned long nr, void *facilities)
349 {
350 unsigned char *ptr;
351 diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
352 index a41faf34b034..5792590d0e7c 100644
353 --- a/arch/s390/include/asm/kvm_host.h
354 +++ b/arch/s390/include/asm/kvm_host.h
355 @@ -181,7 +181,8 @@ struct kvm_s390_sie_block {
356 __u16 ipa; /* 0x0056 */
357 __u32 ipb; /* 0x0058 */
358 __u32 scaoh; /* 0x005c */
359 - __u8 reserved60; /* 0x0060 */
360 +#define FPF_BPBC 0x20
361 + __u8 fpf; /* 0x0060 */
362 __u8 ecb; /* 0x0061 */
363 __u8 ecb2; /* 0x0062 */
364 #define ECB3_AES 0x04
365 diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
366 index 7b93b78f423c..ad4e0cee1557 100644
367 --- a/arch/s390/include/asm/lowcore.h
368 +++ b/arch/s390/include/asm/lowcore.h
369 @@ -135,7 +135,9 @@ struct lowcore {
370 /* Per cpu primary space access list */
371 __u32 paste[16]; /* 0x0400 */
372
373 - __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
374 + /* br %r1 trampoline */
375 + __u16 br_r1_trampoline; /* 0x0440 */
376 + __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */
377
378 /*
379 * 0xe00 contains the address of the IPL Parameter Information
380 @@ -150,7 +152,8 @@ struct lowcore {
381 __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
382
383 /* Extended facility list */
384 - __u64 stfle_fac_list[32]; /* 0x0f00 */
385 + __u64 stfle_fac_list[16]; /* 0x0f00 */
386 + __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
387 __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
388
389 /* Pointer to vector register save area */
390 diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
391 new file mode 100644
392 index 000000000000..b4bd8c41e9d3
393 --- /dev/null
394 +++ b/arch/s390/include/asm/nospec-branch.h
395 @@ -0,0 +1,17 @@
396 +/* SPDX-License-Identifier: GPL-2.0 */
397 +#ifndef _ASM_S390_EXPOLINE_H
398 +#define _ASM_S390_EXPOLINE_H
399 +
400 +#ifndef __ASSEMBLY__
401 +
402 +#include <linux/types.h>
403 +
404 +extern int nospec_disable;
405 +
406 +void nospec_init_branches(void);
407 +void nospec_auto_detect(void);
408 +void nospec_revert(s32 *start, s32 *end);
409 +
410 +#endif /* __ASSEMBLY__ */
411 +
412 +#endif /* _ASM_S390_EXPOLINE_H */
413 diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
414 index 6bcbbece082b..d5842126ec70 100644
415 --- a/arch/s390/include/asm/processor.h
416 +++ b/arch/s390/include/asm/processor.h
417 @@ -84,6 +84,7 @@ void cpu_detect_mhz_feature(void);
418 extern const struct seq_operations cpuinfo_op;
419 extern int sysctl_ieee_emulation_warnings;
420 extern void execve_tail(void);
421 +extern void __bpon(void);
422
423 /*
424 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
425 @@ -359,6 +360,9 @@ extern void memcpy_absolute(void *, void *, size_t);
426 memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
427 }
428
429 +extern int s390_isolate_bp(void);
430 +extern int s390_isolate_bp_guest(void);
431 +
432 #endif /* __ASSEMBLY__ */
433
434 #endif /* __ASM_S390_PROCESSOR_H */
435 diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
436 index f15c0398c363..84f2ae44b4e9 100644
437 --- a/arch/s390/include/asm/thread_info.h
438 +++ b/arch/s390/include/asm/thread_info.h
439 @@ -79,6 +79,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
440 #define TIF_SECCOMP 5 /* secure computing */
441 #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
442 #define TIF_UPROBE 7 /* breakpointed or single-stepping */
443 +#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
444 +#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
445 #define TIF_31BIT 16 /* 32bit process */
446 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
447 #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
448 @@ -94,6 +96,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
449 #define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
450 #define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
451 #define _TIF_UPROBE _BITUL(TIF_UPROBE)
452 +#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
453 +#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
454 #define _TIF_31BIT _BITUL(TIF_31BIT)
455 #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
456
457 diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
458 index a2ffec4139ad..81c02e198527 100644
459 --- a/arch/s390/include/uapi/asm/kvm.h
460 +++ b/arch/s390/include/uapi/asm/kvm.h
461 @@ -197,6 +197,7 @@ struct kvm_guest_debug_arch {
462 #define KVM_SYNC_VRS (1UL << 6)
463 #define KVM_SYNC_RICCB (1UL << 7)
464 #define KVM_SYNC_FPRS (1UL << 8)
465 +#define KVM_SYNC_BPBC (1UL << 10)
466 /* definition of registers in kvm_run */
467 struct kvm_sync_regs {
468 __u64 prefix; /* prefix register */
469 @@ -217,7 +218,9 @@ struct kvm_sync_regs {
470 };
471 __u8 reserved[512]; /* for future vector expansion */
472 __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
473 - __u8 padding[52]; /* riccb needs to be 64byte aligned */
474 + __u8 bpbc : 1; /* bp mode */
475 + __u8 reserved2 : 7;
476 + __u8 padding1[51]; /* riccb needs to be 64byte aligned */
477 __u8 riccb[64]; /* runtime instrumentation controls block */
478 };
479
480 diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
481 index 1f0fe98f6db9..0501cac2ab95 100644
482 --- a/arch/s390/kernel/Makefile
483 +++ b/arch/s390/kernel/Makefile
484 @@ -42,6 +42,7 @@ ifneq ($(CC_FLAGS_MARCH),-march=z900)
485 CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
486 CFLAGS_sclp.o += -march=z900
487 CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
488 +CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
489 CFLAGS_als.o += -march=z900
490 AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
491 AFLAGS_head.o += -march=z900
492 @@ -57,10 +58,13 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
493 obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
494 obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
495 obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
496 -obj-y += entry.o reipl.o relocate_kernel.o
497 +obj-y += entry.o reipl.o relocate_kernel.o alternative.o
498 +obj-y += nospec-branch.o
499
500 extra-y += head.o head64.o vmlinux.lds
501
502 +CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
503 +
504 obj-$(CONFIG_MODULES) += module.o
505 obj-$(CONFIG_SMP) += smp.o
506 obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
507 diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
508 new file mode 100644
509 index 000000000000..b57b293998dc
510 --- /dev/null
511 +++ b/arch/s390/kernel/alternative.c
512 @@ -0,0 +1,112 @@
513 +#include <linux/module.h>
514 +#include <asm/alternative.h>
515 +#include <asm/facility.h>
516 +#include <asm/nospec-branch.h>
517 +
518 +#define MAX_PATCH_LEN (255 - 1)
519 +
520 +static int __initdata_or_module alt_instr_disabled;
521 +
522 +static int __init disable_alternative_instructions(char *str)
523 +{
524 + alt_instr_disabled = 1;
525 + return 0;
526 +}
527 +
528 +early_param("noaltinstr", disable_alternative_instructions);
529 +
530 +struct brcl_insn {
531 + u16 opc;
532 + s32 disp;
533 +} __packed;
534 +
535 +static u16 __initdata_or_module nop16 = 0x0700;
536 +static u32 __initdata_or_module nop32 = 0x47000000;
537 +static struct brcl_insn __initdata_or_module nop48 = {
538 + 0xc004, 0
539 +};
540 +
541 +static const void *nops[] __initdata_or_module = {
542 + &nop16,
543 + &nop32,
544 + &nop48
545 +};
546 +
547 +static void __init_or_module add_jump_padding(void *insns, unsigned int len)
548 +{
549 + struct brcl_insn brcl = {
550 + 0xc0f4,
551 + len / 2
552 + };
553 +
554 + memcpy(insns, &brcl, sizeof(brcl));
555 + insns += sizeof(brcl);
556 + len -= sizeof(brcl);
557 +
558 + while (len > 0) {
559 + memcpy(insns, &nop16, 2);
560 + insns += 2;
561 + len -= 2;
562 + }
563 +}
564 +
565 +static void __init_or_module add_padding(void *insns, unsigned int len)
566 +{
567 + if (len > 6)
568 + add_jump_padding(insns, len);
569 + else if (len >= 2)
570 + memcpy(insns, nops[len / 2 - 1], len);
571 +}
572 +
573 +static void __init_or_module __apply_alternatives(struct alt_instr *start,
574 + struct alt_instr *end)
575 +{
576 + struct alt_instr *a;
577 + u8 *instr, *replacement;
578 + u8 insnbuf[MAX_PATCH_LEN];
579 +
580 + /*
581 + * The scan order should be from start to end. A later scanned
582 + * alternative code can overwrite previously scanned alternative code.
583 + */
584 + for (a = start; a < end; a++) {
585 + int insnbuf_sz = 0;
586 +
587 + instr = (u8 *)&a->instr_offset + a->instr_offset;
588 + replacement = (u8 *)&a->repl_offset + a->repl_offset;
589 +
590 + if (!__test_facility(a->facility,
591 + S390_lowcore.alt_stfle_fac_list))
592 + continue;
593 +
594 + if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
595 + WARN_ONCE(1, "cpu alternatives instructions length is "
596 + "odd, skipping patching\n");
597 + continue;
598 + }
599 +
600 + memcpy(insnbuf, replacement, a->replacementlen);
601 + insnbuf_sz = a->replacementlen;
602 +
603 + if (a->instrlen > a->replacementlen) {
604 + add_padding(insnbuf + a->replacementlen,
605 + a->instrlen - a->replacementlen);
606 + insnbuf_sz += a->instrlen - a->replacementlen;
607 + }
608 +
609 + s390_kernel_write(instr, insnbuf, insnbuf_sz);
610 + }
611 +}
612 +
613 +void __init_or_module apply_alternatives(struct alt_instr *start,
614 + struct alt_instr *end)
615 +{
616 + if (!alt_instr_disabled)
617 + __apply_alternatives(start, end);
618 +}
619 +
620 +extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
621 +void __init apply_alternative_instructions(void)
622 +{
623 + apply_alternatives(__alt_instructions, __alt_instructions_end);
624 +}
625 diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
626 index 62578989c74d..0c7a7d5d95f1 100644
627 --- a/arch/s390/kernel/early.c
628 +++ b/arch/s390/kernel/early.c
629 @@ -299,6 +299,11 @@ static noinline __init void setup_facility_list(void)
630 {
631 stfle(S390_lowcore.stfle_fac_list,
632 ARRAY_SIZE(S390_lowcore.stfle_fac_list));
633 + memcpy(S390_lowcore.alt_stfle_fac_list,
634 + S390_lowcore.stfle_fac_list,
635 + sizeof(S390_lowcore.alt_stfle_fac_list));
636 + if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
637 + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
638 }
639
640 static __init void detect_diag9c(void)
641 diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
642 index 3bc2825173ef..1996afeb2e81 100644
643 --- a/arch/s390/kernel/entry.S
644 +++ b/arch/s390/kernel/entry.S
645 @@ -105,6 +105,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
646 j 3f
647 1: LAST_BREAK %r14
648 UPDATE_VTIME %r14,%r15,\timer
649 + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
650 2: lg %r15,__LC_ASYNC_STACK # load async stack
651 3: la %r11,STACK_FRAME_OVERHEAD(%r15)
652 .endm
653 @@ -163,6 +164,130 @@ _PIF_WORK = (_PIF_PER_TRAP)
654 tm off+\addr, \mask
655 .endm
656
657 + .macro BPOFF
658 + .pushsection .altinstr_replacement, "ax"
659 +660: .long 0xb2e8c000
660 + .popsection
661 +661: .long 0x47000000
662 + .pushsection .altinstructions, "a"
663 + .long 661b - .
664 + .long 660b - .
665 + .word 82
666 + .byte 4
667 + .byte 4
668 + .popsection
669 + .endm
670 +
671 + .macro BPON
672 + .pushsection .altinstr_replacement, "ax"
673 +662: .long 0xb2e8d000
674 + .popsection
675 +663: .long 0x47000000
676 + .pushsection .altinstructions, "a"
677 + .long 663b - .
678 + .long 662b - .
679 + .word 82
680 + .byte 4
681 + .byte 4
682 + .popsection
683 + .endm
684 +
685 + .macro BPENTER tif_ptr,tif_mask
686 + .pushsection .altinstr_replacement, "ax"
687 +662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
688 + .word 0xc004, 0x0000, 0x0000 # 6 byte nop
689 + .popsection
690 +664: TSTMSK \tif_ptr,\tif_mask
691 + jz . + 8
692 + .long 0xb2e8d000
693 + .pushsection .altinstructions, "a"
694 + .long 664b - .
695 + .long 662b - .
696 + .word 82
697 + .byte 12
698 + .byte 12
699 + .popsection
700 + .endm
701 +
702 + .macro BPEXIT tif_ptr,tif_mask
703 + TSTMSK \tif_ptr,\tif_mask
704 + .pushsection .altinstr_replacement, "ax"
705 +662: jnz . + 8
706 + .long 0xb2e8d000
707 + .popsection
708 +664: jz . + 8
709 + .long 0xb2e8c000
710 + .pushsection .altinstructions, "a"
711 + .long 664b - .
712 + .long 662b - .
713 + .word 82
714 + .byte 8
715 + .byte 8
716 + .popsection
717 + .endm
718 +
719 +#ifdef CONFIG_EXPOLINE
720 +
721 + .macro GEN_BR_THUNK name,reg,tmp
722 + .section .text.\name,"axG",@progbits,\name,comdat
723 + .globl \name
724 + .hidden \name
725 + .type \name,@function
726 +\name:
727 + .cfi_startproc
728 +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
729 + exrl 0,0f
730 +#else
731 + larl \tmp,0f
732 + ex 0,0(\tmp)
733 +#endif
734 + j .
735 +0: br \reg
736 + .cfi_endproc
737 + .endm
738 +
739 + GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
740 + GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
741 + GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
742 +
743 + .macro BASR_R14_R9
744 +0: brasl %r14,__s390x_indirect_jump_r1use_r9
745 + .pushsection .s390_indirect_branches,"a",@progbits
746 + .long 0b-.
747 + .popsection
748 + .endm
749 +
750 + .macro BR_R1USE_R14
751 +0: jg __s390x_indirect_jump_r1use_r14
752 + .pushsection .s390_indirect_branches,"a",@progbits
753 + .long 0b-.
754 + .popsection
755 + .endm
756 +
757 + .macro BR_R11USE_R14
758 +0: jg __s390x_indirect_jump_r11use_r14
759 + .pushsection .s390_indirect_branches,"a",@progbits
760 + .long 0b-.
761 + .popsection
762 + .endm
763 +
764 +#else /* CONFIG_EXPOLINE */
765 +
766 + .macro BASR_R14_R9
767 + basr %r14,%r9
768 + .endm
769 +
770 + .macro BR_R1USE_R14
771 + br %r14
772 + .endm
773 +
774 + .macro BR_R11USE_R14
775 + br %r14
776 + .endm
777 +
778 +#endif /* CONFIG_EXPOLINE */
779 +
780 +
781 .section .kprobes.text, "ax"
782 .Ldummy:
783 /*
784 @@ -175,6 +300,11 @@ _PIF_WORK = (_PIF_PER_TRAP)
785 */
786 nop 0
787
788 +ENTRY(__bpon)
789 + .globl __bpon
790 + BPON
791 + BR_R1USE_R14
792 +
793 /*
794 * Scheduler resume function, called by switch_to
795 * gpr2 = (task_struct *) prev
796 @@ -201,9 +331,9 @@ ENTRY(__switch_to)
797 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
798 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
799 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
800 - bzr %r14
801 + jz 0f
802 .insn s,0xb2800000,__LC_LPP # set program parameter
803 - br %r14
804 +0: BR_R1USE_R14
805
806 .L__critical_start:
807
808 @@ -215,9 +345,11 @@ ENTRY(__switch_to)
809 */
810 ENTRY(sie64a)
811 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
812 + lg %r12,__LC_CURRENT
813 stg %r2,__SF_EMPTY(%r15) # save control block pointer
814 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
815 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
816 + mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
817 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
818 jno .Lsie_load_guest_gprs
819 brasl %r14,load_fpu_regs # load guest fp/vx regs
820 @@ -234,7 +366,11 @@ ENTRY(sie64a)
821 jnz .Lsie_skip
822 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
823 jo .Lsie_skip # exit if fp/vx regs changed
824 + BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
825 sie 0(%r14)
826 +.Lsie_exit:
827 + BPOFF
828 + BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
829 .Lsie_skip:
830 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
831 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
832 @@ -255,9 +391,15 @@ ENTRY(sie64a)
833 sie_exit:
834 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
835 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
836 + xgr %r0,%r0 # clear guest registers to
837 + xgr %r1,%r1 # prevent speculative use
838 + xgr %r2,%r2
839 + xgr %r3,%r3
840 + xgr %r4,%r4
841 + xgr %r5,%r5
842 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
843 lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
844 - br %r14
845 + BR_R1USE_R14
846 .Lsie_fault:
847 lghi %r14,-EFAULT
848 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
849 @@ -280,6 +422,7 @@ ENTRY(system_call)
850 stpt __LC_SYNC_ENTER_TIMER
851 .Lsysc_stmg:
852 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
853 + BPOFF
854 lg %r10,__LC_LAST_BREAK
855 lg %r12,__LC_THREAD_INFO
856 lghi %r14,_PIF_SYSCALL
857 @@ -289,12 +432,15 @@ ENTRY(system_call)
858 LAST_BREAK %r13
859 .Lsysc_vtime:
860 UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
861 + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
862 stmg %r0,%r7,__PT_R0(%r11)
863 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
864 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
865 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
866 stg %r14,__PT_FLAGS(%r11)
867 .Lsysc_do_svc:
868 + # clear user controlled register to prevent speculative use
869 + xgr %r0,%r0
870 lg %r10,__TI_sysc_table(%r12) # address of system call table
871 llgh %r8,__PT_INT_CODE+2(%r11)
872 slag %r8,%r8,2 # shift and test for svc 0
873 @@ -312,7 +458,7 @@ ENTRY(system_call)
874 lgf %r9,0(%r8,%r10) # get system call add.
875 TSTMSK __TI_flags(%r12),_TIF_TRACE
876 jnz .Lsysc_tracesys
877 - basr %r14,%r9 # call sys_xxxx
878 + BASR_R14_R9 # call sys_xxxx
879 stg %r2,__PT_R2(%r11) # store return value
880
881 .Lsysc_return:
882 @@ -324,6 +470,7 @@ ENTRY(system_call)
883 jnz .Lsysc_work # check for work
884 TSTMSK __LC_CPU_FLAGS,_CIF_WORK
885 jnz .Lsysc_work
886 + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
887 .Lsysc_restore:
888 lg %r14,__LC_VDSO_PER_CPU
889 lmg %r0,%r10,__PT_R0(%r11)
890 @@ -451,7 +598,7 @@ ENTRY(system_call)
891 lmg %r3,%r7,__PT_R3(%r11)
892 stg %r7,STACK_FRAME_OVERHEAD(%r15)
893 lg %r2,__PT_ORIG_GPR2(%r11)
894 - basr %r14,%r9 # call sys_xxx
895 + BASR_R14_R9 # call sys_xxx
896 stg %r2,__PT_R2(%r11) # store return value
897 .Lsysc_tracenogo:
898 TSTMSK __TI_flags(%r12),_TIF_TRACE
899 @@ -475,7 +622,7 @@ ENTRY(ret_from_fork)
900 lmg %r9,%r10,__PT_R9(%r11) # load gprs
901 ENTRY(kernel_thread_starter)
902 la %r2,0(%r10)
903 - basr %r14,%r9
904 + BASR_R14_R9
905 j .Lsysc_tracenogo
906
907 /*
908 @@ -484,6 +631,7 @@ ENTRY(kernel_thread_starter)
909
910 ENTRY(pgm_check_handler)
911 stpt __LC_SYNC_ENTER_TIMER
912 + BPOFF
913 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
914 lg %r10,__LC_LAST_BREAK
915 lg %r12,__LC_THREAD_INFO
916 @@ -508,6 +656,7 @@ ENTRY(pgm_check_handler)
917 j 3f
918 2: LAST_BREAK %r14
919 UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
920 + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
921 lg %r15,__LC_KERNEL_STACK
922 lg %r14,__TI_task(%r12)
923 aghi %r14,__TASK_thread # pointer to thread_struct
924 @@ -517,6 +666,15 @@ ENTRY(pgm_check_handler)
925 mvc __THREAD_trap_tdb(256,%r14),0(%r13)
926 3: la %r11,STACK_FRAME_OVERHEAD(%r15)
927 stmg %r0,%r7,__PT_R0(%r11)
928 + # clear user controlled registers to prevent speculative use
929 + xgr %r0,%r0
930 + xgr %r1,%r1
931 + xgr %r2,%r2
932 + xgr %r3,%r3
933 + xgr %r4,%r4
934 + xgr %r5,%r5
935 + xgr %r6,%r6
936 + xgr %r7,%r7
937 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
938 stmg %r8,%r9,__PT_PSW(%r11)
939 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
940 @@ -538,9 +696,9 @@ ENTRY(pgm_check_handler)
941 nill %r10,0x007f
942 sll %r10,2
943 je .Lpgm_return
944 - lgf %r1,0(%r10,%r1) # load address of handler routine
945 + lgf %r9,0(%r10,%r1) # load address of handler routine
946 lgr %r2,%r11 # pass pointer to pt_regs
947 - basr %r14,%r1 # branch to interrupt-handler
948 + BASR_R14_R9 # branch to interrupt-handler
949 .Lpgm_return:
950 LOCKDEP_SYS_EXIT
951 tm __PT_PSW+1(%r11),0x01 # returning to user ?
952 @@ -573,6 +731,7 @@ ENTRY(pgm_check_handler)
953 ENTRY(io_int_handler)
954 STCK __LC_INT_CLOCK
955 stpt __LC_ASYNC_ENTER_TIMER
956 + BPOFF
957 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
958 lg %r10,__LC_LAST_BREAK
959 lg %r12,__LC_THREAD_INFO
960 @@ -580,6 +739,16 @@ ENTRY(io_int_handler)
961 lmg %r8,%r9,__LC_IO_OLD_PSW
962 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
963 stmg %r0,%r7,__PT_R0(%r11)
964 + # clear user controlled registers to prevent speculative use
965 + xgr %r0,%r0
966 + xgr %r1,%r1
967 + xgr %r2,%r2
968 + xgr %r3,%r3
969 + xgr %r4,%r4
970 + xgr %r5,%r5
971 + xgr %r6,%r6
972 + xgr %r7,%r7
973 + xgr %r10,%r10
974 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
975 stmg %r8,%r9,__PT_PSW(%r11)
976 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
977 @@ -614,9 +783,13 @@ ENTRY(io_int_handler)
978 lg %r14,__LC_VDSO_PER_CPU
979 lmg %r0,%r10,__PT_R0(%r11)
980 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
981 + tm __PT_PSW+1(%r11),0x01 # returning to user ?
982 + jno .Lio_exit_kernel
983 + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
984 .Lio_exit_timer:
985 stpt __LC_EXIT_TIMER
986 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
987 +.Lio_exit_kernel:
988 lmg %r11,%r15,__PT_R11(%r11)
989 lpswe __LC_RETURN_PSW
990 .Lio_done:
991 @@ -748,6 +921,7 @@ ENTRY(io_int_handler)
992 ENTRY(ext_int_handler)
993 STCK __LC_INT_CLOCK
994 stpt __LC_ASYNC_ENTER_TIMER
995 + BPOFF
996 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
997 lg %r10,__LC_LAST_BREAK
998 lg %r12,__LC_THREAD_INFO
999 @@ -755,6 +929,16 @@ ENTRY(ext_int_handler)
1000 lmg %r8,%r9,__LC_EXT_OLD_PSW
1001 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
1002 stmg %r0,%r7,__PT_R0(%r11)
1003 + # clear user controlled registers to prevent speculative use
1004 + xgr %r0,%r0
1005 + xgr %r1,%r1
1006 + xgr %r2,%r2
1007 + xgr %r3,%r3
1008 + xgr %r4,%r4
1009 + xgr %r5,%r5
1010 + xgr %r6,%r6
1011 + xgr %r7,%r7
1012 + xgr %r10,%r10
1013 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
1014 stmg %r8,%r9,__PT_PSW(%r11)
1015 lghi %r1,__LC_EXT_PARAMS2
1016 @@ -787,11 +971,12 @@ ENTRY(psw_idle)
1017 .Lpsw_idle_stcctm:
1018 #endif
1019 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
1020 + BPON
1021 STCK __CLOCK_IDLE_ENTER(%r2)
1022 stpt __TIMER_IDLE_ENTER(%r2)
1023 .Lpsw_idle_lpsw:
1024 lpswe __SF_EMPTY(%r15)
1025 - br %r14
1026 + BR_R1USE_R14
1027 .Lpsw_idle_end:
1028
1029 /*
1030 @@ -805,7 +990,7 @@ ENTRY(save_fpu_regs)
1031 lg %r2,__LC_CURRENT
1032 aghi %r2,__TASK_thread
1033 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1034 - bor %r14
1035 + jo .Lsave_fpu_regs_exit
1036 stfpc __THREAD_FPU_fpc(%r2)
1037 .Lsave_fpu_regs_fpc_end:
1038 lg %r3,__THREAD_FPU_regs(%r2)
1039 @@ -835,7 +1020,8 @@ ENTRY(save_fpu_regs)
1040 std 15,120(%r3)
1041 .Lsave_fpu_regs_done:
1042 oi __LC_CPU_FLAGS+7,_CIF_FPU
1043 - br %r14
1044 +.Lsave_fpu_regs_exit:
1045 + BR_R1USE_R14
1046 .Lsave_fpu_regs_end:
1047 #if IS_ENABLED(CONFIG_KVM)
1048 EXPORT_SYMBOL(save_fpu_regs)
1049 @@ -855,7 +1041,7 @@ load_fpu_regs:
1050 lg %r4,__LC_CURRENT
1051 aghi %r4,__TASK_thread
1052 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1053 - bnor %r14
1054 + jno .Lload_fpu_regs_exit
1055 lfpc __THREAD_FPU_fpc(%r4)
1056 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
1057 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
1058 @@ -884,7 +1070,8 @@ load_fpu_regs:
1059 ld 15,120(%r4)
1060 .Lload_fpu_regs_done:
1061 ni __LC_CPU_FLAGS+7,255-_CIF_FPU
1062 - br %r14
1063 +.Lload_fpu_regs_exit:
1064 + BR_R1USE_R14
1065 .Lload_fpu_regs_end:
1066
1067 .L__critical_end:
1068 @@ -894,6 +1081,7 @@ load_fpu_regs:
1069 */
1070 ENTRY(mcck_int_handler)
1071 STCK __LC_MCCK_CLOCK
1072 + BPOFF
1073 la %r1,4095 # revalidate r1
1074 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
1075 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
1076 @@ -925,6 +1113,16 @@ ENTRY(mcck_int_handler)
1077 .Lmcck_skip:
1078 lghi %r14,__LC_GPREGS_SAVE_AREA+64
1079 stmg %r0,%r7,__PT_R0(%r11)
1080 + # clear user controlled registers to prevent speculative use
1081 + xgr %r0,%r0
1082 + xgr %r1,%r1
1083 + xgr %r2,%r2
1084 + xgr %r3,%r3
1085 + xgr %r4,%r4
1086 + xgr %r5,%r5
1087 + xgr %r6,%r6
1088 + xgr %r7,%r7
1089 + xgr %r10,%r10
1090 mvc __PT_R8(64,%r11),0(%r14)
1091 stmg %r8,%r9,__PT_PSW(%r11)
1092 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
1093 @@ -950,6 +1148,7 @@ ENTRY(mcck_int_handler)
1094 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
1095 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
1096 jno 0f
1097 + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
1098 stpt __LC_EXIT_TIMER
1099 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
1100 0: lmg %r11,%r15,__PT_R11(%r11)
1101 @@ -1045,7 +1244,7 @@ cleanup_critical:
1102 jl 0f
1103 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
1104 jl .Lcleanup_load_fpu_regs
1105 -0: br %r14
1106 +0: BR_R11USE_R14
1107
1108 .align 8
1109 .Lcleanup_table:
1110 @@ -1070,11 +1269,12 @@ cleanup_critical:
1111 .quad .Lsie_done
1112
1113 .Lcleanup_sie:
1114 + BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
1115 lg %r9,__SF_EMPTY(%r15) # get control block pointer
1116 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1117 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1118 larl %r9,sie_exit # skip forward to sie_exit
1119 - br %r14
1120 + BR_R11USE_R14
1121 #endif
1122
1123 .Lcleanup_system_call:
1124 @@ -1116,7 +1316,8 @@ cleanup_critical:
1125 srag %r9,%r9,23
1126 jz 0f
1127 mvc __TI_last_break(8,%r12),16(%r11)
1128 -0: # set up saved register r11
1129 +0: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
1130 + # set up saved register r11
1131 lg %r15,__LC_KERNEL_STACK
1132 la %r9,STACK_FRAME_OVERHEAD(%r15)
1133 stg %r9,24(%r11) # r11 pt_regs pointer
1134 @@ -1131,7 +1332,7 @@ cleanup_critical:
1135 stg %r15,56(%r11) # r15 stack pointer
1136 # set new psw address and exit
1137 larl %r9,.Lsysc_do_svc
1138 - br %r14
1139 + BR_R11USE_R14
1140 .Lcleanup_system_call_insn:
1141 .quad system_call
1142 .quad .Lsysc_stmg
1143 @@ -1141,7 +1342,7 @@ cleanup_critical:
1144
1145 .Lcleanup_sysc_tif:
1146 larl %r9,.Lsysc_tif
1147 - br %r14
1148 + BR_R11USE_R14
1149
1150 .Lcleanup_sysc_restore:
1151 # check if stpt has been executed
1152 @@ -1158,14 +1359,14 @@ cleanup_critical:
1153 mvc 0(64,%r11),__PT_R8(%r9)
1154 lmg %r0,%r7,__PT_R0(%r9)
1155 1: lmg %r8,%r9,__LC_RETURN_PSW
1156 - br %r14
1157 + BR_R11USE_R14
1158 .Lcleanup_sysc_restore_insn:
1159 .quad .Lsysc_exit_timer
1160 .quad .Lsysc_done - 4
1161
1162 .Lcleanup_io_tif:
1163 larl %r9,.Lio_tif
1164 - br %r14
1165 + BR_R11USE_R14
1166
1167 .Lcleanup_io_restore:
1168 # check if stpt has been executed
1169 @@ -1179,7 +1380,7 @@ cleanup_critical:
1170 mvc 0(64,%r11),__PT_R8(%r9)
1171 lmg %r0,%r7,__PT_R0(%r9)
1172 1: lmg %r8,%r9,__LC_RETURN_PSW
1173 - br %r14
1174 + BR_R11USE_R14
1175 .Lcleanup_io_restore_insn:
1176 .quad .Lio_exit_timer
1177 .quad .Lio_done - 4
1178 @@ -1232,17 +1433,17 @@ cleanup_critical:
1179 # prepare return psw
1180 nihh %r8,0xfcfd # clear irq & wait state bits
1181 lg %r9,48(%r11) # return from psw_idle
1182 - br %r14
1183 + BR_R11USE_R14
1184 .Lcleanup_idle_insn:
1185 .quad .Lpsw_idle_lpsw
1186
1187 .Lcleanup_save_fpu_regs:
1188 larl %r9,save_fpu_regs
1189 - br %r14
1190 + BR_R11USE_R14
1191
1192 .Lcleanup_load_fpu_regs:
1193 larl %r9,load_fpu_regs
1194 - br %r14
1195 + BR_R11USE_R14
1196
1197 /*
1198 * Integer constants
1199 @@ -1258,7 +1459,6 @@ cleanup_critical:
1200 .Lsie_critical_length:
1201 .quad .Lsie_done - .Lsie_gmap
1202 #endif
1203 -
1204 .section .rodata, "a"
1205 #define SYSCALL(esame,emu) .long esame
1206 .globl sys_call_table
1207 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
1208 index 39127b691b78..df49f2a1a7e5 100644
1209 --- a/arch/s390/kernel/ipl.c
1210 +++ b/arch/s390/kernel/ipl.c
1211 @@ -563,6 +563,7 @@ static struct kset *ipl_kset;
1212
1213 static void __ipl_run(void *unused)
1214 {
1215 + __bpon();
1216 diag308(DIAG308_LOAD_CLEAR, NULL);
1217 if (MACHINE_IS_VM)
1218 __cpcmd("IPL", NULL, 0, NULL);
1219 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
1220 index fbc07891f9e7..64ccfdf96b32 100644
1221 --- a/arch/s390/kernel/module.c
1222 +++ b/arch/s390/kernel/module.c
1223 @@ -31,6 +31,9 @@
1224 #include <linux/kernel.h>
1225 #include <linux/moduleloader.h>
1226 #include <linux/bug.h>
1227 +#include <asm/alternative.h>
1228 +#include <asm/nospec-branch.h>
1229 +#include <asm/facility.h>
1230
1231 #if 0
1232 #define DEBUGP printk
1233 @@ -167,7 +170,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
1234 me->arch.got_offset = me->core_layout.size;
1235 me->core_layout.size += me->arch.got_size;
1236 me->arch.plt_offset = me->core_layout.size;
1237 - me->core_layout.size += me->arch.plt_size;
1238 + if (me->arch.plt_size) {
1239 + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
1240 + me->arch.plt_size += PLT_ENTRY_SIZE;
1241 + me->core_layout.size += me->arch.plt_size;
1242 + }
1243 return 0;
1244 }
1245
1246 @@ -321,9 +328,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
1247 unsigned int *ip;
1248 ip = me->core_layout.base + me->arch.plt_offset +
1249 info->plt_offset;
1250 - ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
1251 - ip[1] = 0x100a0004;
1252 - ip[2] = 0x07f10000;
1253 + ip[0] = 0x0d10e310; /* basr 1,0 */
1254 + ip[1] = 0x100a0004; /* lg 1,10(1) */
1255 + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
1256 + unsigned int *ij;
1257 + ij = me->core_layout.base +
1258 + me->arch.plt_offset +
1259 + me->arch.plt_size - PLT_ENTRY_SIZE;
1260 + ip[2] = 0xa7f40000 + /* j __jump_r1 */
1261 + (unsigned int)(u16)
1262 + (((unsigned long) ij - 8 -
1263 + (unsigned long) ip) / 2);
1264 + } else {
1265 + ip[2] = 0x07f10000; /* br %r1 */
1266 + }
1267 ip[3] = (unsigned int) (val >> 32);
1268 ip[4] = (unsigned int) val;
1269 info->plt_initialized = 1;
1270 @@ -428,6 +446,45 @@ int module_finalize(const Elf_Ehdr *hdr,
1271 const Elf_Shdr *sechdrs,
1272 struct module *me)
1273 {
1274 + const Elf_Shdr *s;
1275 + char *secstrings, *secname;
1276 + void *aseg;
1277 +
1278 + if (IS_ENABLED(CONFIG_EXPOLINE) &&
1279 + !nospec_disable && me->arch.plt_size) {
1280 + unsigned int *ij;
1281 +
1282 + ij = me->core_layout.base + me->arch.plt_offset +
1283 + me->arch.plt_size - PLT_ENTRY_SIZE;
1284 + if (test_facility(35)) {
1285 + ij[0] = 0xc6000000; /* exrl %r0,.+10 */
1286 + ij[1] = 0x0005a7f4; /* j . */
1287 + ij[2] = 0x000007f1; /* br %r1 */
1288 + } else {
1289 + ij[0] = 0x44000000 | (unsigned int)
1290 + offsetof(struct lowcore, br_r1_trampoline);
1291 + ij[1] = 0xa7f40000; /* j . */
1292 + }
1293 + }
1294 +
1295 + secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
1296 + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
1297 + aseg = (void *) s->sh_addr;
1298 + secname = secstrings + s->sh_name;
1299 +
1300 + if (!strcmp(".altinstructions", secname))
1301 + /* patch .altinstructions */
1302 + apply_alternatives(aseg, aseg + s->sh_size);
1303 +
1304 + if (IS_ENABLED(CONFIG_EXPOLINE) &&
1305 + (!strncmp(".s390_indirect", secname, 14)))
1306 + nospec_revert(aseg, aseg + s->sh_size);
1307 +
1308 + if (IS_ENABLED(CONFIG_EXPOLINE) &&
1309 + (!strncmp(".s390_return", secname, 12)))
1310 + nospec_revert(aseg, aseg + s->sh_size);
1311 + }
1312 +
1313 jump_label_apply_nops(me);
1314 return 0;
1315 }
1316 diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
1317 new file mode 100644
1318 index 000000000000..9f3b5b382743
1319 --- /dev/null
1320 +++ b/arch/s390/kernel/nospec-branch.c
1321 @@ -0,0 +1,169 @@
1322 +// SPDX-License-Identifier: GPL-2.0
1323 +#include <linux/module.h>
1324 +#include <linux/device.h>
1325 +#include <asm/facility.h>
1326 +#include <asm/nospec-branch.h>
1327 +
1328 +static int __init nobp_setup_early(char *str)
1329 +{
1330 + bool enabled;
1331 + int rc;
1332 +
1333 + rc = kstrtobool(str, &enabled);
1334 + if (rc)
1335 + return rc;
1336 + if (enabled && test_facility(82)) {
1337 + /*
1338 + * The user explicitely requested nobp=1, enable it and
1339 + * disable the expoline support.
1340 + */
1341 + __set_facility(82, S390_lowcore.alt_stfle_fac_list);
1342 + if (IS_ENABLED(CONFIG_EXPOLINE))
1343 + nospec_disable = 1;
1344 + } else {
1345 + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1346 + }
1347 + return 0;
1348 +}
1349 +early_param("nobp", nobp_setup_early);
1350 +
1351 +static int __init nospec_setup_early(char *str)
1352 +{
1353 + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1354 + return 0;
1355 +}
1356 +early_param("nospec", nospec_setup_early);
1357 +
1358 +static int __init nospec_report(void)
1359 +{
1360 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1361 + pr_info("Spectre V2 mitigation: execute trampolines.\n");
1362 + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1363 + pr_info("Spectre V2 mitigation: limited branch prediction.\n");
1364 + return 0;
1365 +}
1366 +arch_initcall(nospec_report);
1367 +
1368 +#ifdef CONFIG_SYSFS
1369 +ssize_t cpu_show_spectre_v1(struct device *dev,
1370 + struct device_attribute *attr, char *buf)
1371 +{
1372 + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1373 +}
1374 +
1375 +ssize_t cpu_show_spectre_v2(struct device *dev,
1376 + struct device_attribute *attr, char *buf)
1377 +{
1378 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1379 + return sprintf(buf, "Mitigation: execute trampolines\n");
1380 + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1381 + return sprintf(buf, "Mitigation: limited branch prediction.\n");
1382 + return sprintf(buf, "Vulnerable\n");
1383 +}
1384 +#endif
1385 +
1386 +#ifdef CONFIG_EXPOLINE
1387 +
1388 +int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
1389 +
1390 +static int __init nospectre_v2_setup_early(char *str)
1391 +{
1392 + nospec_disable = 1;
1393 + return 0;
1394 +}
1395 +early_param("nospectre_v2", nospectre_v2_setup_early);
1396 +
1397 +void __init nospec_auto_detect(void)
1398 +{
1399 + if (IS_ENABLED(CC_USING_EXPOLINE)) {
1400 + /*
1401 + * The kernel has been compiled with expolines.
1402 + * Keep expolines enabled and disable nobp.
1403 + */
1404 + nospec_disable = 0;
1405 + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1406 + }
1407 + /*
1408 + * If the kernel has not been compiled with expolines the
1409 + * nobp setting decides what is done, this depends on the
1410 + * CONFIG_KERNEL_NP option and the nobp/nospec parameters.
1411 + */
1412 +}
1413 +
1414 +static int __init spectre_v2_setup_early(char *str)
1415 +{
1416 + if (str && !strncmp(str, "on", 2)) {
1417 + nospec_disable = 0;
1418 + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1419 + }
1420 + if (str && !strncmp(str, "off", 3))
1421 + nospec_disable = 1;
1422 + if (str && !strncmp(str, "auto", 4))
1423 + nospec_auto_detect();
1424 + return 0;
1425 +}
1426 +early_param("spectre_v2", spectre_v2_setup_early);
1427 +
1428 +static void __init_or_module __nospec_revert(s32 *start, s32 *end)
1429 +{
1430 + enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
1431 + u8 *instr, *thunk, *br;
1432 + u8 insnbuf[6];
1433 + s32 *epo;
1434 +
1435 + /* Second part of the instruction replace is always a nop */
1436 + memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
1437 + for (epo = start; epo < end; epo++) {
1438 + instr = (u8 *) epo + *epo;
1439 + if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
1440 + type = BRCL_EXPOLINE; /* brcl instruction */
1441 + else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
1442 + type = BRASL_EXPOLINE; /* brasl instruction */
1443 + else
1444 + continue;
1445 + thunk = instr + (*(int *)(instr + 2)) * 2;
1446 + if (thunk[0] == 0xc6 && thunk[1] == 0x00)
1447 + /* exrl %r0,<target-br> */
1448 + br = thunk + (*(int *)(thunk + 2)) * 2;
1449 + else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
1450 + thunk[6] == 0x44 && thunk[7] == 0x00 &&
1451 + (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
1452 + (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
1453 + /* larl %rx,<target br> + ex %r0,0(%rx) */
1454 + br = thunk + (*(int *)(thunk + 2)) * 2;
1455 + else
1456 + continue;
1457 + if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
1458 + continue;
1459 + switch (type) {
1460 + case BRCL_EXPOLINE:
1461 + /* brcl to thunk, replace with br + nop */
1462 + insnbuf[0] = br[0];
1463 + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
1464 + break;
1465 + case BRASL_EXPOLINE:
1466 + /* brasl to thunk, replace with basr + nop */
1467 + insnbuf[0] = 0x0d;
1468 + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
1469 + break;
1470 + }
1471 +
1472 + s390_kernel_write(instr, insnbuf, 6);
1473 + }
1474 +}
1475 +
1476 +void __init_or_module nospec_revert(s32 *start, s32 *end)
1477 +{
1478 + if (nospec_disable)
1479 + __nospec_revert(start, end);
1480 +}
1481 +
1482 +extern s32 __nospec_call_start[], __nospec_call_end[];
1483 +extern s32 __nospec_return_start[], __nospec_return_end[];
1484 +void __init nospec_init_branches(void)
1485 +{
1486 + nospec_revert(__nospec_call_start, __nospec_call_end);
1487 + nospec_revert(__nospec_return_start, __nospec_return_end);
1488 +}
1489 +
1490 +#endif /* CONFIG_EXPOLINE */
1491 diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
1492 index 81d0808085e6..d856263fd768 100644
1493 --- a/arch/s390/kernel/processor.c
1494 +++ b/arch/s390/kernel/processor.c
1495 @@ -179,3 +179,21 @@ const struct seq_operations cpuinfo_op = {
1496 .stop = c_stop,
1497 .show = show_cpuinfo,
1498 };
1499 +
1500 +int s390_isolate_bp(void)
1501 +{
1502 + if (!test_facility(82))
1503 + return -EOPNOTSUPP;
1504 + set_thread_flag(TIF_ISOLATE_BP);
1505 + return 0;
1506 +}
1507 +EXPORT_SYMBOL(s390_isolate_bp);
1508 +
1509 +int s390_isolate_bp_guest(void)
1510 +{
1511 + if (!test_facility(82))
1512 + return -EOPNOTSUPP;
1513 + set_thread_flag(TIF_ISOLATE_BP_GUEST);
1514 + return 0;
1515 +}
1516 +EXPORT_SYMBOL(s390_isolate_bp_guest);
1517 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1518 index e974e53ab597..feb9d97a9d14 100644
1519 --- a/arch/s390/kernel/setup.c
1520 +++ b/arch/s390/kernel/setup.c
1521 @@ -63,6 +63,8 @@
1522 #include <asm/sclp.h>
1523 #include <asm/sysinfo.h>
1524 #include <asm/numa.h>
1525 +#include <asm/alternative.h>
1526 +#include <asm/nospec-branch.h>
1527 #include "entry.h"
1528
1529 /*
1530 @@ -335,7 +337,9 @@ static void __init setup_lowcore(void)
1531 lc->machine_flags = S390_lowcore.machine_flags;
1532 lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
1533 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
1534 - MAX_FACILITY_BIT/8);
1535 + sizeof(lc->stfle_fac_list));
1536 + memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
1537 + sizeof(lc->alt_stfle_fac_list));
1538 if (MACHINE_HAS_VX)
1539 lc->vector_save_area_addr =
1540 (unsigned long) &lc->vector_save_area;
1541 @@ -372,6 +376,7 @@ static void __init setup_lowcore(void)
1542 #ifdef CONFIG_SMP
1543 lc->spinlock_lockval = arch_spin_lockval(0);
1544 #endif
1545 + lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1546
1547 set_prefix((u32)(unsigned long) lc);
1548 lowcore_ptr[0] = lc;
1549 @@ -871,6 +876,9 @@ void __init setup_arch(char **cmdline_p)
1550 init_mm.end_data = (unsigned long) &_edata;
1551 init_mm.brk = (unsigned long) &_end;
1552
1553 + if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
1554 + nospec_auto_detect();
1555 +
1556 parse_early_param();
1557 #ifdef CONFIG_CRASH_DUMP
1558 /* Deactivate elfcorehdr= kernel parameter */
1559 @@ -931,6 +939,10 @@ void __init setup_arch(char **cmdline_p)
1560 conmode_default();
1561 set_preferred_console();
1562
1563 + apply_alternative_instructions();
1564 + if (IS_ENABLED(CONFIG_EXPOLINE))
1565 + nospec_init_branches();
1566 +
1567 /* Setup zfcpdump support */
1568 setup_zfcpdump();
1569
1570 diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1571 index 35531fe1c5ea..0a31110f41f6 100644
1572 --- a/arch/s390/kernel/smp.c
1573 +++ b/arch/s390/kernel/smp.c
1574 @@ -205,6 +205,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
1575 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
1576 lc->cpu_nr = cpu;
1577 lc->spinlock_lockval = arch_spin_lockval(cpu);
1578 + lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1579 if (MACHINE_HAS_VX)
1580 lc->vector_save_area_addr =
1581 (unsigned long) &lc->vector_save_area;
1582 @@ -253,7 +254,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
1583 __ctl_store(lc->cregs_save_area, 0, 15);
1584 save_access_regs((unsigned int *) lc->access_regs_save_area);
1585 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
1586 - MAX_FACILITY_BIT/8);
1587 + sizeof(lc->stfle_fac_list));
1588 + memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
1589 + sizeof(lc->alt_stfle_fac_list));
1590 }
1591
1592 static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
1593 @@ -302,6 +305,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
1594 mem_assign_absolute(lc->restart_fn, (unsigned long) func);
1595 mem_assign_absolute(lc->restart_data, (unsigned long) data);
1596 mem_assign_absolute(lc->restart_source, source_cpu);
1597 + __bpon();
1598 asm volatile(
1599 "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
1600 " brc 2,0b # busy, try again\n"
1601 @@ -875,6 +879,7 @@ void __cpu_die(unsigned int cpu)
1602 void __noreturn cpu_die(void)
1603 {
1604 idle_task_exit();
1605 + __bpon();
1606 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
1607 for (;;) ;
1608 }
1609 diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
1610 index 66956c09d5bf..3d04dfdabc9f 100644
1611 --- a/arch/s390/kernel/uprobes.c
1612 +++ b/arch/s390/kernel/uprobes.c
1613 @@ -147,6 +147,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
1614 return orig;
1615 }
1616
1617 +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1618 + struct pt_regs *regs)
1619 +{
1620 + if (ctx == RP_CHECK_CHAIN_CALL)
1621 + return user_stack_pointer(regs) <= ret->stack;
1622 + else
1623 + return user_stack_pointer(regs) < ret->stack;
1624 +}
1625 +
1626 /* Instruction Emulation */
1627
1628 static void adjust_psw_addr(psw_t *psw, unsigned long len)
1629 diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
1630 index 115bda280d50..dd96b467946b 100644
1631 --- a/arch/s390/kernel/vmlinux.lds.S
1632 +++ b/arch/s390/kernel/vmlinux.lds.S
1633 @@ -99,6 +99,43 @@ SECTIONS
1634 EXIT_DATA
1635 }
1636
1637 + /*
1638 + * struct alt_inst entries. From the header (alternative.h):
1639 + * "Alternative instructions for different CPU types or capabilities"
1640 + * Think locking instructions on spinlocks.
1641 + * Note, that it is a part of __init region.
1642 + */
1643 + . = ALIGN(8);
1644 + .altinstructions : {
1645 + __alt_instructions = .;
1646 + *(.altinstructions)
1647 + __alt_instructions_end = .;
1648 + }
1649 +
1650 + /*
1651 + * And here are the replacement instructions. The linker sticks
1652 + * them as binary blobs. The .altinstructions has enough data to
1653 + * get the address and the length of them to patch the kernel safely.
1654 + * Note, that it is a part of __init region.
1655 + */
1656 + .altinstr_replacement : {
1657 + *(.altinstr_replacement)
1658 + }
1659 +
1660 + /*
1661 + * Table with the patch locations to undo expolines
1662 + */
1663 + .nospec_call_table : {
1664 + __nospec_call_start = . ;
1665 + *(.s390_indirect*)
1666 + __nospec_call_end = . ;
1667 + }
1668 + .nospec_return_table : {
1669 + __nospec_return_start = . ;
1670 + *(.s390_return*)
1671 + __nospec_return_end = . ;
1672 + }
1673 +
1674 /* early.c uses stsi, which requires page aligned data. */
1675 . = ALIGN(PAGE_SIZE);
1676 INIT_DATA_SECTION(0x100)
1677 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1678 index a70ff09b4982..2032ab81b2d7 100644
1679 --- a/arch/s390/kvm/kvm-s390.c
1680 +++ b/arch/s390/kvm/kvm-s390.c
1681 @@ -401,6 +401,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1682 case KVM_CAP_S390_RI:
1683 r = test_facility(64);
1684 break;
1685 + case KVM_CAP_S390_BPB:
1686 + r = test_facility(82);
1687 + break;
1688 default:
1689 r = 0;
1690 }
1691 @@ -1713,6 +1716,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1692 kvm_s390_set_prefix(vcpu, 0);
1693 if (test_kvm_facility(vcpu->kvm, 64))
1694 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1695 + if (test_kvm_facility(vcpu->kvm, 82))
1696 + vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
1697 /* fprs can be synchronized via vrs, even if the guest has no vx. With
1698 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1699 */
1700 @@ -1829,7 +1834,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1701 if (test_fp_ctl(current->thread.fpu.fpc))
1702 /* User space provided an invalid FPC, let's clear it */
1703 current->thread.fpu.fpc = 0;
1704 -
1705 save_access_regs(vcpu->arch.host_acrs);
1706 restore_access_regs(vcpu->run->s.regs.acrs);
1707 gmap_enable(vcpu->arch.enabled_gmap);
1708 @@ -1877,6 +1881,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1709 current->thread.fpu.fpc = 0;
1710 vcpu->arch.sie_block->gbea = 1;
1711 vcpu->arch.sie_block->pp = 0;
1712 + vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
1713 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1714 kvm_clear_async_pf_completion_queue(vcpu);
1715 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1716 @@ -2744,6 +2749,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1717 if (riccb->valid)
1718 vcpu->arch.sie_block->ecb3 |= 0x01;
1719 }
1720 + if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
1721 + test_kvm_facility(vcpu->kvm, 82)) {
1722 + vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
1723 + vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
1724 + }
1725
1726 kvm_run->kvm_dirty_regs = 0;
1727 }
1728 @@ -2762,6 +2772,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1729 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1730 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1731 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1732 + kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
1733 }
1734
1735 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1736 diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
1737 index d8673e243f13..ced6c9b8f04d 100644
1738 --- a/arch/s390/kvm/vsie.c
1739 +++ b/arch/s390/kvm/vsie.c
1740 @@ -217,6 +217,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1741 memcpy(scb_o->gcr, scb_s->gcr, 128);
1742 scb_o->pp = scb_s->pp;
1743
1744 + /* branch prediction */
1745 + if (test_kvm_facility(vcpu->kvm, 82)) {
1746 + scb_o->fpf &= ~FPF_BPBC;
1747 + scb_o->fpf |= scb_s->fpf & FPF_BPBC;
1748 + }
1749 +
1750 /* interrupt intercept */
1751 switch (scb_s->icptcode) {
1752 case ICPT_PROGI:
1753 @@ -259,6 +265,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1754 scb_s->ecb3 = 0;
1755 scb_s->ecd = 0;
1756 scb_s->fac = 0;
1757 + scb_s->fpf = 0;
1758
1759 rc = prepare_cpuflags(vcpu, vsie_page);
1760 if (rc)
1761 @@ -316,6 +323,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1762 prefix_unmapped(vsie_page);
1763 scb_s->ecb |= scb_o->ecb & 0x10U;
1764 }
1765 + /* branch prediction */
1766 + if (test_kvm_facility(vcpu->kvm, 82))
1767 + scb_s->fpf |= scb_o->fpf & FPF_BPBC;
1768 /* SIMD */
1769 if (test_kvm_facility(vcpu->kvm, 129)) {
1770 scb_s->eca |= scb_o->eca & 0x00020000U;
1771 @@ -754,6 +764,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1772 {
1773 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1774 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
1775 + int guest_bp_isolation;
1776 int rc;
1777
1778 handle_last_fault(vcpu, vsie_page);
1779 @@ -764,6 +775,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1780 s390_handle_mcck();
1781
1782 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1783 +
1784 + /* save current guest state of bp isolation override */
1785 + guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1786 +
1787 + /*
1788 + * The guest is running with BPBC, so we have to force it on for our
1789 + * nested guest. This is done by enabling BPBC globally, so the BPBC
1790 + * control in the SCB (which the nested guest can modify) is simply
1791 + * ignored.
1792 + */
1793 + if (test_kvm_facility(vcpu->kvm, 82) &&
1794 + vcpu->arch.sie_block->fpf & FPF_BPBC)
1795 + set_thread_flag(TIF_ISOLATE_BP_GUEST);
1796 +
1797 local_irq_disable();
1798 guest_enter_irqoff();
1799 local_irq_enable();
1800 @@ -773,6 +798,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1801 local_irq_disable();
1802 guest_exit_irqoff();
1803 local_irq_enable();
1804 +
1805 + /* restore guest state for bp isolation override */
1806 + if (!guest_bp_isolation)
1807 + clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1808 +
1809 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1810
1811 if (rc > 0)
1812 diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
1813 index bbfb03eccb7f..da6a287a11e4 100644
1814 --- a/arch/x86/kernel/tsc.c
1815 +++ b/arch/x86/kernel/tsc.c
1816 @@ -409,7 +409,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
1817 hpet2 -= hpet1;
1818 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
1819 do_div(tmp, 1000000);
1820 - do_div(deltatsc, tmp);
1821 + deltatsc = div64_u64(deltatsc, tmp);
1822
1823 return (unsigned long) deltatsc;
1824 }
1825 diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
1826 index 94e04c9de12b..667dc5c86fef 100644
1827 --- a/drivers/acpi/acpi_video.c
1828 +++ b/drivers/acpi/acpi_video.c
1829 @@ -2069,6 +2069,25 @@ static int __init intel_opregion_present(void)
1830 return opregion;
1831 }
1832
1833 +static bool dmi_is_desktop(void)
1834 +{
1835 + const char *chassis_type;
1836 +
1837 + chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
1838 + if (!chassis_type)
1839 + return false;
1840 +
1841 + if (!strcmp(chassis_type, "3") || /* 3: Desktop */
1842 + !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
1843 + !strcmp(chassis_type, "5") || /* 5: Pizza Box */
1844 + !strcmp(chassis_type, "6") || /* 6: Mini Tower */
1845 + !strcmp(chassis_type, "7") || /* 7: Tower */
1846 + !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
1847 + return true;
1848 +
1849 + return false;
1850 +}
1851 +
1852 int acpi_video_register(void)
1853 {
1854 int ret = 0;
1855 @@ -2089,8 +2108,12 @@ int acpi_video_register(void)
1856 * win8 ready (where we also prefer the native backlight driver, so
1857 * normally the acpi_video code should not register there anyways).
1858 */
1859 - if (only_lcd == -1)
1860 - only_lcd = acpi_osi_is_win8();
1861 + if (only_lcd == -1) {
1862 + if (dmi_is_desktop() && acpi_osi_is_win8())
1863 + only_lcd = true;
1864 + else
1865 + only_lcd = false;
1866 + }
1867
1868 dmi_check_system(video_dmi_table);
1869
1870 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1871 index 5d475b3a0b2e..128ebd439221 100644
1872 --- a/drivers/cdrom/cdrom.c
1873 +++ b/drivers/cdrom/cdrom.c
1874 @@ -2368,7 +2368,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
1875 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
1876 return media_changed(cdi, 1);
1877
1878 - if ((unsigned int)arg >= cdi->capacity)
1879 + if (arg >= cdi->capacity)
1880 return -EINVAL;
1881
1882 info = kmalloc(sizeof(*info), GFP_KERNEL);
1883 diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
1884 index a7b2a751f6fe..cdb53586c8fe 100644
1885 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
1886 +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
1887 @@ -322,19 +322,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
1888 {
1889 uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
1890 ssize_t ret;
1891 + int retry;
1892
1893 if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
1894 return 0;
1895
1896 - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
1897 - &tmds_oen, sizeof(tmds_oen));
1898 - if (ret) {
1899 - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
1900 - enable ? "enable" : "disable");
1901 - return ret;
1902 + /*
1903 + * LSPCON adapters in low-power state may ignore the first write, so
1904 + * read back and verify the written value a few times.
1905 + */
1906 + for (retry = 0; retry < 3; retry++) {
1907 + uint8_t tmp;
1908 +
1909 + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
1910 + &tmds_oen, sizeof(tmds_oen));
1911 + if (ret) {
1912 + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
1913 + enable ? "enable" : "disable",
1914 + retry + 1);
1915 + return ret;
1916 + }
1917 +
1918 + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
1919 + &tmp, sizeof(tmp));
1920 + if (ret) {
1921 + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
1922 + enable ? "enabling" : "disabling",
1923 + retry + 1);
1924 + return ret;
1925 + }
1926 +
1927 + if (tmp == tmds_oen)
1928 + return 0;
1929 }
1930
1931 - return 0;
1932 + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
1933 + enable ? "enabling" : "disabling");
1934 +
1935 + return -EIO;
1936 }
1937 EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
1938
1939 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1940 index 36a665f0e5c9..e23748cca0c0 100644
1941 --- a/drivers/gpu/drm/i915/i915_drv.h
1942 +++ b/drivers/gpu/drm/i915/i915_drv.h
1943 @@ -3681,7 +3681,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
1944 struct intel_display_error_state *error);
1945
1946 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
1947 -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
1948 +int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
1949 + u32 val, int timeout_us);
1950 +#define sandybridge_pcode_write(dev_priv, mbox, val) \
1951 + sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
1952 +
1953 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
1954 u32 reply_mask, u32 reply, int timeout_base_ms);
1955
1956 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1957 index ce32303b3013..c185625d67f2 100644
1958 --- a/drivers/gpu/drm/i915/intel_display.c
1959 +++ b/drivers/gpu/drm/i915/intel_display.c
1960 @@ -6012,8 +6012,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
1961
1962 /* Inform power controller of upcoming frequency change */
1963 mutex_lock(&dev_priv->rps.hw_lock);
1964 - ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
1965 - 0x80000000);
1966 + ret = sandybridge_pcode_write_timeout(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
1967 + 0x80000000, 2000);
1968 mutex_unlock(&dev_priv->rps.hw_lock);
1969
1970 if (ret) {
1971 @@ -6044,8 +6044,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
1972 I915_WRITE(CDCLK_CTL, val);
1973
1974 mutex_lock(&dev_priv->rps.hw_lock);
1975 - ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
1976 - DIV_ROUND_UP(cdclk, 25000));
1977 + ret = sandybridge_pcode_write_timeout(dev_priv,
1978 + HSW_PCODE_DE_WRITE_FREQ_REQ,
1979 + DIV_ROUND_UP(cdclk, 25000), 2000);
1980 mutex_unlock(&dev_priv->rps.hw_lock);
1981
1982 if (ret) {
1983 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1984 index 49de4760cc16..05427d292457 100644
1985 --- a/drivers/gpu/drm/i915/intel_pm.c
1986 +++ b/drivers/gpu/drm/i915/intel_pm.c
1987 @@ -7913,8 +7913,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
1988 return 0;
1989 }
1990
1991 -int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
1992 - u32 mbox, u32 val)
1993 +int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
1994 + u32 mbox, u32 val, int timeout_us)
1995 {
1996 int status;
1997
1998 @@ -7935,7 +7935,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
1999
2000 if (intel_wait_for_register_fw(dev_priv,
2001 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
2002 - 500)) {
2003 + timeout_us)) {
2004 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
2005 return -ETIMEDOUT;
2006 }
2007 diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
2008 index ec9023bd935b..d53e805d392f 100644
2009 --- a/drivers/gpu/drm/vc4/vc4_bo.c
2010 +++ b/drivers/gpu/drm/vc4/vc4_bo.c
2011 @@ -80,6 +80,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
2012 struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
2013
2014 if (bo->validated_shader) {
2015 + kfree(bo->validated_shader->uniform_addr_offsets);
2016 kfree(bo->validated_shader->texture_samples);
2017 kfree(bo->validated_shader);
2018 bo->validated_shader = NULL;
2019 @@ -328,6 +329,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
2020 }
2021
2022 if (bo->validated_shader) {
2023 + kfree(bo->validated_shader->uniform_addr_offsets);
2024 kfree(bo->validated_shader->texture_samples);
2025 kfree(bo->validated_shader);
2026 bo->validated_shader = NULL;
2027 diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
2028 index 917321ce832f..19a5bde8e490 100644
2029 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
2030 +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
2031 @@ -874,6 +874,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
2032 fail:
2033 kfree(validation_state.branch_targets);
2034 if (validated_shader) {
2035 + kfree(validated_shader->uniform_addr_offsets);
2036 kfree(validated_shader->texture_samples);
2037 kfree(validated_shader);
2038 }
2039 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
2040 index e6fe21a6135b..b32bf7eac3c8 100644
2041 --- a/drivers/i2c/busses/i2c-i801.c
2042 +++ b/drivers/i2c/busses/i2c-i801.c
2043 @@ -243,6 +243,7 @@ struct i801_priv {
2044 struct i2c_adapter adapter;
2045 unsigned long smba;
2046 unsigned char original_hstcfg;
2047 + unsigned char original_slvcmd;
2048 struct pci_dev *pci_dev;
2049 unsigned int features;
2050
2051 @@ -962,13 +963,24 @@ static int i801_enable_host_notify(struct i2c_adapter *adapter)
2052 if (!priv->host_notify)
2053 return -ENOMEM;
2054
2055 - outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
2056 + if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
2057 + outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
2058 + SMBSLVCMD(priv));
2059 +
2060 /* clear Host Notify bit to allow a new notification */
2061 outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
2062
2063 return 0;
2064 }
2065
2066 +static void i801_disable_host_notify(struct i801_priv *priv)
2067 +{
2068 + if (!(priv->features & FEATURE_HOST_NOTIFY))
2069 + return;
2070 +
2071 + outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
2072 +}
2073 +
2074 static const struct i2c_algorithm smbus_algorithm = {
2075 .smbus_xfer = i801_access,
2076 .functionality = i801_func,
2077 @@ -1589,6 +1601,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
2078 outb_p(inb_p(SMBAUXCTL(priv)) &
2079 ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
2080
2081 + /* Remember original Host Notify setting */
2082 + if (priv->features & FEATURE_HOST_NOTIFY)
2083 + priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
2084 +
2085 /* Default timeout in interrupt mode: 200 ms */
2086 priv->adapter.timeout = HZ / 5;
2087
2088 @@ -1666,6 +1682,7 @@ static void i801_remove(struct pci_dev *dev)
2089 pm_runtime_forbid(&dev->dev);
2090 pm_runtime_get_noresume(&dev->dev);
2091
2092 + i801_disable_host_notify(priv);
2093 i801_del_mux(priv);
2094 i2c_del_adapter(&priv->adapter);
2095 i801_acpi_remove(priv);
2096 @@ -1679,6 +1696,15 @@ static void i801_remove(struct pci_dev *dev)
2097 */
2098 }
2099
2100 +static void i801_shutdown(struct pci_dev *dev)
2101 +{
2102 + struct i801_priv *priv = pci_get_drvdata(dev);
2103 +
2104 + /* Restore config registers to avoid hard hang on some systems */
2105 + i801_disable_host_notify(priv);
2106 + pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
2107 +}
2108 +
2109 #ifdef CONFIG_PM
2110 static int i801_suspend(struct device *dev)
2111 {
2112 @@ -1711,6 +1737,7 @@ static struct pci_driver i801_driver = {
2113 .id_table = i801_ids,
2114 .probe = i801_probe,
2115 .remove = i801_remove,
2116 + .shutdown = i801_shutdown,
2117 .driver = {
2118 .pm = &i801_pm_ops,
2119 },
2120 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
2121 index 403df3591d29..5b8909d1b55e 100644
2122 --- a/drivers/infiniband/hw/mlx5/qp.c
2123 +++ b/drivers/infiniband/hw/mlx5/qp.c
2124 @@ -2848,7 +2848,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
2125 * If we moved a kernel QP to RESET, clean up all old CQ
2126 * entries and reinitialize the QP.
2127 */
2128 - if (new_state == IB_QPS_RESET && !ibqp->uobject) {
2129 + if (new_state == IB_QPS_RESET &&
2130 + !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
2131 mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
2132 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
2133 if (send_cq != recv_cq)
2134 diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
2135 index 930424e55439..251d64ca41ce 100644
2136 --- a/drivers/input/misc/drv260x.c
2137 +++ b/drivers/input/misc/drv260x.c
2138 @@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
2139 if (!haptics)
2140 return -ENOMEM;
2141
2142 - haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
2143 + haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
2144 haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
2145
2146 if (pdata) {
2147 diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
2148 index e546b014d7ad..2dcc8d0be9e7 100644
2149 --- a/drivers/media/usb/stkwebcam/stk-sensor.c
2150 +++ b/drivers/media/usb/stkwebcam/stk-sensor.c
2151 @@ -228,7 +228,7 @@
2152 static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
2153 {
2154 int i = 0;
2155 - int tmpval = 0;
2156 + u8 tmpval = 0;
2157
2158 if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
2159 return 1;
2160 @@ -253,7 +253,7 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
2161 static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
2162 {
2163 int i = 0;
2164 - int tmpval = 0;
2165 + u8 tmpval = 0;
2166
2167 if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
2168 return 1;
2169 @@ -274,7 +274,7 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
2170 if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
2171 return 1;
2172
2173 - *val = (u8) tmpval;
2174 + *val = tmpval;
2175 return 0;
2176 }
2177
2178 diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
2179 index 22a9aae16291..1c48f2f1e14a 100644
2180 --- a/drivers/media/usb/stkwebcam/stk-webcam.c
2181 +++ b/drivers/media/usb/stkwebcam/stk-webcam.c
2182 @@ -144,7 +144,7 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
2183 return 0;
2184 }
2185
2186 -int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
2187 +int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
2188 {
2189 struct usb_device *udev = dev->udev;
2190 unsigned char *buf;
2191 @@ -163,7 +163,7 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
2192 sizeof(u8),
2193 500);
2194 if (ret >= 0)
2195 - memcpy(value, buf, sizeof(u8));
2196 + *value = *buf;
2197
2198 kfree(buf);
2199 return ret;
2200 @@ -171,9 +171,10 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
2201
2202 static int stk_start_stream(struct stk_camera *dev)
2203 {
2204 - int value;
2205 + u8 value;
2206 int i, ret;
2207 - int value_116, value_117;
2208 + u8 value_116, value_117;
2209 +
2210
2211 if (!is_present(dev))
2212 return -ENODEV;
2213 @@ -213,7 +214,7 @@ static int stk_start_stream(struct stk_camera *dev)
2214
2215 static int stk_stop_stream(struct stk_camera *dev)
2216 {
2217 - int value;
2218 + u8 value;
2219 int i;
2220 if (is_present(dev)) {
2221 stk_camera_read_reg(dev, 0x0100, &value);
2222 diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
2223 index 9bbfa3d9bfdd..92bb48e3c74e 100644
2224 --- a/drivers/media/usb/stkwebcam/stk-webcam.h
2225 +++ b/drivers/media/usb/stkwebcam/stk-webcam.h
2226 @@ -129,7 +129,7 @@ struct stk_camera {
2227 #define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
2228
2229 int stk_camera_write_reg(struct stk_camera *, u16, u8);
2230 -int stk_camera_read_reg(struct stk_camera *, u16, int *);
2231 +int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
2232
2233 int stk_sensor_init(struct stk_camera *);
2234 int stk_sensor_configure(struct stk_camera *);
2235 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
2236 index 7ee1667acde4..00dff9b5a6c4 100644
2237 --- a/drivers/message/fusion/mptsas.c
2238 +++ b/drivers/message/fusion/mptsas.c
2239 @@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
2240 .cmd_per_lun = 7,
2241 .use_clustering = ENABLE_CLUSTERING,
2242 .shost_attrs = mptscsih_host_attrs,
2243 + .no_write_same = 1,
2244 };
2245
2246 static int mptsas_get_linkerrors(struct sas_phy *phy)
2247 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2248 index 513457a2a7bf..13a015b8052b 100644
2249 --- a/drivers/net/bonding/bond_main.c
2250 +++ b/drivers/net/bonding/bond_main.c
2251 @@ -1654,8 +1654,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
2252 } /* switch(bond_mode) */
2253
2254 #ifdef CONFIG_NET_POLL_CONTROLLER
2255 - slave_dev->npinfo = bond->dev->npinfo;
2256 - if (slave_dev->npinfo) {
2257 + if (bond->dev->npinfo) {
2258 if (slave_enable_netpoll(new_slave)) {
2259 netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2260 res = -EBUSY;
2261 diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
2262 index 552de9c490c6..de336897a28a 100644
2263 --- a/drivers/net/ethernet/ti/cpsw.c
2264 +++ b/drivers/net/ethernet/ti/cpsw.c
2265 @@ -124,7 +124,7 @@ do { \
2266
2267 #define RX_PRIORITY_MAPPING 0x76543210
2268 #define TX_PRIORITY_MAPPING 0x33221100
2269 -#define CPDMA_TX_PRIORITY_MAP 0x01234567
2270 +#define CPDMA_TX_PRIORITY_MAP 0x76543210
2271
2272 #define CPSW_VLAN_AWARE BIT(1)
2273 #define CPSW_ALE_VLAN_AWARE 1
2274 diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
2275 index dc36c2ec1d10..fa2c7bd638be 100644
2276 --- a/drivers/net/ppp/pppoe.c
2277 +++ b/drivers/net/ppp/pppoe.c
2278 @@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
2279 lock_sock(sk);
2280
2281 error = -EINVAL;
2282 +
2283 + if (sockaddr_len != sizeof(struct sockaddr_pppox))
2284 + goto end;
2285 +
2286 if (sp->sa_protocol != PX_PROTO_OE)
2287 goto end;
2288
2289 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2290 index 8673ef3c9cdc..36963685d42a 100644
2291 --- a/drivers/net/team/team.c
2292 +++ b/drivers/net/team/team.c
2293 @@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
2294 }
2295 }
2296
2297 +static bool __team_option_inst_tmp_find(const struct list_head *opts,
2298 + const struct team_option_inst *needle)
2299 +{
2300 + struct team_option_inst *opt_inst;
2301 +
2302 + list_for_each_entry(opt_inst, opts, tmp_list)
2303 + if (opt_inst == needle)
2304 + return true;
2305 + return false;
2306 +}
2307 +
2308 static int __team_options_register(struct team *team,
2309 const struct team_option *option,
2310 size_t option_count)
2311 @@ -1067,14 +1078,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
2312 }
2313
2314 #ifdef CONFIG_NET_POLL_CONTROLLER
2315 -static int team_port_enable_netpoll(struct team *team, struct team_port *port)
2316 +static int __team_port_enable_netpoll(struct team_port *port)
2317 {
2318 struct netpoll *np;
2319 int err;
2320
2321 - if (!team->dev->npinfo)
2322 - return 0;
2323 -
2324 np = kzalloc(sizeof(*np), GFP_KERNEL);
2325 if (!np)
2326 return -ENOMEM;
2327 @@ -1088,6 +1096,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
2328 return err;
2329 }
2330
2331 +static int team_port_enable_netpoll(struct team_port *port)
2332 +{
2333 + if (!port->team->dev->npinfo)
2334 + return 0;
2335 +
2336 + return __team_port_enable_netpoll(port);
2337 +}
2338 +
2339 static void team_port_disable_netpoll(struct team_port *port)
2340 {
2341 struct netpoll *np = port->np;
2342 @@ -1102,7 +1118,7 @@ static void team_port_disable_netpoll(struct team_port *port)
2343 kfree(np);
2344 }
2345 #else
2346 -static int team_port_enable_netpoll(struct team *team, struct team_port *port)
2347 +static int team_port_enable_netpoll(struct team_port *port)
2348 {
2349 return 0;
2350 }
2351 @@ -1210,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
2352 goto err_vids_add;
2353 }
2354
2355 - err = team_port_enable_netpoll(team, port);
2356 + err = team_port_enable_netpoll(port);
2357 if (err) {
2358 netdev_err(dev, "Failed to enable netpoll on device %s\n",
2359 portname);
2360 @@ -1908,7 +1924,7 @@ static int team_netpoll_setup(struct net_device *dev,
2361
2362 mutex_lock(&team->lock);
2363 list_for_each_entry(port, &team->port_list, list) {
2364 - err = team_port_enable_netpoll(team, port);
2365 + err = __team_port_enable_netpoll(port);
2366 if (err) {
2367 __team_netpoll_cleanup(team);
2368 break;
2369 @@ -2569,6 +2585,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2370 if (err)
2371 goto team_put;
2372 opt_inst->changed = true;
2373 +
2374 + /* dumb/evil user-space can send us duplicate opt,
2375 + * keep only the last one
2376 + */
2377 + if (__team_option_inst_tmp_find(&opt_inst_list,
2378 + opt_inst))
2379 + continue;
2380 +
2381 list_add(&opt_inst->tmp_list, &opt_inst_list);
2382 }
2383 if (!opt_found) {
2384 diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
2385 index 4fb468666b19..99424c87b464 100644
2386 --- a/drivers/net/usb/cdc_ether.c
2387 +++ b/drivers/net/usb/cdc_ether.c
2388 @@ -530,6 +530,7 @@ static const struct driver_info wwan_info = {
2389 #define REALTEK_VENDOR_ID 0x0bda
2390 #define SAMSUNG_VENDOR_ID 0x04e8
2391 #define LENOVO_VENDOR_ID 0x17ef
2392 +#define LINKSYS_VENDOR_ID 0x13b1
2393 #define NVIDIA_VENDOR_ID 0x0955
2394 #define HP_VENDOR_ID 0x03f0
2395
2396 @@ -719,6 +720,15 @@ static const struct usb_device_id products[] = {
2397 .driver_info = 0,
2398 },
2399
2400 +#if IS_ENABLED(CONFIG_USB_RTL8152)
2401 +/* Linksys USB3GIGV1 Ethernet Adapter */
2402 +{
2403 + USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
2404 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
2405 + .driver_info = 0,
2406 +},
2407 +#endif
2408 +
2409 /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
2410 {
2411 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
2412 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
2413 index b2d7c7e32250..3cdfa2465e3f 100644
2414 --- a/drivers/net/usb/r8152.c
2415 +++ b/drivers/net/usb/r8152.c
2416 @@ -519,6 +519,7 @@ enum rtl8152_flags {
2417 #define VENDOR_ID_REALTEK 0x0bda
2418 #define VENDOR_ID_SAMSUNG 0x04e8
2419 #define VENDOR_ID_LENOVO 0x17ef
2420 +#define VENDOR_ID_LINKSYS 0x13b1
2421 #define VENDOR_ID_NVIDIA 0x0955
2422
2423 #define MCU_TYPE_PLA 0x0100
2424 @@ -4506,6 +4507,7 @@ static struct usb_device_id rtl8152_table[] = {
2425 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
2426 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
2427 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
2428 + {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
2429 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
2430 {}
2431 };
2432 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2433 index a497bf31953d..5aa5df24f4dc 100644
2434 --- a/drivers/net/wireless/ath/ath10k/mac.c
2435 +++ b/drivers/net/wireless/ath/ath10k/mac.c
2436 @@ -5819,9 +5819,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
2437 sta->addr, smps, err);
2438 }
2439
2440 - if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
2441 - changed & IEEE80211_RC_NSS_CHANGED) {
2442 - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
2443 + if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
2444 + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
2445 sta->addr);
2446
2447 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
2448 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2449 index a35f78be8dec..acef4ec928c1 100644
2450 --- a/drivers/net/wireless/ath/ath9k/hw.c
2451 +++ b/drivers/net/wireless/ath/ath9k/hw.c
2452 @@ -1603,6 +1603,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
2453 int count = 50;
2454 u32 reg, last_val;
2455
2456 + /* Check if chip failed to wake up */
2457 + if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
2458 + return false;
2459 +
2460 if (AR_SREV_9300(ah))
2461 return !ath9k_hw_detect_mac_hang(ah);
2462
2463 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2464 index 4182c3775a72..2681b5339810 100644
2465 --- a/drivers/net/wireless/mac80211_hwsim.c
2466 +++ b/drivers/net/wireless/mac80211_hwsim.c
2467 @@ -3346,8 +3346,11 @@ static void __net_exit hwsim_exit_net(struct net *net)
2468 continue;
2469
2470 list_del(&data->list);
2471 - INIT_WORK(&data->destroy_work, destroy_radio);
2472 - schedule_work(&data->destroy_work);
2473 + spin_unlock_bh(&hwsim_radio_lock);
2474 + mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
2475 + NULL);
2476 + spin_lock_bh(&hwsim_radio_lock);
2477 +
2478 }
2479 spin_unlock_bh(&hwsim_radio_lock);
2480 }
2481 diff --git a/drivers/of/base.c b/drivers/of/base.c
2482 index a0bccb54a9bd..466b285cef3e 100644
2483 --- a/drivers/of/base.c
2484 +++ b/drivers/of/base.c
2485 @@ -2109,7 +2109,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
2486 continue;
2487
2488 /* Allocate an alias_prop with enough space for the stem */
2489 - ap = dt_alloc(sizeof(*ap) + len + 1, 4);
2490 + ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
2491 if (!ap)
2492 continue;
2493 memset(ap, 0, sizeof(*ap) + len + 1);
2494 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2495 index a87c8e1aef68..9c13aeeeb973 100644
2496 --- a/drivers/pci/pci.c
2497 +++ b/drivers/pci/pci.c
2498 @@ -3756,27 +3756,49 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
2499 }
2500 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
2501
2502 -/*
2503 - * We should only need to wait 100ms after FLR, but some devices take longer.
2504 - * Wait for up to 1000ms for config space to return something other than -1.
2505 - * Intel IGD requires this when an LCD panel is attached. We read the 2nd
2506 - * dword because VFs don't implement the 1st dword.
2507 - */
2508 static void pci_flr_wait(struct pci_dev *dev)
2509 {
2510 - int i = 0;
2511 + int delay = 1, timeout = 60000;
2512 u32 id;
2513
2514 - do {
2515 - msleep(100);
2516 + /*
2517 + * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
2518 + * 100ms, but may silently discard requests while the FLR is in
2519 + * progress. Wait 100ms before trying to access the device.
2520 + */
2521 + msleep(100);
2522 +
2523 + /*
2524 + * After 100ms, the device should not silently discard config
2525 + * requests, but it may still indicate that it needs more time by
2526 + * responding to them with CRS completions. The Root Port will
2527 + * generally synthesize ~0 data to complete the read (except when
2528 + * CRS SV is enabled and the read was for the Vendor ID; in that
2529 + * case it synthesizes 0x0001 data).
2530 + *
2531 + * Wait for the device to return a non-CRS completion. Read the
2532 + * Command register instead of Vendor ID so we don't have to
2533 + * contend with the CRS SV value.
2534 + */
2535 + pci_read_config_dword(dev, PCI_COMMAND, &id);
2536 + while (id == ~0) {
2537 + if (delay > timeout) {
2538 + dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
2539 + 100 + delay - 1);
2540 + return;
2541 + }
2542 +
2543 + if (delay > 1000)
2544 + dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
2545 + 100 + delay - 1);
2546 +
2547 + msleep(delay);
2548 + delay *= 2;
2549 pci_read_config_dword(dev, PCI_COMMAND, &id);
2550 - } while (i++ < 10 && id == ~0);
2551 + }
2552
2553 - if (id == ~0)
2554 - dev_warn(&dev->dev, "Failed to return from FLR\n");
2555 - else if (i > 1)
2556 - dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
2557 - (i - 1) * 100);
2558 + if (delay > 1000)
2559 + dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
2560 }
2561
2562 static int pcie_flr(struct pci_dev *dev, int probe)
2563 diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
2564 index df63b7d997e8..b40a074822cf 100644
2565 --- a/drivers/pinctrl/intel/pinctrl-intel.c
2566 +++ b/drivers/pinctrl/intel/pinctrl-intel.c
2567 @@ -368,18 +368,6 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
2568 writel(value, padcfg0);
2569 }
2570
2571 -static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
2572 -{
2573 - u32 value;
2574 -
2575 - /* Put the pad into GPIO mode */
2576 - value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
2577 - /* Disable SCI/SMI/NMI generation */
2578 - value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
2579 - value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
2580 - writel(value, padcfg0);
2581 -}
2582 -
2583 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
2584 struct pinctrl_gpio_range *range,
2585 unsigned pin)
2586 @@ -387,6 +375,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
2587 struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
2588 void __iomem *padcfg0;
2589 unsigned long flags;
2590 + u32 value;
2591
2592 raw_spin_lock_irqsave(&pctrl->lock, flags);
2593
2594 @@ -396,7 +385,13 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
2595 }
2596
2597 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
2598 - intel_gpio_set_gpio_mode(padcfg0);
2599 + /* Put the pad into GPIO mode */
2600 + value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
2601 + /* Disable SCI/SMI/NMI generation */
2602 + value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
2603 + value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
2604 + writel(value, padcfg0);
2605 +
2606 /* Disable TX buffer and enable RX (this will be input) */
2607 __intel_gpio_set_direction(padcfg0, true);
2608
2609 @@ -775,8 +770,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
2610
2611 raw_spin_lock_irqsave(&pctrl->lock, flags);
2612
2613 - intel_gpio_set_gpio_mode(reg);
2614 -
2615 value = readl(reg);
2616
2617 value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
2618 diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
2619 index 73e2f0b79dd4..c4770a94cc8e 100644
2620 --- a/drivers/power/supply/bq2415x_charger.c
2621 +++ b/drivers/power/supply/bq2415x_charger.c
2622 @@ -1569,6 +1569,11 @@ static int bq2415x_probe(struct i2c_client *client,
2623 acpi_id =
2624 acpi_match_device(client->dev.driver->acpi_match_table,
2625 &client->dev);
2626 + if (!acpi_id) {
2627 + dev_err(&client->dev, "failed to match device name\n");
2628 + ret = -ENODEV;
2629 + goto error_1;
2630 + }
2631 name = kasprintf(GFP_KERNEL, "%s-%d", acpi_id->id, num);
2632 }
2633 if (!name) {
2634 diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
2635 index 1e560188dd13..e453d2a7d7f9 100644
2636 --- a/drivers/s390/block/dasd_alias.c
2637 +++ b/drivers/s390/block/dasd_alias.c
2638 @@ -591,13 +591,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
2639 int dasd_alias_add_device(struct dasd_device *device)
2640 {
2641 struct dasd_eckd_private *private = device->private;
2642 - struct alias_lcu *lcu;
2643 + __u8 uaddr = private->uid.real_unit_addr;
2644 + struct alias_lcu *lcu = private->lcu;
2645 unsigned long flags;
2646 int rc;
2647
2648 - lcu = private->lcu;
2649 rc = 0;
2650 spin_lock_irqsave(&lcu->lock, flags);
2651 + /*
2652 + * Check if device and lcu type differ. If so, the uac data may be
2653 + * outdated and needs to be updated.
2654 + */
2655 + if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
2656 + lcu->flags |= UPDATE_PENDING;
2657 + DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2658 + "uid type mismatch - trigger rescan");
2659 + }
2660 if (!(lcu->flags & UPDATE_PENDING)) {
2661 rc = _add_device_to_lcu(lcu, device, device);
2662 if (rc)
2663 diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
2664 index 41e28b23b26a..8ac27efe34fc 100644
2665 --- a/drivers/s390/char/Makefile
2666 +++ b/drivers/s390/char/Makefile
2667 @@ -2,6 +2,8 @@
2668 # S/390 character devices
2669 #
2670
2671 +CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
2672 +
2673 obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
2674 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
2675 sclp_early.o
2676 diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
2677 index 11674698b36d..67903c93328b 100644
2678 --- a/drivers/s390/cio/chsc.c
2679 +++ b/drivers/s390/cio/chsc.c
2680 @@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
2681
2682 static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
2683 {
2684 + struct channel_path *chp;
2685 struct chp_link link;
2686 struct chp_id chpid;
2687 int status;
2688 @@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
2689 chpid.id = sei_area->rsid;
2690 /* allocate a new channel path structure, if needed */
2691 status = chp_get_status(chpid);
2692 - if (status < 0)
2693 - chp_new(chpid);
2694 - else if (!status)
2695 + if (!status)
2696 return;
2697 +
2698 + if (status < 0) {
2699 + chp_new(chpid);
2700 + } else {
2701 + chp = chpid_to_chp(chpid);
2702 + mutex_lock(&chp->lock);
2703 + chp_update_desc(chp);
2704 + mutex_unlock(&chp->lock);
2705 + }
2706 memset(&link, 0, sizeof(struct chp_link));
2707 link.chpid = chpid;
2708 if ((sei_area->vf & 0xc0) != 0) {
2709 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
2710 index 2d9a8067eaca..579aa9accafc 100644
2711 --- a/drivers/usb/musb/musb_core.c
2712 +++ b/drivers/usb/musb/musb_core.c
2713 @@ -1774,6 +1774,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
2714 int vbus;
2715 u8 devctl;
2716
2717 + pm_runtime_get_sync(dev);
2718 spin_lock_irqsave(&musb->lock, flags);
2719 val = musb->a_wait_bcon;
2720 vbus = musb_platform_get_vbus_status(musb);
2721 @@ -1787,6 +1788,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
2722 vbus = 0;
2723 }
2724 spin_unlock_irqrestore(&musb->lock, flags);
2725 + pm_runtime_put_sync(dev);
2726
2727 return sprintf(buf, "Vbus %s, timeout %lu msec\n",
2728 vbus ? "on" : "off", val);
2729 @@ -2483,10 +2485,11 @@ static int musb_remove(struct platform_device *pdev)
2730 musb_generic_disable(musb);
2731 spin_unlock_irqrestore(&musb->lock, flags);
2732 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2733 + musb_platform_exit(musb);
2734 +
2735 pm_runtime_dont_use_autosuspend(musb->controller);
2736 pm_runtime_put_sync(musb->controller);
2737 pm_runtime_disable(musb->controller);
2738 - musb_platform_exit(musb);
2739 musb_phy_callback = NULL;
2740 if (musb->dma_controller)
2741 musb_dma_controller_destroy(musb->dma_controller);
2742 @@ -2710,7 +2713,8 @@ static int musb_resume(struct device *dev)
2743 if ((devctl & mask) != (musb->context.devctl & mask))
2744 musb->port1_status = 0;
2745
2746 - musb_start(musb);
2747 + musb_enable_interrupts(musb);
2748 + musb_platform_enable(musb);
2749
2750 spin_lock_irqsave(&musb->lock, flags);
2751 error = musb_run_resume_work(musb);
2752 diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
2753 index d9cbda269462..331ddd07e505 100644
2754 --- a/fs/cifs/dir.c
2755 +++ b/fs/cifs/dir.c
2756 @@ -673,6 +673,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
2757 goto mknod_out;
2758 }
2759
2760 + if (!S_ISCHR(mode) && !S_ISBLK(mode))
2761 + goto mknod_out;
2762 +
2763 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
2764 goto mknod_out;
2765
2766 @@ -681,10 +684,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
2767
2768 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
2769 if (buf == NULL) {
2770 - kfree(full_path);
2771 rc = -ENOMEM;
2772 - free_xid(xid);
2773 - return rc;
2774 + goto mknod_out;
2775 }
2776
2777 if (backup_cred(cifs_sb))
2778 @@ -731,7 +732,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
2779 pdev->minor = cpu_to_le64(MINOR(device_number));
2780 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
2781 &bytes_written, iov, 1);
2782 - } /* else if (S_ISFIFO) */
2783 + }
2784 tcon->ses->server->ops->close(xid, tcon, &fid);
2785 d_drop(direntry);
2786
2787 diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
2788 index 542e33d29088..d10bb2c30bf8 100644
2789 --- a/fs/jbd2/journal.c
2790 +++ b/fs/jbd2/journal.c
2791 @@ -276,11 +276,11 @@ static int kjournald2(void *arg)
2792 goto loop;
2793
2794 end_loop:
2795 - write_unlock(&journal->j_state_lock);
2796 del_timer_sync(&journal->j_commit_timer);
2797 journal->j_task = NULL;
2798 wake_up(&journal->j_wait_done_commit);
2799 jbd_debug(1, "Journal thread exiting.\n");
2800 + write_unlock(&journal->j_state_lock);
2801 return 0;
2802 }
2803
2804 diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
2805 index 8feecd5345e7..7e39719e27cb 100644
2806 --- a/include/linux/if_vlan.h
2807 +++ b/include/linux/if_vlan.h
2808 @@ -600,7 +600,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
2809 * Returns true if the skb is tagged with multiple vlan headers, regardless
2810 * of whether it is hardware accelerated or not.
2811 */
2812 -static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2813 +static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
2814 {
2815 __be16 protocol = skb->protocol;
2816
2817 @@ -610,6 +610,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2818 if (likely(!eth_type_vlan(protocol)))
2819 return false;
2820
2821 + if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
2822 + return false;
2823 +
2824 veh = (struct vlan_ethhdr *)skb->data;
2825 protocol = veh->h_vlan_encapsulated_proto;
2826 }
2827 @@ -627,7 +630,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2828 *
2829 * Returns features without unsafe ones if the skb has multiple tags.
2830 */
2831 -static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
2832 +static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
2833 netdev_features_t features)
2834 {
2835 if (skb_vlan_tagged_multi(skb)) {
2836 diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
2837 index fe994d2e5286..ea985aa7a6c5 100644
2838 --- a/include/net/llc_conn.h
2839 +++ b/include/net/llc_conn.h
2840 @@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
2841
2842 struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
2843 struct proto *prot, int kern);
2844 +void llc_sk_stop_all_timers(struct sock *sk, bool sync);
2845 void llc_sk_free(struct sock *sk);
2846
2847 void llc_sk_reset(struct sock *sk);
2848 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
2849 index 4ee67cb99143..05b9bb63dbec 100644
2850 --- a/include/uapi/linux/kvm.h
2851 +++ b/include/uapi/linux/kvm.h
2852 @@ -870,6 +870,7 @@ struct kvm_ppc_smmu_info {
2853 #define KVM_CAP_S390_USER_INSTR0 130
2854 #define KVM_CAP_MSI_DEVID 131
2855 #define KVM_CAP_PPC_HTM 132
2856 +#define KVM_CAP_S390_BPB 152
2857
2858 #ifdef KVM_CAP_IRQ_ROUTING
2859
2860 diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
2861 index 411226b26bca..04988d6466bf 100644
2862 --- a/kernel/events/callchain.c
2863 +++ b/kernel/events/callchain.c
2864 @@ -117,19 +117,22 @@ int get_callchain_buffers(int event_max_stack)
2865 goto exit;
2866 }
2867
2868 + /*
2869 + * If requesting per event more than the global cap,
2870 + * return a different error to help userspace figure
2871 + * this out.
2872 + *
2873 + * And also do it here so that we have &callchain_mutex held.
2874 + */
2875 + if (event_max_stack > sysctl_perf_event_max_stack) {
2876 + err = -EOVERFLOW;
2877 + goto exit;
2878 + }
2879 +
2880 if (count > 1) {
2881 /* If the allocation failed, give up */
2882 if (!callchain_cpus_entries)
2883 err = -ENOMEM;
2884 - /*
2885 - * If requesting per event more than the global cap,
2886 - * return a different error to help userspace figure
2887 - * this out.
2888 - *
2889 - * And also do it here so that we have &callchain_mutex held.
2890 - */
2891 - if (event_max_stack > sysctl_perf_event_max_stack)
2892 - err = -EOVERFLOW;
2893 goto exit;
2894 }
2895
2896 diff --git a/kernel/events/core.c b/kernel/events/core.c
2897 index 74710fad35d5..b1d6b9888fba 100644
2898 --- a/kernel/events/core.c
2899 +++ b/kernel/events/core.c
2900 @@ -9456,9 +9456,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
2901 * __u16 sample size limit.
2902 */
2903 if (attr->sample_stack_user >= USHRT_MAX)
2904 - ret = -EINVAL;
2905 + return -EINVAL;
2906 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
2907 - ret = -EINVAL;
2908 + return -EINVAL;
2909 }
2910
2911 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
2912 diff --git a/net/core/dev.c b/net/core/dev.c
2913 index 3d9190c2940d..5407d5f7b2d0 100644
2914 --- a/net/core/dev.c
2915 +++ b/net/core/dev.c
2916 @@ -2871,7 +2871,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
2917 }
2918 EXPORT_SYMBOL(passthru_features_check);
2919
2920 -static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2921 +static netdev_features_t dflt_features_check(struct sk_buff *skb,
2922 struct net_device *dev,
2923 netdev_features_t features)
2924 {
2925 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2926 index a426790b0688..128c811dcb1a 100644
2927 --- a/net/core/neighbour.c
2928 +++ b/net/core/neighbour.c
2929 @@ -54,7 +54,8 @@ do { \
2930 static void neigh_timer_handler(unsigned long arg);
2931 static void __neigh_notify(struct neighbour *n, int type, int flags);
2932 static void neigh_update_notify(struct neighbour *neigh);
2933 -static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
2934 +static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
2935 + struct net_device *dev);
2936
2937 #ifdef CONFIG_PROC_FS
2938 static const struct file_operations neigh_stat_seq_fops;
2939 @@ -254,8 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
2940 {
2941 write_lock_bh(&tbl->lock);
2942 neigh_flush_dev(tbl, dev);
2943 - pneigh_ifdown(tbl, dev);
2944 - write_unlock_bh(&tbl->lock);
2945 + pneigh_ifdown_and_unlock(tbl, dev);
2946
2947 del_timer_sync(&tbl->proxy_timer);
2948 pneigh_queue_purge(&tbl->proxy_queue);
2949 @@ -645,9 +645,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
2950 return -ENOENT;
2951 }
2952
2953 -static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
2954 +static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
2955 + struct net_device *dev)
2956 {
2957 - struct pneigh_entry *n, **np;
2958 + struct pneigh_entry *n, **np, *freelist = NULL;
2959 u32 h;
2960
2961 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
2962 @@ -655,16 +656,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
2963 while ((n = *np) != NULL) {
2964 if (!dev || n->dev == dev) {
2965 *np = n->next;
2966 - if (tbl->pdestructor)
2967 - tbl->pdestructor(n);
2968 - if (n->dev)
2969 - dev_put(n->dev);
2970 - kfree(n);
2971 + n->next = freelist;
2972 + freelist = n;
2973 continue;
2974 }
2975 np = &n->next;
2976 }
2977 }
2978 + write_unlock_bh(&tbl->lock);
2979 + while ((n = freelist)) {
2980 + freelist = n->next;
2981 + n->next = NULL;
2982 + if (tbl->pdestructor)
2983 + tbl->pdestructor(n);
2984 + if (n->dev)
2985 + dev_put(n->dev);
2986 + kfree(n);
2987 + }
2988 return -ENOENT;
2989 }
2990
2991 @@ -2279,12 +2287,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2992
2993 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2994 if (!err) {
2995 - if (tb[NDA_IFINDEX])
2996 + if (tb[NDA_IFINDEX]) {
2997 + if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2998 + return -EINVAL;
2999 filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
3000 -
3001 - if (tb[NDA_MASTER])
3002 + }
3003 + if (tb[NDA_MASTER]) {
3004 + if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
3005 + return -EINVAL;
3006 filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
3007 -
3008 + }
3009 if (filter_idx || filter_master_idx)
3010 flags |= NLM_F_DUMP_FILTERED;
3011 }
3012 diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
3013 index e1d4d898a007..f0252768ecf4 100644
3014 --- a/net/dns_resolver/dns_key.c
3015 +++ b/net/dns_resolver/dns_key.c
3016 @@ -25,6 +25,7 @@
3017 #include <linux/moduleparam.h>
3018 #include <linux/slab.h>
3019 #include <linux/string.h>
3020 +#include <linux/ratelimit.h>
3021 #include <linux/kernel.h>
3022 #include <linux/keyctl.h>
3023 #include <linux/err.h>
3024 @@ -91,9 +92,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
3025
3026 next_opt = memchr(opt, '#', end - opt) ?: end;
3027 opt_len = next_opt - opt;
3028 - if (!opt_len) {
3029 - printk(KERN_WARNING
3030 - "Empty option to dns_resolver key\n");
3031 + if (opt_len <= 0 || opt_len > 128) {
3032 + pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
3033 + opt_len);
3034 return -EINVAL;
3035 }
3036
3037 @@ -127,10 +128,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
3038 }
3039
3040 bad_option_value:
3041 - printk(KERN_WARNING
3042 - "Option '%*.*s' to dns_resolver key:"
3043 - " bad/missing value\n",
3044 - opt_nlen, opt_nlen, opt);
3045 + pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
3046 + opt_nlen, opt_nlen, opt);
3047 return -EINVAL;
3048 } while (opt = next_opt + 1, opt < end);
3049 }
3050 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3051 index 0d1a767db1bb..0fc5dad02fe8 100644
3052 --- a/net/ipv4/tcp.c
3053 +++ b/net/ipv4/tcp.c
3054 @@ -2662,8 +2662,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
3055
3056 #ifdef CONFIG_TCP_MD5SIG
3057 case TCP_MD5SIG:
3058 - /* Read the IP->Key mappings from userspace */
3059 - err = tp->af_specific->md5_parse(sk, optval, optlen);
3060 + if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
3061 + err = tp->af_specific->md5_parse(sk, optval, optlen);
3062 + else
3063 + err = -EINVAL;
3064 break;
3065 #endif
3066 case TCP_USER_TIMEOUT:
3067 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3068 index eb05ad940e37..52b0a84be765 100644
3069 --- a/net/ipv4/tcp_input.c
3070 +++ b/net/ipv4/tcp_input.c
3071 @@ -3943,11 +3943,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
3072 int length = (th->doff << 2) - sizeof(*th);
3073 const u8 *ptr = (const u8 *)(th + 1);
3074
3075 - /* If the TCP option is too short, we can short cut */
3076 - if (length < TCPOLEN_MD5SIG)
3077 - return NULL;
3078 -
3079 - while (length > 0) {
3080 + /* If not enough data remaining, we can short cut */
3081 + while (length >= TCPOLEN_MD5SIG) {
3082 int opcode = *ptr++;
3083 int opsize;
3084
3085 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3086 index d6a4b2c73a7c..f6ac472acd0f 100644
3087 --- a/net/ipv6/route.c
3088 +++ b/net/ipv6/route.c
3089 @@ -2811,6 +2811,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
3090
3091 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3092 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
3093 + [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
3094 [RTA_OIF] = { .type = NLA_U32 },
3095 [RTA_IIF] = { .type = NLA_U32 },
3096 [RTA_PRIORITY] = { .type = NLA_U32 },
3097 @@ -2820,6 +2821,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3098 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
3099 [RTA_ENCAP] = { .type = NLA_NESTED },
3100 [RTA_EXPIRES] = { .type = NLA_U32 },
3101 + [RTA_TABLE] = { .type = NLA_U32 },
3102 };
3103
3104 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
3105 diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
3106 index 163f1fa53917..9b214f313cc0 100644
3107 --- a/net/l2tp/l2tp_ppp.c
3108 +++ b/net/l2tp/l2tp_ppp.c
3109 @@ -590,6 +590,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
3110 lock_sock(sk);
3111
3112 error = -EINVAL;
3113 +
3114 + if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
3115 + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
3116 + sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
3117 + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
3118 + goto end;
3119 +
3120 if (sp->sa_protocol != PX_PROTO_OL2TP)
3121 goto end;
3122
3123 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
3124 index f7caf0f5d9c8..d6bc5f2a1175 100644
3125 --- a/net/llc/af_llc.c
3126 +++ b/net/llc/af_llc.c
3127 @@ -197,9 +197,19 @@ static int llc_ui_release(struct socket *sock)
3128 llc->laddr.lsap, llc->daddr.lsap);
3129 if (!llc_send_disc(sk))
3130 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
3131 - if (!sock_flag(sk, SOCK_ZAPPED))
3132 + if (!sock_flag(sk, SOCK_ZAPPED)) {
3133 + struct llc_sap *sap = llc->sap;
3134 +
3135 + /* Hold this for release_sock(), so that llc_backlog_rcv()
3136 + * could still use it.
3137 + */
3138 + llc_sap_hold(sap);
3139 llc_sap_remove_socket(llc->sap, sk);
3140 - release_sock(sk);
3141 + release_sock(sk);
3142 + llc_sap_put(sap);
3143 + } else {
3144 + release_sock(sk);
3145 + }
3146 if (llc->dev)
3147 dev_put(llc->dev);
3148 sock_put(sk);
3149 diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
3150 index ea225bd2672c..f8d4ab8ca1a5 100644
3151 --- a/net/llc/llc_c_ac.c
3152 +++ b/net/llc/llc_c_ac.c
3153 @@ -1096,14 +1096,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
3154
3155 int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
3156 {
3157 - struct llc_sock *llc = llc_sk(sk);
3158 -
3159 - del_timer(&llc->pf_cycle_timer.timer);
3160 - del_timer(&llc->ack_timer.timer);
3161 - del_timer(&llc->rej_sent_timer.timer);
3162 - del_timer(&llc->busy_state_timer.timer);
3163 - llc->ack_must_be_send = 0;
3164 - llc->ack_pf = 0;
3165 + llc_sk_stop_all_timers(sk, false);
3166 return 0;
3167 }
3168
3169 diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
3170 index 8bc5a1bd2d45..d861b74ad068 100644
3171 --- a/net/llc/llc_conn.c
3172 +++ b/net/llc/llc_conn.c
3173 @@ -951,6 +951,26 @@ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct pr
3174 return sk;
3175 }
3176
3177 +void llc_sk_stop_all_timers(struct sock *sk, bool sync)
3178 +{
3179 + struct llc_sock *llc = llc_sk(sk);
3180 +
3181 + if (sync) {
3182 + del_timer_sync(&llc->pf_cycle_timer.timer);
3183 + del_timer_sync(&llc->ack_timer.timer);
3184 + del_timer_sync(&llc->rej_sent_timer.timer);
3185 + del_timer_sync(&llc->busy_state_timer.timer);
3186 + } else {
3187 + del_timer(&llc->pf_cycle_timer.timer);
3188 + del_timer(&llc->ack_timer.timer);
3189 + del_timer(&llc->rej_sent_timer.timer);
3190 + del_timer(&llc->busy_state_timer.timer);
3191 + }
3192 +
3193 + llc->ack_must_be_send = 0;
3194 + llc->ack_pf = 0;
3195 +}
3196 +
3197 /**
3198 * llc_sk_free - Frees a LLC socket
3199 * @sk - socket to free
3200 @@ -963,7 +983,7 @@ void llc_sk_free(struct sock *sk)
3201
3202 llc->state = LLC_CONN_OUT_OF_SVC;
3203 /* Stop all (possibly) running timers */
3204 - llc_conn_ac_stop_all_timers(sk, NULL);
3205 + llc_sk_stop_all_timers(sk, true);
3206 #ifdef DEBUG_LLC_CONN_ALLOC
3207 printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
3208 skb_queue_len(&llc->pdu_unack_q),
3209 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3210 index 267db0d603bc..a027f8c00944 100644
3211 --- a/net/packet/af_packet.c
3212 +++ b/net/packet/af_packet.c
3213 @@ -333,11 +333,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
3214 skb_set_queue_mapping(skb, queue_index);
3215 }
3216
3217 -/* register_prot_hook must be invoked with the po->bind_lock held,
3218 +/* __register_prot_hook must be invoked through register_prot_hook
3219 * or from a context in which asynchronous accesses to the packet
3220 * socket is not possible (packet_create()).
3221 */
3222 -static void register_prot_hook(struct sock *sk)
3223 +static void __register_prot_hook(struct sock *sk)
3224 {
3225 struct packet_sock *po = pkt_sk(sk);
3226
3227 @@ -352,8 +352,13 @@ static void register_prot_hook(struct sock *sk)
3228 }
3229 }
3230
3231 -/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
3232 - * held. If the sync parameter is true, we will temporarily drop
3233 +static void register_prot_hook(struct sock *sk)
3234 +{
3235 + lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
3236 + __register_prot_hook(sk);
3237 +}
3238 +
3239 +/* If the sync parameter is true, we will temporarily drop
3240 * the po->bind_lock and do a synchronize_net to make sure no
3241 * asynchronous packet processing paths still refer to the elements
3242 * of po->prot_hook. If the sync parameter is false, it is the
3243 @@ -363,6 +368,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
3244 {
3245 struct packet_sock *po = pkt_sk(sk);
3246
3247 + lockdep_assert_held_once(&po->bind_lock);
3248 +
3249 po->running = 0;
3250
3251 if (po->fanout)
3252 @@ -3017,6 +3024,7 @@ static int packet_release(struct socket *sock)
3253
3254 packet_flush_mclist(sk);
3255
3256 + lock_sock(sk);
3257 if (po->rx_ring.pg_vec) {
3258 memset(&req_u, 0, sizeof(req_u));
3259 packet_set_ring(sk, &req_u, 1, 0);
3260 @@ -3026,6 +3034,7 @@ static int packet_release(struct socket *sock)
3261 memset(&req_u, 0, sizeof(req_u));
3262 packet_set_ring(sk, &req_u, 1, 1);
3263 }
3264 + release_sock(sk);
3265
3266 f = fanout_release(sk);
3267
3268 @@ -3259,7 +3268,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3269
3270 if (proto) {
3271 po->prot_hook.type = proto;
3272 - register_prot_hook(sk);
3273 + __register_prot_hook(sk);
3274 }
3275
3276 mutex_lock(&net->packet.sklist_lock);
3277 @@ -3654,6 +3663,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3278 union tpacket_req_u req_u;
3279 int len;
3280
3281 + lock_sock(sk);
3282 switch (po->tp_version) {
3283 case TPACKET_V1:
3284 case TPACKET_V2:
3285 @@ -3664,12 +3674,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3286 len = sizeof(req_u.req3);
3287 break;
3288 }
3289 - if (optlen < len)
3290 - return -EINVAL;
3291 - if (copy_from_user(&req_u.req, optval, len))
3292 - return -EFAULT;
3293 - return packet_set_ring(sk, &req_u, 0,
3294 - optname == PACKET_TX_RING);
3295 + if (optlen < len) {
3296 + ret = -EINVAL;
3297 + } else {
3298 + if (copy_from_user(&req_u.req, optval, len))
3299 + ret = -EFAULT;
3300 + else
3301 + ret = packet_set_ring(sk, &req_u, 0,
3302 + optname == PACKET_TX_RING);
3303 + }
3304 + release_sock(sk);
3305 + return ret;
3306 }
3307 case PACKET_COPY_THRESH:
3308 {
3309 @@ -3735,12 +3750,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3310
3311 if (optlen != sizeof(val))
3312 return -EINVAL;
3313 - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3314 - return -EBUSY;
3315 if (copy_from_user(&val, optval, sizeof(val)))
3316 return -EFAULT;
3317 - po->tp_loss = !!val;
3318 - return 0;
3319 +
3320 + lock_sock(sk);
3321 + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3322 + ret = -EBUSY;
3323 + } else {
3324 + po->tp_loss = !!val;
3325 + ret = 0;
3326 + }
3327 + release_sock(sk);
3328 + return ret;
3329 }
3330 case PACKET_AUXDATA:
3331 {
3332 @@ -3751,7 +3772,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3333 if (copy_from_user(&val, optval, sizeof(val)))
3334 return -EFAULT;
3335
3336 + lock_sock(sk);
3337 po->auxdata = !!val;
3338 + release_sock(sk);
3339 return 0;
3340 }
3341 case PACKET_ORIGDEV:
3342 @@ -3763,7 +3786,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3343 if (copy_from_user(&val, optval, sizeof(val)))
3344 return -EFAULT;
3345
3346 + lock_sock(sk);
3347 po->origdev = !!val;
3348 + release_sock(sk);
3349 return 0;
3350 }
3351 case PACKET_VNET_HDR:
3352 @@ -3772,15 +3797,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3353
3354 if (sock->type != SOCK_RAW)
3355 return -EINVAL;
3356 - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3357 - return -EBUSY;
3358 if (optlen < sizeof(val))
3359 return -EINVAL;
3360 if (copy_from_user(&val, optval, sizeof(val)))
3361 return -EFAULT;
3362
3363 - po->has_vnet_hdr = !!val;
3364 - return 0;
3365 + lock_sock(sk);
3366 + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3367 + ret = -EBUSY;
3368 + } else {
3369 + po->has_vnet_hdr = !!val;
3370 + ret = 0;
3371 + }
3372 + release_sock(sk);
3373 + return ret;
3374 }
3375 case PACKET_TIMESTAMP:
3376 {
3377 @@ -3818,11 +3848,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3378
3379 if (optlen != sizeof(val))
3380 return -EINVAL;
3381 - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3382 - return -EBUSY;
3383 if (copy_from_user(&val, optval, sizeof(val)))
3384 return -EFAULT;
3385 - po->tp_tx_has_off = !!val;
3386 +
3387 + lock_sock(sk);
3388 + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3389 + ret = -EBUSY;
3390 + } else {
3391 + po->tp_tx_has_off = !!val;
3392 + ret = 0;
3393 + }
3394 + release_sock(sk);
3395 return 0;
3396 }
3397 case PACKET_QDISC_BYPASS:
3398 @@ -4219,7 +4255,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3399 /* Added to avoid minimal code churn */
3400 struct tpacket_req *req = &req_u->req;
3401
3402 - lock_sock(sk);
3403 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3404 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3405 net_warn_ratelimited("Tx-ring is not supported.\n");
3406 @@ -4355,7 +4390,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3407 if (pg_vec)
3408 free_pg_vec(pg_vec, order, req->tp_block_nr);
3409 out:
3410 - release_sock(sk);
3411 return err;
3412 }
3413
3414 diff --git a/net/packet/internal.h b/net/packet/internal.h
3415 index d55bfc34d6b3..1309e2a7baad 100644
3416 --- a/net/packet/internal.h
3417 +++ b/net/packet/internal.h
3418 @@ -109,10 +109,12 @@ struct packet_sock {
3419 int copy_thresh;
3420 spinlock_t bind_lock;
3421 struct mutex pg_vec_lock;
3422 - unsigned int running:1, /* prot_hook is attached*/
3423 - auxdata:1,
3424 + unsigned int running; /* bind_lock must be held */
3425 + unsigned int auxdata:1, /* writer must hold sock lock */
3426 origdev:1,
3427 - has_vnet_hdr:1;
3428 + has_vnet_hdr:1,
3429 + tp_loss:1,
3430 + tp_tx_has_off:1;
3431 int pressure;
3432 int ifindex; /* bound device */
3433 __be16 num;
3434 @@ -122,8 +124,6 @@ struct packet_sock {
3435 enum tpacket_versions tp_version;
3436 unsigned int tp_hdrlen;
3437 unsigned int tp_reserve;
3438 - unsigned int tp_loss:1;
3439 - unsigned int tp_tx_has_off:1;
3440 unsigned int tp_tstamp;
3441 struct net_device __rcu *cached_dev;
3442 int (*xmit)(struct sk_buff *skb);
3443 diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
3444 index 95c463cbb9a6..235db2c9bbbb 100644
3445 --- a/net/sched/act_ife.c
3446 +++ b/net/sched/act_ife.c
3447 @@ -634,7 +634,7 @@ int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
3448 }
3449 }
3450
3451 - return 0;
3452 + return -ENOENT;
3453 }
3454
3455 struct ifeheadr {
3456 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3457 index 355d95a7cd81..e031797ad311 100644
3458 --- a/net/sctp/ipv6.c
3459 +++ b/net/sctp/ipv6.c
3460 @@ -521,46 +521,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
3461 addr->v6.sin6_scope_id = 0;
3462 }
3463
3464 -/* Compare addresses exactly.
3465 - * v4-mapped-v6 is also in consideration.
3466 - */
3467 -static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
3468 - const union sctp_addr *addr2)
3469 +static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
3470 + const union sctp_addr *addr2)
3471 {
3472 if (addr1->sa.sa_family != addr2->sa.sa_family) {
3473 if (addr1->sa.sa_family == AF_INET &&
3474 addr2->sa.sa_family == AF_INET6 &&
3475 - ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
3476 - if (addr2->v6.sin6_port == addr1->v4.sin_port &&
3477 - addr2->v6.sin6_addr.s6_addr32[3] ==
3478 - addr1->v4.sin_addr.s_addr)
3479 - return 1;
3480 - }
3481 + ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
3482 + addr2->v6.sin6_addr.s6_addr32[3] ==
3483 + addr1->v4.sin_addr.s_addr)
3484 + return 1;
3485 +
3486 if (addr2->sa.sa_family == AF_INET &&
3487 addr1->sa.sa_family == AF_INET6 &&
3488 - ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
3489 - if (addr1->v6.sin6_port == addr2->v4.sin_port &&
3490 - addr1->v6.sin6_addr.s6_addr32[3] ==
3491 - addr2->v4.sin_addr.s_addr)
3492 - return 1;
3493 - }
3494 + ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
3495 + addr1->v6.sin6_addr.s6_addr32[3] ==
3496 + addr2->v4.sin_addr.s_addr)
3497 + return 1;
3498 +
3499 return 0;
3500 }
3501 - if (addr1->v6.sin6_port != addr2->v6.sin6_port)
3502 - return 0;
3503 +
3504 if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
3505 return 0;
3506 +
3507 /* If this is a linklocal address, compare the scope_id. */
3508 - if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
3509 - if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
3510 - (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
3511 - return 0;
3512 - }
3513 - }
3514 + if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
3515 + addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
3516 + addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
3517 + return 0;
3518
3519 return 1;
3520 }
3521
3522 +/* Compare addresses exactly.
3523 + * v4-mapped-v6 is also in consideration.
3524 + */
3525 +static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
3526 + const union sctp_addr *addr2)
3527 +{
3528 + return __sctp_v6_cmp_addr(addr1, addr2) &&
3529 + addr1->v6.sin6_port == addr2->v6.sin6_port;
3530 +}
3531 +
3532 /* Initialize addr struct to INADDR_ANY. */
3533 static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
3534 {
3535 @@ -844,8 +847,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
3536 const union sctp_addr *addr2,
3537 struct sctp_sock *opt)
3538 {
3539 - struct sctp_af *af1, *af2;
3540 struct sock *sk = sctp_opt2sk(opt);
3541 + struct sctp_af *af1, *af2;
3542
3543 af1 = sctp_get_af_specific(addr1->sa.sa_family);
3544 af2 = sctp_get_af_specific(addr2->sa.sa_family);
3545 @@ -861,10 +864,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
3546 if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
3547 return 1;
3548
3549 - if (addr1->sa.sa_family != addr2->sa.sa_family)
3550 - return 0;
3551 -
3552 - return af1->cmp_addr(addr1, addr2);
3553 + return __sctp_v6_cmp_addr(addr1, addr2);
3554 }
3555
3556 /* Verify that the provided sockaddr looks bindable. Common verification,
3557 diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
3558 index 6cbc935ddd96..bbee334ab1b0 100644
3559 --- a/net/strparser/strparser.c
3560 +++ b/net/strparser/strparser.c
3561 @@ -285,9 +285,9 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
3562 strp_start_rx_timer(strp);
3563 }
3564
3565 + rxm->accum_len += cand_len;
3566 strp->rx_need_bytes = rxm->strp.full_len -
3567 rxm->accum_len;
3568 - rxm->accum_len += cand_len;
3569 rxm->early_eaten = cand_len;
3570 STRP_STATS_ADD(strp->stats.rx_bytes, cand_len);
3571 desc->count = 0; /* Stop reading socket */
3572 @@ -310,6 +310,7 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
3573 /* Hurray, we have a new message! */
3574 del_timer(&strp->rx_msg_timer);
3575 strp->rx_skb_head = NULL;
3576 + strp->rx_need_bytes = 0;
3577 STRP_STATS_INCR(strp->stats.rx_msgs);
3578
3579 /* Give skb to upper layer */
3580 @@ -374,9 +375,7 @@ void strp_data_ready(struct strparser *strp)
3581 return;
3582
3583 if (strp->rx_need_bytes) {
3584 - if (strp_peek_len(strp) >= strp->rx_need_bytes)
3585 - strp->rx_need_bytes = 0;
3586 - else
3587 + if (strp_peek_len(strp) < strp->rx_need_bytes)
3588 return;
3589 }
3590
3591 diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
3592 index 3200059d14b2..9ba3c462f86e 100644
3593 --- a/net/tipc/netlink.c
3594 +++ b/net/tipc/netlink.c
3595 @@ -79,7 +79,8 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
3596
3597 const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
3598 [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
3599 - [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
3600 + [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
3601 + [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
3602 };
3603
3604 const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
3605 diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
3606 index 4bc58822416c..d2c6cdd9d42b 100644
3607 --- a/tools/perf/util/dso.c
3608 +++ b/tools/perf/util/dso.c
3609 @@ -366,23 +366,7 @@ static int __open_dso(struct dso *dso, struct machine *machine)
3610 if (!is_regular_file(name))
3611 return -EINVAL;
3612
3613 - if (dso__needs_decompress(dso)) {
3614 - char newpath[KMOD_DECOMP_LEN];
3615 - size_t len = sizeof(newpath);
3616 -
3617 - if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
3618 - free(name);
3619 - return -dso->load_errno;
3620 - }
3621 -
3622 - strcpy(name, newpath);
3623 - }
3624 -
3625 fd = do_open(name);
3626 -
3627 - if (dso__needs_decompress(dso))
3628 - unlink(name);
3629 -
3630 free(name);
3631 return fd;
3632 }