Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0137-4.14.38-all-fixes.patch

Parent Directory | Revision Log


Revision 3238 - (hide annotations) (download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 7 months ago) by niro
File size: 139458 byte(s)
-added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2     index fb385af482ff..8cfb44ffe853 100644
3     --- a/Documentation/admin-guide/kernel-parameters.txt
4     +++ b/Documentation/admin-guide/kernel-parameters.txt
5     @@ -2541,6 +2541,9 @@
6    
7     noalign [KNL,ARM]
8    
9     + noaltinstr [S390] Disables alternative instructions patching
10     + (CPU alternatives feature).
11     +
12     noapic [SMP,APIC] Tells the kernel to not make use of any
13     IOAPICs that may be present in the system.
14    
15     diff --git a/Makefile b/Makefile
16     index ee330f5449e6..27a8d5c37180 100644
17     --- a/Makefile
18     +++ b/Makefile
19     @@ -1,7 +1,7 @@
20     # SPDX-License-Identifier: GPL-2.0
21     VERSION = 4
22     PATCHLEVEL = 14
23     -SUBLEVEL = 37
24     +SUBLEVEL = 38
25     EXTRAVERSION =
26     NAME = Petit Gorille
27    
28     diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
29     index 910628d18add..1fc5060d7027 100644
30     --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
31     +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
32     @@ -155,17 +155,6 @@
33     regulator-min-microvolt = <5000000>;
34     regulator-max-microvolt = <5000000>;
35     };
36     -
37     - vdd_log: vdd-log {
38     - compatible = "pwm-regulator";
39     - pwms = <&pwm2 0 25000 0>;
40     - regulator-name = "vdd_log";
41     - regulator-min-microvolt = <800000>;
42     - regulator-max-microvolt = <1400000>;
43     - regulator-always-on;
44     - regulator-boot-on;
45     - status = "okay";
46     - };
47     };
48    
49     &cpu_b0 {
50     diff --git a/arch/microblaze/Kconfig.platform b/arch/microblaze/Kconfig.platform
51     index 1b3d8c849101..f7f1739c11b9 100644
52     --- a/arch/microblaze/Kconfig.platform
53     +++ b/arch/microblaze/Kconfig.platform
54     @@ -20,6 +20,7 @@ config OPT_LIB_FUNCTION
55     config OPT_LIB_ASM
56     bool "Optimalized lib function ASM"
57     depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
58     + depends on CPU_BIG_ENDIAN
59     default n
60     help
61     Allows turn on optimalized library function (memcpy and memmove).
62     diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
63     index 62021d7e249e..fdc48bb065d8 100644
64     --- a/arch/microblaze/lib/fastcopy.S
65     +++ b/arch/microblaze/lib/fastcopy.S
66     @@ -29,10 +29,6 @@
67     * between mem locations with size of xfer spec'd in bytes
68     */
69    
70     -#ifdef __MICROBLAZEEL__
71     -#error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM.
72     -#endif
73     -
74     #include <linux/linkage.h>
75     .text
76     .globl memcpy
77     diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
78     index ae55e715cc74..49fb6614ea8c 100644
79     --- a/arch/s390/Kconfig
80     +++ b/arch/s390/Kconfig
81     @@ -121,6 +121,7 @@ config S390
82     select GENERIC_CLOCKEVENTS
83     select GENERIC_CPU_AUTOPROBE
84     select GENERIC_CPU_DEVICES if !SMP
85     + select GENERIC_CPU_VULNERABILITIES
86     select GENERIC_FIND_FIRST_BIT
87     select GENERIC_SMP_IDLE_THREAD
88     select GENERIC_TIME_VSYSCALL
89     @@ -538,6 +539,51 @@ config ARCH_RANDOM
90    
91     If unsure, say Y.
92    
93     +config KERNEL_NOBP
94     + def_bool n
95     + prompt "Enable modified branch prediction for the kernel by default"
96     + help
97     + If this option is selected the kernel will switch to a modified
98     + branch prediction mode if the firmware interface is available.
99     + The modified branch prediction mode improves the behaviour in
100     + regard to speculative execution.
101     +
102     + With the option enabled the kernel parameter "nobp=0" or "nospec"
103     + can be used to run the kernel in the normal branch prediction mode.
104     +
105     + With the option disabled the modified branch prediction mode is
106     + enabled with the "nobp=1" kernel parameter.
107     +
108     + If unsure, say N.
109     +
110     +config EXPOLINE
111     + def_bool n
112     + prompt "Avoid speculative indirect branches in the kernel"
113     + help
114     + Compile the kernel with the expoline compiler options to guard
115     + against kernel-to-user data leaks by avoiding speculative indirect
116     + branches.
117     + Requires a compiler with -mindirect-branch=thunk support for full
118     + protection. The kernel may run slower.
119     +
120     + If unsure, say N.
121     +
122     +choice
123     + prompt "Expoline default"
124     + depends on EXPOLINE
125     + default EXPOLINE_FULL
126     +
127     +config EXPOLINE_OFF
128     + bool "spectre_v2=off"
129     +
130     +config EXPOLINE_AUTO
131     + bool "spectre_v2=auto"
132     +
133     +config EXPOLINE_FULL
134     + bool "spectre_v2=on"
135     +
136     +endchoice
137     +
138     endmenu
139    
140     menu "Memory setup"
141     @@ -812,6 +858,7 @@ config PFAULT
142     config SHARED_KERNEL
143     bool "VM shared kernel support"
144     depends on !JUMP_LABEL
145     + depends on !ALTERNATIVES
146     help
147     Select this option, if you want to share the text segment of the
148     Linux kernel between different VM guests. This reduces memory
149     diff --git a/arch/s390/Makefile b/arch/s390/Makefile
150     index dac821cfcd43..ec3fa105f448 100644
151     --- a/arch/s390/Makefile
152     +++ b/arch/s390/Makefile
153     @@ -81,6 +81,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
154     cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
155     endif
156    
157     +ifdef CONFIG_EXPOLINE
158     + ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
159     + CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
160     + CC_FLAGS_EXPOLINE += -mfunction-return=thunk
161     + CC_FLAGS_EXPOLINE += -mindirect-branch-table
162     + export CC_FLAGS_EXPOLINE
163     + cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
164     + endif
165     +endif
166     +
167     ifdef CONFIG_FUNCTION_TRACER
168     # make use of hotpatch feature if the compiler supports it
169     cc_hotpatch := -mhotpatch=0,3
170     diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
171     new file mode 100644
172     index 000000000000..a72002056b54
173     --- /dev/null
174     +++ b/arch/s390/include/asm/alternative.h
175     @@ -0,0 +1,149 @@
176     +#ifndef _ASM_S390_ALTERNATIVE_H
177     +#define _ASM_S390_ALTERNATIVE_H
178     +
179     +#ifndef __ASSEMBLY__
180     +
181     +#include <linux/types.h>
182     +#include <linux/stddef.h>
183     +#include <linux/stringify.h>
184     +
185     +struct alt_instr {
186     + s32 instr_offset; /* original instruction */
187     + s32 repl_offset; /* offset to replacement instruction */
188     + u16 facility; /* facility bit set for replacement */
189     + u8 instrlen; /* length of original instruction */
190     + u8 replacementlen; /* length of new instruction */
191     +} __packed;
192     +
193     +void apply_alternative_instructions(void);
194     +void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
195     +
196     +/*
197     + * |661: |662: |6620 |663:
198     + * +-----------+---------------------+
199     + * | oldinstr | oldinstr_padding |
200     + * | +----------+----------+
201     + * | | | |
202     + * | | >6 bytes |6/4/2 nops|
203     + * | |6 bytes jg----------->
204     + * +-----------+---------------------+
205     + * ^^ static padding ^^
206     + *
207     + * .altinstr_replacement section
208     + * +---------------------+-----------+
209     + * |6641: |6651:
210     + * | alternative instr 1 |
211     + * +-----------+---------+- - - - - -+
212     + * |6642: |6652: |
213     + * | alternative instr 2 | padding
214     + * +---------------------+- - - - - -+
215     + * ^ runtime ^
216     + *
217     + * .altinstructions section
218     + * +---------------------------------+
219     + * | alt_instr entries for each |
220     + * | alternative instr |
221     + * +---------------------------------+
222     + */
223     +
224     +#define b_altinstr(num) "664"#num
225     +#define e_altinstr(num) "665"#num
226     +
227     +#define e_oldinstr_pad_end "663"
228     +#define oldinstr_len "662b-661b"
229     +#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
230     +#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
231     +#define oldinstr_pad_len(num) \
232     + "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
233     + "((" altinstr_len(num) ")-(" oldinstr_len "))"
234     +
235     +#define INSTR_LEN_SANITY_CHECK(len) \
236     + ".if " len " > 254\n" \
237     + "\t.error \"cpu alternatives does not support instructions " \
238     + "blocks > 254 bytes\"\n" \
239     + ".endif\n" \
240     + ".if (" len ") %% 2\n" \
241     + "\t.error \"cpu alternatives instructions length is odd\"\n" \
242     + ".endif\n"
243     +
244     +#define OLDINSTR_PADDING(oldinstr, num) \
245     + ".if " oldinstr_pad_len(num) " > 6\n" \
246     + "\tjg " e_oldinstr_pad_end "f\n" \
247     + "6620:\n" \
248     + "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
249     + ".else\n" \
250     + "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
251     + "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
252     + "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
253     + ".endif\n"
254     +
255     +#define OLDINSTR(oldinstr, num) \
256     + "661:\n\t" oldinstr "\n662:\n" \
257     + OLDINSTR_PADDING(oldinstr, num) \
258     + e_oldinstr_pad_end ":\n" \
259     + INSTR_LEN_SANITY_CHECK(oldinstr_len)
260     +
261     +#define OLDINSTR_2(oldinstr, num1, num2) \
262     + "661:\n\t" oldinstr "\n662:\n" \
263     + ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
264     + OLDINSTR_PADDING(oldinstr, num2) \
265     + ".else\n" \
266     + OLDINSTR_PADDING(oldinstr, num1) \
267     + ".endif\n" \
268     + e_oldinstr_pad_end ":\n" \
269     + INSTR_LEN_SANITY_CHECK(oldinstr_len)
270     +
271     +#define ALTINSTR_ENTRY(facility, num) \
272     + "\t.long 661b - .\n" /* old instruction */ \
273     + "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
274     + "\t.word " __stringify(facility) "\n" /* facility bit */ \
275     + "\t.byte " oldinstr_total_len "\n" /* source len */ \
276     + "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
277     +
278     +#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
279     + b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
280     + INSTR_LEN_SANITY_CHECK(altinstr_len(num))
281     +
282     +/* alternative assembly primitive: */
283     +#define ALTERNATIVE(oldinstr, altinstr, facility) \
284     + ".pushsection .altinstr_replacement, \"ax\"\n" \
285     + ALTINSTR_REPLACEMENT(altinstr, 1) \
286     + ".popsection\n" \
287     + OLDINSTR(oldinstr, 1) \
288     + ".pushsection .altinstructions,\"a\"\n" \
289     + ALTINSTR_ENTRY(facility, 1) \
290     + ".popsection\n"
291     +
292     +#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
293     + ".pushsection .altinstr_replacement, \"ax\"\n" \
294     + ALTINSTR_REPLACEMENT(altinstr1, 1) \
295     + ALTINSTR_REPLACEMENT(altinstr2, 2) \
296     + ".popsection\n" \
297     + OLDINSTR_2(oldinstr, 1, 2) \
298     + ".pushsection .altinstructions,\"a\"\n" \
299     + ALTINSTR_ENTRY(facility1, 1) \
300     + ALTINSTR_ENTRY(facility2, 2) \
301     + ".popsection\n"
302     +
303     +/*
304     + * Alternative instructions for different CPU types or capabilities.
305     + *
306     + * This allows to use optimized instructions even on generic binary
307     + * kernels.
308     + *
309     + * oldinstr is padded with jump and nops at compile time if altinstr is
310     + * longer. altinstr is padded with jump and nops at run-time during patching.
311     + *
312     + * For non barrier like inlines please define new variants
313     + * without volatile and memory clobber.
314     + */
315     +#define alternative(oldinstr, altinstr, facility) \
316     + asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
317     +
318     +#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
319     + asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
320     + altinstr2, facility2) ::: "memory")
321     +
322     +#endif /* __ASSEMBLY__ */
323     +
324     +#endif /* _ASM_S390_ALTERNATIVE_H */
325     diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
326     index 10432607a573..f9eddbca79d2 100644
327     --- a/arch/s390/include/asm/barrier.h
328     +++ b/arch/s390/include/asm/barrier.h
329     @@ -49,6 +49,30 @@ do { \
330     #define __smp_mb__before_atomic() barrier()
331     #define __smp_mb__after_atomic() barrier()
332    
333     +/**
334     + * array_index_mask_nospec - generate a mask for array_idx() that is
335     + * ~0UL when the bounds check succeeds and 0 otherwise
336     + * @index: array element index
337     + * @size: number of elements in array
338     + */
339     +#define array_index_mask_nospec array_index_mask_nospec
340     +static inline unsigned long array_index_mask_nospec(unsigned long index,
341     + unsigned long size)
342     +{
343     + unsigned long mask;
344     +
345     + if (__builtin_constant_p(size) && size > 0) {
346     + asm(" clgr %2,%1\n"
347     + " slbgr %0,%0\n"
348     + :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
349     + return mask;
350     + }
351     + asm(" clgr %1,%2\n"
352     + " slbgr %0,%0\n"
353     + :"=d" (mask) : "d" (size), "d" (index) :"cc");
354     + return ~mask;
355     +}
356     +
357     #include <asm-generic/barrier.h>
358    
359     #endif /* __ASM_BARRIER_H */
360     diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
361     index f040644575b7..2d58478c2745 100644
362     --- a/arch/s390/include/asm/facility.h
363     +++ b/arch/s390/include/asm/facility.h
364     @@ -15,6 +15,24 @@
365    
366     #define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
367    
368     +static inline void __set_facility(unsigned long nr, void *facilities)
369     +{
370     + unsigned char *ptr = (unsigned char *) facilities;
371     +
372     + if (nr >= MAX_FACILITY_BIT)
373     + return;
374     + ptr[nr >> 3] |= 0x80 >> (nr & 7);
375     +}
376     +
377     +static inline void __clear_facility(unsigned long nr, void *facilities)
378     +{
379     + unsigned char *ptr = (unsigned char *) facilities;
380     +
381     + if (nr >= MAX_FACILITY_BIT)
382     + return;
383     + ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
384     +}
385     +
386     static inline int __test_facility(unsigned long nr, void *facilities)
387     {
388     unsigned char *ptr;
389     diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
390     index 51375e766e90..d660e784e445 100644
391     --- a/arch/s390/include/asm/kvm_host.h
392     +++ b/arch/s390/include/asm/kvm_host.h
393     @@ -210,7 +210,8 @@ struct kvm_s390_sie_block {
394     __u16 ipa; /* 0x0056 */
395     __u32 ipb; /* 0x0058 */
396     __u32 scaoh; /* 0x005c */
397     - __u8 reserved60; /* 0x0060 */
398     +#define FPF_BPBC 0x20
399     + __u8 fpf; /* 0x0060 */
400     #define ECB_GS 0x40
401     #define ECB_TE 0x10
402     #define ECB_SRSI 0x04
403     diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
404     index 917f7344cab6..88a212df0dbc 100644
405     --- a/arch/s390/include/asm/lowcore.h
406     +++ b/arch/s390/include/asm/lowcore.h
407     @@ -140,7 +140,9 @@ struct lowcore {
408     /* Per cpu primary space access list */
409     __u32 paste[16]; /* 0x0400 */
410    
411     - __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
412     + /* br %r1 trampoline */
413     + __u16 br_r1_trampoline; /* 0x0440 */
414     + __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */
415    
416     /*
417     * 0xe00 contains the address of the IPL Parameter Information
418     @@ -155,7 +157,8 @@ struct lowcore {
419     __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
420    
421     /* Extended facility list */
422     - __u64 stfle_fac_list[32]; /* 0x0f00 */
423     + __u64 stfle_fac_list[16]; /* 0x0f00 */
424     + __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
425     __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
426    
427     /* Pointer to the machine check extended save area */
428     diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
429     new file mode 100644
430     index 000000000000..b4bd8c41e9d3
431     --- /dev/null
432     +++ b/arch/s390/include/asm/nospec-branch.h
433     @@ -0,0 +1,17 @@
434     +/* SPDX-License-Identifier: GPL-2.0 */
435     +#ifndef _ASM_S390_EXPOLINE_H
436     +#define _ASM_S390_EXPOLINE_H
437     +
438     +#ifndef __ASSEMBLY__
439     +
440     +#include <linux/types.h>
441     +
442     +extern int nospec_disable;
443     +
444     +void nospec_init_branches(void);
445     +void nospec_auto_detect(void);
446     +void nospec_revert(s32 *start, s32 *end);
447     +
448     +#endif /* __ASSEMBLY__ */
449     +
450     +#endif /* _ASM_S390_EXPOLINE_H */
451     diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
452     index 9cf92abe23c3..0a39cd102c49 100644
453     --- a/arch/s390/include/asm/processor.h
454     +++ b/arch/s390/include/asm/processor.h
455     @@ -89,6 +89,7 @@ void cpu_detect_mhz_feature(void);
456     extern const struct seq_operations cpuinfo_op;
457     extern int sysctl_ieee_emulation_warnings;
458     extern void execve_tail(void);
459     +extern void __bpon(void);
460    
461     /*
462     * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
463     @@ -377,6 +378,9 @@ extern void memcpy_absolute(void *, void *, size_t);
464     memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
465     } while (0)
466    
467     +extern int s390_isolate_bp(void);
468     +extern int s390_isolate_bp_guest(void);
469     +
470     #endif /* __ASSEMBLY__ */
471    
472     #endif /* __ASM_S390_PROCESSOR_H */
473     diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
474     index 0880a37b6d3b..301b4f70bf31 100644
475     --- a/arch/s390/include/asm/thread_info.h
476     +++ b/arch/s390/include/asm/thread_info.h
477     @@ -60,6 +60,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
478     #define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */
479     #define TIF_PATCH_PENDING 5 /* pending live patching update */
480     #define TIF_PGSTE 6 /* New mm's will use 4K page tables */
481     +#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
482     +#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
483    
484     #define TIF_31BIT 16 /* 32bit process */
485     #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
486     @@ -80,6 +82,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
487     #define _TIF_UPROBE _BITUL(TIF_UPROBE)
488     #define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE)
489     #define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING)
490     +#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
491     +#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
492    
493     #define _TIF_31BIT _BITUL(TIF_31BIT)
494     #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
495     diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
496     index 9ad172dcd912..a3938db010f7 100644
497     --- a/arch/s390/include/uapi/asm/kvm.h
498     +++ b/arch/s390/include/uapi/asm/kvm.h
499     @@ -228,6 +228,7 @@ struct kvm_guest_debug_arch {
500     #define KVM_SYNC_RICCB (1UL << 7)
501     #define KVM_SYNC_FPRS (1UL << 8)
502     #define KVM_SYNC_GSCB (1UL << 9)
503     +#define KVM_SYNC_BPBC (1UL << 10)
504     /* length and alignment of the sdnx as a power of two */
505     #define SDNXC 8
506     #define SDNXL (1UL << SDNXC)
507     @@ -251,7 +252,9 @@ struct kvm_sync_regs {
508     };
509     __u8 reserved[512]; /* for future vector expansion */
510     __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
511     - __u8 padding1[52]; /* riccb needs to be 64byte aligned */
512     + __u8 bpbc : 1; /* bp mode */
513     + __u8 reserved2 : 7;
514     + __u8 padding1[51]; /* riccb needs to be 64byte aligned */
515     __u8 riccb[64]; /* runtime instrumentation controls block */
516     __u8 padding2[192]; /* sdnx needs to be 256byte aligned */
517     union {
518     diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
519     index 4ce2d05929a7..a3a4cafb6080 100644
520     --- a/arch/s390/kernel/Makefile
521     +++ b/arch/s390/kernel/Makefile
522     @@ -29,6 +29,7 @@ UBSAN_SANITIZE_early.o := n
523     #
524     ifneq ($(CC_FLAGS_MARCH),-march=z900)
525     CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
526     +CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
527     CFLAGS_als.o += -march=z900
528     AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
529     AFLAGS_head.o += -march=z900
530     @@ -57,10 +58,13 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
531     obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
532     obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
533     obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o
534     -obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o
535     +obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
536     +obj-y += nospec-branch.o
537    
538     extra-y += head.o head64.o vmlinux.lds
539    
540     +CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
541     +
542     obj-$(CONFIG_MODULES) += module.o
543     obj-$(CONFIG_SMP) += smp.o
544     obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
545     diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
546     new file mode 100644
547     index 000000000000..b57b293998dc
548     --- /dev/null
549     +++ b/arch/s390/kernel/alternative.c
550     @@ -0,0 +1,112 @@
551     +#include <linux/module.h>
552     +#include <asm/alternative.h>
553     +#include <asm/facility.h>
554     +#include <asm/nospec-branch.h>
555     +
556     +#define MAX_PATCH_LEN (255 - 1)
557     +
558     +static int __initdata_or_module alt_instr_disabled;
559     +
560     +static int __init disable_alternative_instructions(char *str)
561     +{
562     + alt_instr_disabled = 1;
563     + return 0;
564     +}
565     +
566     +early_param("noaltinstr", disable_alternative_instructions);
567     +
568     +struct brcl_insn {
569     + u16 opc;
570     + s32 disp;
571     +} __packed;
572     +
573     +static u16 __initdata_or_module nop16 = 0x0700;
574     +static u32 __initdata_or_module nop32 = 0x47000000;
575     +static struct brcl_insn __initdata_or_module nop48 = {
576     + 0xc004, 0
577     +};
578     +
579     +static const void *nops[] __initdata_or_module = {
580     + &nop16,
581     + &nop32,
582     + &nop48
583     +};
584     +
585     +static void __init_or_module add_jump_padding(void *insns, unsigned int len)
586     +{
587     + struct brcl_insn brcl = {
588     + 0xc0f4,
589     + len / 2
590     + };
591     +
592     + memcpy(insns, &brcl, sizeof(brcl));
593     + insns += sizeof(brcl);
594     + len -= sizeof(brcl);
595     +
596     + while (len > 0) {
597     + memcpy(insns, &nop16, 2);
598     + insns += 2;
599     + len -= 2;
600     + }
601     +}
602     +
603     +static void __init_or_module add_padding(void *insns, unsigned int len)
604     +{
605     + if (len > 6)
606     + add_jump_padding(insns, len);
607     + else if (len >= 2)
608     + memcpy(insns, nops[len / 2 - 1], len);
609     +}
610     +
611     +static void __init_or_module __apply_alternatives(struct alt_instr *start,
612     + struct alt_instr *end)
613     +{
614     + struct alt_instr *a;
615     + u8 *instr, *replacement;
616     + u8 insnbuf[MAX_PATCH_LEN];
617     +
618     + /*
619     + * The scan order should be from start to end. A later scanned
620     + * alternative code can overwrite previously scanned alternative code.
621     + */
622     + for (a = start; a < end; a++) {
623     + int insnbuf_sz = 0;
624     +
625     + instr = (u8 *)&a->instr_offset + a->instr_offset;
626     + replacement = (u8 *)&a->repl_offset + a->repl_offset;
627     +
628     + if (!__test_facility(a->facility,
629     + S390_lowcore.alt_stfle_fac_list))
630     + continue;
631     +
632     + if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
633     + WARN_ONCE(1, "cpu alternatives instructions length is "
634     + "odd, skipping patching\n");
635     + continue;
636     + }
637     +
638     + memcpy(insnbuf, replacement, a->replacementlen);
639     + insnbuf_sz = a->replacementlen;
640     +
641     + if (a->instrlen > a->replacementlen) {
642     + add_padding(insnbuf + a->replacementlen,
643     + a->instrlen - a->replacementlen);
644     + insnbuf_sz += a->instrlen - a->replacementlen;
645     + }
646     +
647     + s390_kernel_write(instr, insnbuf, insnbuf_sz);
648     + }
649     +}
650     +
651     +void __init_or_module apply_alternatives(struct alt_instr *start,
652     + struct alt_instr *end)
653     +{
654     + if (!alt_instr_disabled)
655     + __apply_alternatives(start, end);
656     +}
657     +
658     +extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
659     +void __init apply_alternative_instructions(void)
660     +{
661     + apply_alternatives(__alt_instructions, __alt_instructions_end);
662     +}
663     diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
664     index f7b280f0ab16..a3219837fa70 100644
665     --- a/arch/s390/kernel/early.c
666     +++ b/arch/s390/kernel/early.c
667     @@ -329,6 +329,11 @@ static noinline __init void setup_facility_list(void)
668     {
669     stfle(S390_lowcore.stfle_fac_list,
670     ARRAY_SIZE(S390_lowcore.stfle_fac_list));
671     + memcpy(S390_lowcore.alt_stfle_fac_list,
672     + S390_lowcore.stfle_fac_list,
673     + sizeof(S390_lowcore.alt_stfle_fac_list));
674     + if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
675     + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
676     }
677    
678     static __init void detect_diag9c(void)
679     diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
680     index 7c6904d616d8..ed9aaa212d4a 100644
681     --- a/arch/s390/kernel/entry.S
682     +++ b/arch/s390/kernel/entry.S
683     @@ -106,6 +106,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
684     aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
685     j 3f
686     1: UPDATE_VTIME %r14,%r15,\timer
687     + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
688     2: lg %r15,__LC_ASYNC_STACK # load async stack
689     3: la %r11,STACK_FRAME_OVERHEAD(%r15)
690     .endm
691     @@ -158,6 +159,130 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
692     tm off+\addr, \mask
693     .endm
694    
695     + .macro BPOFF
696     + .pushsection .altinstr_replacement, "ax"
697     +660: .long 0xb2e8c000
698     + .popsection
699     +661: .long 0x47000000
700     + .pushsection .altinstructions, "a"
701     + .long 661b - .
702     + .long 660b - .
703     + .word 82
704     + .byte 4
705     + .byte 4
706     + .popsection
707     + .endm
708     +
709     + .macro BPON
710     + .pushsection .altinstr_replacement, "ax"
711     +662: .long 0xb2e8d000
712     + .popsection
713     +663: .long 0x47000000
714     + .pushsection .altinstructions, "a"
715     + .long 663b - .
716     + .long 662b - .
717     + .word 82
718     + .byte 4
719     + .byte 4
720     + .popsection
721     + .endm
722     +
723     + .macro BPENTER tif_ptr,tif_mask
724     + .pushsection .altinstr_replacement, "ax"
725     +662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
726     + .word 0xc004, 0x0000, 0x0000 # 6 byte nop
727     + .popsection
728     +664: TSTMSK \tif_ptr,\tif_mask
729     + jz . + 8
730     + .long 0xb2e8d000
731     + .pushsection .altinstructions, "a"
732     + .long 664b - .
733     + .long 662b - .
734     + .word 82
735     + .byte 12
736     + .byte 12
737     + .popsection
738     + .endm
739     +
740     + .macro BPEXIT tif_ptr,tif_mask
741     + TSTMSK \tif_ptr,\tif_mask
742     + .pushsection .altinstr_replacement, "ax"
743     +662: jnz . + 8
744     + .long 0xb2e8d000
745     + .popsection
746     +664: jz . + 8
747     + .long 0xb2e8c000
748     + .pushsection .altinstructions, "a"
749     + .long 664b - .
750     + .long 662b - .
751     + .word 82
752     + .byte 8
753     + .byte 8
754     + .popsection
755     + .endm
756     +
757     +#ifdef CONFIG_EXPOLINE
758     +
759     + .macro GEN_BR_THUNK name,reg,tmp
760     + .section .text.\name,"axG",@progbits,\name,comdat
761     + .globl \name
762     + .hidden \name
763     + .type \name,@function
764     +\name:
765     + .cfi_startproc
766     +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
767     + exrl 0,0f
768     +#else
769     + larl \tmp,0f
770     + ex 0,0(\tmp)
771     +#endif
772     + j .
773     +0: br \reg
774     + .cfi_endproc
775     + .endm
776     +
777     + GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
778     + GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
779     + GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
780     +
781     + .macro BASR_R14_R9
782     +0: brasl %r14,__s390x_indirect_jump_r1use_r9
783     + .pushsection .s390_indirect_branches,"a",@progbits
784     + .long 0b-.
785     + .popsection
786     + .endm
787     +
788     + .macro BR_R1USE_R14
789     +0: jg __s390x_indirect_jump_r1use_r14
790     + .pushsection .s390_indirect_branches,"a",@progbits
791     + .long 0b-.
792     + .popsection
793     + .endm
794     +
795     + .macro BR_R11USE_R14
796     +0: jg __s390x_indirect_jump_r11use_r14
797     + .pushsection .s390_indirect_branches,"a",@progbits
798     + .long 0b-.
799     + .popsection
800     + .endm
801     +
802     +#else /* CONFIG_EXPOLINE */
803     +
804     + .macro BASR_R14_R9
805     + basr %r14,%r9
806     + .endm
807     +
808     + .macro BR_R1USE_R14
809     + br %r14
810     + .endm
811     +
812     + .macro BR_R11USE_R14
813     + br %r14
814     + .endm
815     +
816     +#endif /* CONFIG_EXPOLINE */
817     +
818     +
819     .section .kprobes.text, "ax"
820     .Ldummy:
821     /*
822     @@ -170,6 +295,11 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
823     */
824     nop 0
825    
826     +ENTRY(__bpon)
827     + .globl __bpon
828     + BPON
829     + BR_R1USE_R14
830     +
831     /*
832     * Scheduler resume function, called by switch_to
833     * gpr2 = (task_struct *) prev
834     @@ -193,9 +323,9 @@ ENTRY(__switch_to)
835     mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
836     lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
837     TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
838     - bzr %r14
839     + jz 0f
840     .insn s,0xb2800000,__LC_LPP # set program parameter
841     - br %r14
842     +0: BR_R1USE_R14
843    
844     .L__critical_start:
845    
846     @@ -207,9 +337,11 @@ ENTRY(__switch_to)
847     */
848     ENTRY(sie64a)
849     stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
850     + lg %r12,__LC_CURRENT
851     stg %r2,__SF_EMPTY(%r15) # save control block pointer
852     stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
853     xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
854     + mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
855     TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
856     jno .Lsie_load_guest_gprs
857     brasl %r14,load_fpu_regs # load guest fp/vx regs
858     @@ -226,8 +358,12 @@ ENTRY(sie64a)
859     jnz .Lsie_skip
860     TSTMSK __LC_CPU_FLAGS,_CIF_FPU
861     jo .Lsie_skip # exit if fp/vx regs changed
862     + BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
863     .Lsie_entry:
864     sie 0(%r14)
865     +.Lsie_exit:
866     + BPOFF
867     + BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
868     .Lsie_skip:
869     ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
870     lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
871     @@ -248,9 +384,15 @@ ENTRY(sie64a)
872     sie_exit:
873     lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
874     stmg %r0,%r13,0(%r14) # save guest gprs 0-13
875     + xgr %r0,%r0 # clear guest registers to
876     + xgr %r1,%r1 # prevent speculative use
877     + xgr %r2,%r2
878     + xgr %r3,%r3
879     + xgr %r4,%r4
880     + xgr %r5,%r5
881     lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
882     lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
883     - br %r14
884     + BR_R1USE_R14
885     .Lsie_fault:
886     lghi %r14,-EFAULT
887     stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
888     @@ -273,6 +415,7 @@ ENTRY(system_call)
889     stpt __LC_SYNC_ENTER_TIMER
890     .Lsysc_stmg:
891     stmg %r8,%r15,__LC_SAVE_AREA_SYNC
892     + BPOFF
893     lg %r12,__LC_CURRENT
894     lghi %r13,__TASK_thread
895     lghi %r14,_PIF_SYSCALL
896     @@ -281,12 +424,15 @@ ENTRY(system_call)
897     la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
898     .Lsysc_vtime:
899     UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
900     + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
901     stmg %r0,%r7,__PT_R0(%r11)
902     mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
903     mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
904     mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
905     stg %r14,__PT_FLAGS(%r11)
906     .Lsysc_do_svc:
907     + # clear user controlled register to prevent speculative use
908     + xgr %r0,%r0
909     # load address of system call table
910     lg %r10,__THREAD_sysc_table(%r13,%r12)
911     llgh %r8,__PT_INT_CODE+2(%r11)
912     @@ -305,7 +451,7 @@ ENTRY(system_call)
913     lgf %r9,0(%r8,%r10) # get system call add.
914     TSTMSK __TI_flags(%r12),_TIF_TRACE
915     jnz .Lsysc_tracesys
916     - basr %r14,%r9 # call sys_xxxx
917     + BASR_R14_R9 # call sys_xxxx
918     stg %r2,__PT_R2(%r11) # store return value
919    
920     .Lsysc_return:
921     @@ -317,6 +463,7 @@ ENTRY(system_call)
922     jnz .Lsysc_work # check for work
923     TSTMSK __LC_CPU_FLAGS,_CIF_WORK
924     jnz .Lsysc_work
925     + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
926     .Lsysc_restore:
927     lg %r14,__LC_VDSO_PER_CPU
928     lmg %r0,%r10,__PT_R0(%r11)
929     @@ -481,7 +628,7 @@ ENTRY(system_call)
930     lmg %r3,%r7,__PT_R3(%r11)
931     stg %r7,STACK_FRAME_OVERHEAD(%r15)
932     lg %r2,__PT_ORIG_GPR2(%r11)
933     - basr %r14,%r9 # call sys_xxx
934     + BASR_R14_R9 # call sys_xxx
935     stg %r2,__PT_R2(%r11) # store return value
936     .Lsysc_tracenogo:
937     TSTMSK __TI_flags(%r12),_TIF_TRACE
938     @@ -505,7 +652,7 @@ ENTRY(ret_from_fork)
939     lmg %r9,%r10,__PT_R9(%r11) # load gprs
940     ENTRY(kernel_thread_starter)
941     la %r2,0(%r10)
942     - basr %r14,%r9
943     + BASR_R14_R9
944     j .Lsysc_tracenogo
945    
946     /*
947     @@ -514,6 +661,7 @@ ENTRY(kernel_thread_starter)
948    
949     ENTRY(pgm_check_handler)
950     stpt __LC_SYNC_ENTER_TIMER
951     + BPOFF
952     stmg %r8,%r15,__LC_SAVE_AREA_SYNC
953     lg %r10,__LC_LAST_BREAK
954     lg %r12,__LC_CURRENT
955     @@ -540,6 +688,7 @@ ENTRY(pgm_check_handler)
956     aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
957     j 4f
958     2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
959     + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
960     lg %r15,__LC_KERNEL_STACK
961     lgr %r14,%r12
962     aghi %r14,__TASK_thread # pointer to thread_struct
963     @@ -550,6 +699,15 @@ ENTRY(pgm_check_handler)
964     3: stg %r10,__THREAD_last_break(%r14)
965     4: la %r11,STACK_FRAME_OVERHEAD(%r15)
966     stmg %r0,%r7,__PT_R0(%r11)
967     + # clear user controlled registers to prevent speculative use
968     + xgr %r0,%r0
969     + xgr %r1,%r1
970     + xgr %r2,%r2
971     + xgr %r3,%r3
972     + xgr %r4,%r4
973     + xgr %r5,%r5
974     + xgr %r6,%r6
975     + xgr %r7,%r7
976     mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
977     stmg %r8,%r9,__PT_PSW(%r11)
978     mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
979     @@ -571,9 +729,9 @@ ENTRY(pgm_check_handler)
980     nill %r10,0x007f
981     sll %r10,2
982     je .Lpgm_return
983     - lgf %r1,0(%r10,%r1) # load address of handler routine
984     + lgf %r9,0(%r10,%r1) # load address of handler routine
985     lgr %r2,%r11 # pass pointer to pt_regs
986     - basr %r14,%r1 # branch to interrupt-handler
987     + BASR_R14_R9 # branch to interrupt-handler
988     .Lpgm_return:
989     LOCKDEP_SYS_EXIT
990     tm __PT_PSW+1(%r11),0x01 # returning to user ?
991     @@ -609,12 +767,23 @@ ENTRY(pgm_check_handler)
992     ENTRY(io_int_handler)
993     STCK __LC_INT_CLOCK
994     stpt __LC_ASYNC_ENTER_TIMER
995     + BPOFF
996     stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
997     lg %r12,__LC_CURRENT
998     larl %r13,cleanup_critical
999     lmg %r8,%r9,__LC_IO_OLD_PSW
1000     SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
1001     stmg %r0,%r7,__PT_R0(%r11)
1002     + # clear user controlled registers to prevent speculative use
1003     + xgr %r0,%r0
1004     + xgr %r1,%r1
1005     + xgr %r2,%r2
1006     + xgr %r3,%r3
1007     + xgr %r4,%r4
1008     + xgr %r5,%r5
1009     + xgr %r6,%r6
1010     + xgr %r7,%r7
1011     + xgr %r10,%r10
1012     mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
1013     stmg %r8,%r9,__PT_PSW(%r11)
1014     mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
1015     @@ -649,9 +818,13 @@ ENTRY(io_int_handler)
1016     lg %r14,__LC_VDSO_PER_CPU
1017     lmg %r0,%r10,__PT_R0(%r11)
1018     mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
1019     + tm __PT_PSW+1(%r11),0x01 # returning to user ?
1020     + jno .Lio_exit_kernel
1021     + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
1022     .Lio_exit_timer:
1023     stpt __LC_EXIT_TIMER
1024     mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
1025     +.Lio_exit_kernel:
1026     lmg %r11,%r15,__PT_R11(%r11)
1027     lpswe __LC_RETURN_PSW
1028     .Lio_done:
1029     @@ -814,12 +987,23 @@ ENTRY(io_int_handler)
1030     ENTRY(ext_int_handler)
1031     STCK __LC_INT_CLOCK
1032     stpt __LC_ASYNC_ENTER_TIMER
1033     + BPOFF
1034     stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
1035     lg %r12,__LC_CURRENT
1036     larl %r13,cleanup_critical
1037     lmg %r8,%r9,__LC_EXT_OLD_PSW
1038     SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
1039     stmg %r0,%r7,__PT_R0(%r11)
1040     + # clear user controlled registers to prevent speculative use
1041     + xgr %r0,%r0
1042     + xgr %r1,%r1
1043     + xgr %r2,%r2
1044     + xgr %r3,%r3
1045     + xgr %r4,%r4
1046     + xgr %r5,%r5
1047     + xgr %r6,%r6
1048     + xgr %r7,%r7
1049     + xgr %r10,%r10
1050     mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
1051     stmg %r8,%r9,__PT_PSW(%r11)
1052     lghi %r1,__LC_EXT_PARAMS2
1053     @@ -852,11 +1036,12 @@ ENTRY(psw_idle)
1054     .Lpsw_idle_stcctm:
1055     #endif
1056     oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
1057     + BPON
1058     STCK __CLOCK_IDLE_ENTER(%r2)
1059     stpt __TIMER_IDLE_ENTER(%r2)
1060     .Lpsw_idle_lpsw:
1061     lpswe __SF_EMPTY(%r15)
1062     - br %r14
1063     + BR_R1USE_R14
1064     .Lpsw_idle_end:
1065    
1066     /*
1067     @@ -870,7 +1055,7 @@ ENTRY(save_fpu_regs)
1068     lg %r2,__LC_CURRENT
1069     aghi %r2,__TASK_thread
1070     TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1071     - bor %r14
1072     + jo .Lsave_fpu_regs_exit
1073     stfpc __THREAD_FPU_fpc(%r2)
1074     lg %r3,__THREAD_FPU_regs(%r2)
1075     TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
1076     @@ -897,7 +1082,8 @@ ENTRY(save_fpu_regs)
1077     std 15,120(%r3)
1078     .Lsave_fpu_regs_done:
1079     oi __LC_CPU_FLAGS+7,_CIF_FPU
1080     - br %r14
1081     +.Lsave_fpu_regs_exit:
1082     + BR_R1USE_R14
1083     .Lsave_fpu_regs_end:
1084     EXPORT_SYMBOL(save_fpu_regs)
1085    
1086     @@ -915,7 +1101,7 @@ load_fpu_regs:
1087     lg %r4,__LC_CURRENT
1088     aghi %r4,__TASK_thread
1089     TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1090     - bnor %r14
1091     + jno .Lload_fpu_regs_exit
1092     lfpc __THREAD_FPU_fpc(%r4)
1093     TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
1094     lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
1095     @@ -942,7 +1128,8 @@ load_fpu_regs:
1096     ld 15,120(%r4)
1097     .Lload_fpu_regs_done:
1098     ni __LC_CPU_FLAGS+7,255-_CIF_FPU
1099     - br %r14
1100     +.Lload_fpu_regs_exit:
1101     + BR_R1USE_R14
1102     .Lload_fpu_regs_end:
1103    
1104     .L__critical_end:
1105     @@ -952,6 +1139,7 @@ load_fpu_regs:
1106     */
1107     ENTRY(mcck_int_handler)
1108     STCK __LC_MCCK_CLOCK
1109     + BPOFF
1110     la %r1,4095 # revalidate r1
1111     spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
1112     lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
1113     @@ -982,6 +1170,16 @@ ENTRY(mcck_int_handler)
1114     .Lmcck_skip:
1115     lghi %r14,__LC_GPREGS_SAVE_AREA+64
1116     stmg %r0,%r7,__PT_R0(%r11)
1117     + # clear user controlled registers to prevent speculative use
1118     + xgr %r0,%r0
1119     + xgr %r1,%r1
1120     + xgr %r2,%r2
1121     + xgr %r3,%r3
1122     + xgr %r4,%r4
1123     + xgr %r5,%r5
1124     + xgr %r6,%r6
1125     + xgr %r7,%r7
1126     + xgr %r10,%r10
1127     mvc __PT_R8(64,%r11),0(%r14)
1128     stmg %r8,%r9,__PT_PSW(%r11)
1129     xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
1130     @@ -1007,6 +1205,7 @@ ENTRY(mcck_int_handler)
1131     mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
1132     tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
1133     jno 0f
1134     + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
1135     stpt __LC_EXIT_TIMER
1136     mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
1137     0: lmg %r11,%r15,__PT_R11(%r11)
1138     @@ -1102,7 +1301,7 @@ cleanup_critical:
1139     jl 0f
1140     clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
1141     jl .Lcleanup_load_fpu_regs
1142     -0: br %r14
1143     +0: BR_R11USE_R14
1144    
1145     .align 8
1146     .Lcleanup_table:
1147     @@ -1133,11 +1332,12 @@ cleanup_critical:
1148     clg %r9,BASED(.Lsie_crit_mcck_length)
1149     jh 1f
1150     oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
1151     -1: lg %r9,__SF_EMPTY(%r15) # get control block pointer
1152     +1: BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
1153     + lg %r9,__SF_EMPTY(%r15) # get control block pointer
1154     ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1155     lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1156     larl %r9,sie_exit # skip forward to sie_exit
1157     - br %r14
1158     + BR_R11USE_R14
1159     #endif
1160    
1161     .Lcleanup_system_call:
1162     @@ -1175,6 +1375,7 @@ cleanup_critical:
1163     stg %r15,__LC_SYSTEM_TIMER
1164     0: # update accounting time stamp
1165     mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
1166     + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
1167     # set up saved register r11
1168     lg %r15,__LC_KERNEL_STACK
1169     la %r9,STACK_FRAME_OVERHEAD(%r15)
1170     @@ -1190,7 +1391,7 @@ cleanup_critical:
1171     stg %r15,56(%r11) # r15 stack pointer
1172     # set new psw address and exit
1173     larl %r9,.Lsysc_do_svc
1174     - br %r14
1175     + BR_R11USE_R14
1176     .Lcleanup_system_call_insn:
1177     .quad system_call
1178     .quad .Lsysc_stmg
1179     @@ -1202,7 +1403,7 @@ cleanup_critical:
1180    
1181     .Lcleanup_sysc_tif:
1182     larl %r9,.Lsysc_tif
1183     - br %r14
1184     + BR_R11USE_R14
1185    
1186     .Lcleanup_sysc_restore:
1187     # check if stpt has been executed
1188     @@ -1219,14 +1420,14 @@ cleanup_critical:
1189     mvc 0(64,%r11),__PT_R8(%r9)
1190     lmg %r0,%r7,__PT_R0(%r9)
1191     1: lmg %r8,%r9,__LC_RETURN_PSW
1192     - br %r14
1193     + BR_R11USE_R14
1194     .Lcleanup_sysc_restore_insn:
1195     .quad .Lsysc_exit_timer
1196     .quad .Lsysc_done - 4
1197    
1198     .Lcleanup_io_tif:
1199     larl %r9,.Lio_tif
1200     - br %r14
1201     + BR_R11USE_R14
1202    
1203     .Lcleanup_io_restore:
1204     # check if stpt has been executed
1205     @@ -1240,7 +1441,7 @@ cleanup_critical:
1206     mvc 0(64,%r11),__PT_R8(%r9)
1207     lmg %r0,%r7,__PT_R0(%r9)
1208     1: lmg %r8,%r9,__LC_RETURN_PSW
1209     - br %r14
1210     + BR_R11USE_R14
1211     .Lcleanup_io_restore_insn:
1212     .quad .Lio_exit_timer
1213     .quad .Lio_done - 4
1214     @@ -1293,17 +1494,17 @@ cleanup_critical:
1215     # prepare return psw
1216     nihh %r8,0xfcfd # clear irq & wait state bits
1217     lg %r9,48(%r11) # return from psw_idle
1218     - br %r14
1219     + BR_R11USE_R14
1220     .Lcleanup_idle_insn:
1221     .quad .Lpsw_idle_lpsw
1222    
1223     .Lcleanup_save_fpu_regs:
1224     larl %r9,save_fpu_regs
1225     - br %r14
1226     + BR_R11USE_R14
1227    
1228     .Lcleanup_load_fpu_regs:
1229     larl %r9,load_fpu_regs
1230     - br %r14
1231     + BR_R11USE_R14
1232    
1233     /*
1234     * Integer constants
1235     @@ -1323,7 +1524,6 @@ cleanup_critical:
1236     .Lsie_crit_mcck_length:
1237     .quad .Lsie_skip - .Lsie_entry
1238     #endif
1239     -
1240     .section .rodata, "a"
1241     #define SYSCALL(esame,emu) .long esame
1242     .globl sys_call_table
1243     diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
1244     index d1a0e2c521d7..b565e784bae8 100644
1245     --- a/arch/s390/kernel/ipl.c
1246     +++ b/arch/s390/kernel/ipl.c
1247     @@ -564,6 +564,7 @@ static struct kset *ipl_kset;
1248    
1249     static void __ipl_run(void *unused)
1250     {
1251     + __bpon();
1252     diag308(DIAG308_LOAD_CLEAR, NULL);
1253     if (MACHINE_IS_VM)
1254     __cpcmd("IPL", NULL, 0, NULL);
1255     diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
1256     index 1a27f307a920..b441e069e674 100644
1257     --- a/arch/s390/kernel/module.c
1258     +++ b/arch/s390/kernel/module.c
1259     @@ -31,6 +31,9 @@
1260     #include <linux/kernel.h>
1261     #include <linux/moduleloader.h>
1262     #include <linux/bug.h>
1263     +#include <asm/alternative.h>
1264     +#include <asm/nospec-branch.h>
1265     +#include <asm/facility.h>
1266    
1267     #if 0
1268     #define DEBUGP printk
1269     @@ -168,7 +171,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
1270     me->arch.got_offset = me->core_layout.size;
1271     me->core_layout.size += me->arch.got_size;
1272     me->arch.plt_offset = me->core_layout.size;
1273     - me->core_layout.size += me->arch.plt_size;
1274     + if (me->arch.plt_size) {
1275     + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
1276     + me->arch.plt_size += PLT_ENTRY_SIZE;
1277     + me->core_layout.size += me->arch.plt_size;
1278     + }
1279     return 0;
1280     }
1281    
1282     @@ -322,9 +329,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
1283     unsigned int *ip;
1284     ip = me->core_layout.base + me->arch.plt_offset +
1285     info->plt_offset;
1286     - ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
1287     - ip[1] = 0x100a0004;
1288     - ip[2] = 0x07f10000;
1289     + ip[0] = 0x0d10e310; /* basr 1,0 */
1290     + ip[1] = 0x100a0004; /* lg 1,10(1) */
1291     + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
1292     + unsigned int *ij;
1293     + ij = me->core_layout.base +
1294     + me->arch.plt_offset +
1295     + me->arch.plt_size - PLT_ENTRY_SIZE;
1296     + ip[2] = 0xa7f40000 + /* j __jump_r1 */
1297     + (unsigned int)(u16)
1298     + (((unsigned long) ij - 8 -
1299     + (unsigned long) ip) / 2);
1300     + } else {
1301     + ip[2] = 0x07f10000; /* br %r1 */
1302     + }
1303     ip[3] = (unsigned int) (val >> 32);
1304     ip[4] = (unsigned int) val;
1305     info->plt_initialized = 1;
1306     @@ -429,6 +447,45 @@ int module_finalize(const Elf_Ehdr *hdr,
1307     const Elf_Shdr *sechdrs,
1308     struct module *me)
1309     {
1310     + const Elf_Shdr *s;
1311     + char *secstrings, *secname;
1312     + void *aseg;
1313     +
1314     + if (IS_ENABLED(CONFIG_EXPOLINE) &&
1315     + !nospec_disable && me->arch.plt_size) {
1316     + unsigned int *ij;
1317     +
1318     + ij = me->core_layout.base + me->arch.plt_offset +
1319     + me->arch.plt_size - PLT_ENTRY_SIZE;
1320     + if (test_facility(35)) {
1321     + ij[0] = 0xc6000000; /* exrl %r0,.+10 */
1322     + ij[1] = 0x0005a7f4; /* j . */
1323     + ij[2] = 0x000007f1; /* br %r1 */
1324     + } else {
1325     + ij[0] = 0x44000000 | (unsigned int)
1326     + offsetof(struct lowcore, br_r1_trampoline);
1327     + ij[1] = 0xa7f40000; /* j . */
1328     + }
1329     + }
1330     +
1331     + secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
1332     + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
1333     + aseg = (void *) s->sh_addr;
1334     + secname = secstrings + s->sh_name;
1335     +
1336     + if (!strcmp(".altinstructions", secname))
1337     + /* patch .altinstructions */
1338     + apply_alternatives(aseg, aseg + s->sh_size);
1339     +
1340     + if (IS_ENABLED(CONFIG_EXPOLINE) &&
1341     + (!strncmp(".s390_indirect", secname, 14)))
1342     + nospec_revert(aseg, aseg + s->sh_size);
1343     +
1344     + if (IS_ENABLED(CONFIG_EXPOLINE) &&
1345     + (!strncmp(".s390_return", secname, 12)))
1346     + nospec_revert(aseg, aseg + s->sh_size);
1347     + }
1348     +
1349     jump_label_apply_nops(me);
1350     return 0;
1351     }
1352     diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
1353     new file mode 100644
1354     index 000000000000..9f3b5b382743
1355     --- /dev/null
1356     +++ b/arch/s390/kernel/nospec-branch.c
1357     @@ -0,0 +1,169 @@
1358     +// SPDX-License-Identifier: GPL-2.0
1359     +#include <linux/module.h>
1360     +#include <linux/device.h>
1361     +#include <asm/facility.h>
1362     +#include <asm/nospec-branch.h>
1363     +
1364     +static int __init nobp_setup_early(char *str)
1365     +{
1366     + bool enabled;
1367     + int rc;
1368     +
1369     + rc = kstrtobool(str, &enabled);
1370     + if (rc)
1371     + return rc;
1372     + if (enabled && test_facility(82)) {
1373     + /*
1374     + * The user explicitely requested nobp=1, enable it and
1375     + * disable the expoline support.
1376     + */
1377     + __set_facility(82, S390_lowcore.alt_stfle_fac_list);
1378     + if (IS_ENABLED(CONFIG_EXPOLINE))
1379     + nospec_disable = 1;
1380     + } else {
1381     + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1382     + }
1383     + return 0;
1384     +}
1385     +early_param("nobp", nobp_setup_early);
1386     +
1387     +static int __init nospec_setup_early(char *str)
1388     +{
1389     + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1390     + return 0;
1391     +}
1392     +early_param("nospec", nospec_setup_early);
1393     +
1394     +static int __init nospec_report(void)
1395     +{
1396     + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1397     + pr_info("Spectre V2 mitigation: execute trampolines.\n");
1398     + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1399     + pr_info("Spectre V2 mitigation: limited branch prediction.\n");
1400     + return 0;
1401     +}
1402     +arch_initcall(nospec_report);
1403     +
1404     +#ifdef CONFIG_SYSFS
1405     +ssize_t cpu_show_spectre_v1(struct device *dev,
1406     + struct device_attribute *attr, char *buf)
1407     +{
1408     + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1409     +}
1410     +
1411     +ssize_t cpu_show_spectre_v2(struct device *dev,
1412     + struct device_attribute *attr, char *buf)
1413     +{
1414     + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1415     + return sprintf(buf, "Mitigation: execute trampolines\n");
1416     + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1417     + return sprintf(buf, "Mitigation: limited branch prediction.\n");
1418     + return sprintf(buf, "Vulnerable\n");
1419     +}
1420     +#endif
1421     +
1422     +#ifdef CONFIG_EXPOLINE
1423     +
1424     +int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
1425     +
1426     +static int __init nospectre_v2_setup_early(char *str)
1427     +{
1428     + nospec_disable = 1;
1429     + return 0;
1430     +}
1431     +early_param("nospectre_v2", nospectre_v2_setup_early);
1432     +
1433     +void __init nospec_auto_detect(void)
1434     +{
1435     + if (IS_ENABLED(CC_USING_EXPOLINE)) {
1436     + /*
1437     + * The kernel has been compiled with expolines.
1438     + * Keep expolines enabled and disable nobp.
1439     + */
1440     + nospec_disable = 0;
1441     + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1442     + }
1443     + /*
1444     + * If the kernel has not been compiled with expolines the
1445     + * nobp setting decides what is done, this depends on the
1446     + * CONFIG_KERNEL_NP option and the nobp/nospec parameters.
1447     + */
1448     +}
1449     +
1450     +static int __init spectre_v2_setup_early(char *str)
1451     +{
1452     + if (str && !strncmp(str, "on", 2)) {
1453     + nospec_disable = 0;
1454     + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1455     + }
1456     + if (str && !strncmp(str, "off", 3))
1457     + nospec_disable = 1;
1458     + if (str && !strncmp(str, "auto", 4))
1459     + nospec_auto_detect();
1460     + return 0;
1461     +}
1462     +early_param("spectre_v2", spectre_v2_setup_early);
1463     +
1464     +static void __init_or_module __nospec_revert(s32 *start, s32 *end)
1465     +{
1466     + enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
1467     + u8 *instr, *thunk, *br;
1468     + u8 insnbuf[6];
1469     + s32 *epo;
1470     +
1471     + /* Second part of the instruction replace is always a nop */
1472     + memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
1473     + for (epo = start; epo < end; epo++) {
1474     + instr = (u8 *) epo + *epo;
1475     + if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
1476     + type = BRCL_EXPOLINE; /* brcl instruction */
1477     + else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
1478     + type = BRASL_EXPOLINE; /* brasl instruction */
1479     + else
1480     + continue;
1481     + thunk = instr + (*(int *)(instr + 2)) * 2;
1482     + if (thunk[0] == 0xc6 && thunk[1] == 0x00)
1483     + /* exrl %r0,<target-br> */
1484     + br = thunk + (*(int *)(thunk + 2)) * 2;
1485     + else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
1486     + thunk[6] == 0x44 && thunk[7] == 0x00 &&
1487     + (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
1488     + (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
1489     + /* larl %rx,<target br> + ex %r0,0(%rx) */
1490     + br = thunk + (*(int *)(thunk + 2)) * 2;
1491     + else
1492     + continue;
1493     + if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
1494     + continue;
1495     + switch (type) {
1496     + case BRCL_EXPOLINE:
1497     + /* brcl to thunk, replace with br + nop */
1498     + insnbuf[0] = br[0];
1499     + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
1500     + break;
1501     + case BRASL_EXPOLINE:
1502     + /* brasl to thunk, replace with basr + nop */
1503     + insnbuf[0] = 0x0d;
1504     + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
1505     + break;
1506     + }
1507     +
1508     + s390_kernel_write(instr, insnbuf, 6);
1509     + }
1510     +}
1511     +
1512     +void __init_or_module nospec_revert(s32 *start, s32 *end)
1513     +{
1514     + if (nospec_disable)
1515     + __nospec_revert(start, end);
1516     +}
1517     +
1518     +extern s32 __nospec_call_start[], __nospec_call_end[];
1519     +extern s32 __nospec_return_start[], __nospec_return_end[];
1520     +void __init nospec_init_branches(void)
1521     +{
1522     + nospec_revert(__nospec_call_start, __nospec_call_end);
1523     + nospec_revert(__nospec_return_start, __nospec_return_end);
1524     +}
1525     +
1526     +#endif /* CONFIG_EXPOLINE */
1527     diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
1528     index 5362fd868d0d..6fe2e1875058 100644
1529     --- a/arch/s390/kernel/processor.c
1530     +++ b/arch/s390/kernel/processor.c
1531     @@ -197,3 +197,21 @@ const struct seq_operations cpuinfo_op = {
1532     .stop = c_stop,
1533     .show = show_cpuinfo,
1534     };
1535     +
1536     +int s390_isolate_bp(void)
1537     +{
1538     + if (!test_facility(82))
1539     + return -EOPNOTSUPP;
1540     + set_thread_flag(TIF_ISOLATE_BP);
1541     + return 0;
1542     +}
1543     +EXPORT_SYMBOL(s390_isolate_bp);
1544     +
1545     +int s390_isolate_bp_guest(void)
1546     +{
1547     + if (!test_facility(82))
1548     + return -EOPNOTSUPP;
1549     + set_thread_flag(TIF_ISOLATE_BP_GUEST);
1550     + return 0;
1551     +}
1552     +EXPORT_SYMBOL(s390_isolate_bp_guest);
1553     diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1554     index 164a1e16b53e..98c1f7941142 100644
1555     --- a/arch/s390/kernel/setup.c
1556     +++ b/arch/s390/kernel/setup.c
1557     @@ -66,6 +66,8 @@
1558     #include <asm/sclp.h>
1559     #include <asm/sysinfo.h>
1560     #include <asm/numa.h>
1561     +#include <asm/alternative.h>
1562     +#include <asm/nospec-branch.h>
1563     #include "entry.h"
1564    
1565     /*
1566     @@ -338,7 +340,9 @@ static void __init setup_lowcore(void)
1567     lc->preempt_count = S390_lowcore.preempt_count;
1568     lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
1569     memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
1570     - MAX_FACILITY_BIT/8);
1571     + sizeof(lc->stfle_fac_list));
1572     + memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
1573     + sizeof(lc->alt_stfle_fac_list));
1574     if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
1575     unsigned long bits, size;
1576    
1577     @@ -381,6 +385,7 @@ static void __init setup_lowcore(void)
1578     #ifdef CONFIG_SMP
1579     lc->spinlock_lockval = arch_spin_lockval(0);
1580     #endif
1581     + lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1582    
1583     set_prefix((u32)(unsigned long) lc);
1584     lowcore_ptr[0] = lc;
1585     @@ -892,6 +897,9 @@ void __init setup_arch(char **cmdline_p)
1586     init_mm.end_data = (unsigned long) &_edata;
1587     init_mm.brk = (unsigned long) &_end;
1588    
1589     + if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
1590     + nospec_auto_detect();
1591     +
1592     parse_early_param();
1593     #ifdef CONFIG_CRASH_DUMP
1594     /* Deactivate elfcorehdr= kernel parameter */
1595     @@ -955,6 +963,10 @@ void __init setup_arch(char **cmdline_p)
1596     conmode_default();
1597     set_preferred_console();
1598    
1599     + apply_alternative_instructions();
1600     + if (IS_ENABLED(CONFIG_EXPOLINE))
1601     + nospec_init_branches();
1602     +
1603     /* Setup zfcpdump support */
1604     setup_zfcpdump();
1605    
1606     diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1607     index 7ffaf9fd6d19..ae5df4177803 100644
1608     --- a/arch/s390/kernel/smp.c
1609     +++ b/arch/s390/kernel/smp.c
1610     @@ -228,6 +228,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
1611     lc->mcesad = mcesa_origin | mcesa_bits;
1612     lc->cpu_nr = cpu;
1613     lc->spinlock_lockval = arch_spin_lockval(cpu);
1614     + lc->br_r1_trampoline = 0x07f1; /* br %r1 */
1615     if (vdso_alloc_per_cpu(lc))
1616     goto out;
1617     lowcore_ptr[cpu] = lc;
1618     @@ -282,7 +283,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
1619     __ctl_store(lc->cregs_save_area, 0, 15);
1620     save_access_regs((unsigned int *) lc->access_regs_save_area);
1621     memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
1622     - MAX_FACILITY_BIT/8);
1623     + sizeof(lc->stfle_fac_list));
1624     + memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
1625     + sizeof(lc->alt_stfle_fac_list));
1626     }
1627    
1628     static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
1629     @@ -332,6 +335,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
1630     mem_assign_absolute(lc->restart_fn, (unsigned long) func);
1631     mem_assign_absolute(lc->restart_data, (unsigned long) data);
1632     mem_assign_absolute(lc->restart_source, source_cpu);
1633     + __bpon();
1634     asm volatile(
1635     "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
1636     " brc 2,0b # busy, try again\n"
1637     @@ -907,6 +911,7 @@ void __cpu_die(unsigned int cpu)
1638     void __noreturn cpu_die(void)
1639     {
1640     idle_task_exit();
1641     + __bpon();
1642     pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
1643     for (;;) ;
1644     }
1645     diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
1646     index d9d1f512f019..5007fac01bb5 100644
1647     --- a/arch/s390/kernel/uprobes.c
1648     +++ b/arch/s390/kernel/uprobes.c
1649     @@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
1650     return orig;
1651     }
1652    
1653     +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1654     + struct pt_regs *regs)
1655     +{
1656     + if (ctx == RP_CHECK_CHAIN_CALL)
1657     + return user_stack_pointer(regs) <= ret->stack;
1658     + else
1659     + return user_stack_pointer(regs) < ret->stack;
1660     +}
1661     +
1662     /* Instruction Emulation */
1663    
1664     static void adjust_psw_addr(psw_t *psw, unsigned long len)
1665     diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
1666     index 96a713a470e7..85dd3c7bdd86 100644
1667     --- a/arch/s390/kernel/vmlinux.lds.S
1668     +++ b/arch/s390/kernel/vmlinux.lds.S
1669     @@ -105,6 +105,43 @@ SECTIONS
1670     EXIT_DATA
1671     }
1672    
1673     + /*
1674     + * struct alt_inst entries. From the header (alternative.h):
1675     + * "Alternative instructions for different CPU types or capabilities"
1676     + * Think locking instructions on spinlocks.
1677     + * Note, that it is a part of __init region.
1678     + */
1679     + . = ALIGN(8);
1680     + .altinstructions : {
1681     + __alt_instructions = .;
1682     + *(.altinstructions)
1683     + __alt_instructions_end = .;
1684     + }
1685     +
1686     + /*
1687     + * And here are the replacement instructions. The linker sticks
1688     + * them as binary blobs. The .altinstructions has enough data to
1689     + * get the address and the length of them to patch the kernel safely.
1690     + * Note, that it is a part of __init region.
1691     + */
1692     + .altinstr_replacement : {
1693     + *(.altinstr_replacement)
1694     + }
1695     +
1696     + /*
1697     + * Table with the patch locations to undo expolines
1698     + */
1699     + .nospec_call_table : {
1700     + __nospec_call_start = . ;
1701     + *(.s390_indirect*)
1702     + __nospec_call_end = . ;
1703     + }
1704     + .nospec_return_table : {
1705     + __nospec_return_start = . ;
1706     + *(.s390_return*)
1707     + __nospec_return_end = . ;
1708     + }
1709     +
1710     /* early.c uses stsi, which requires page aligned data. */
1711     . = ALIGN(PAGE_SIZE);
1712     INIT_DATA_SECTION(0x100)
1713     diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1714     index 0bce918db11a..4f6adbea592b 100644
1715     --- a/arch/s390/kvm/kvm-s390.c
1716     +++ b/arch/s390/kvm/kvm-s390.c
1717     @@ -449,6 +449,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1718     case KVM_CAP_S390_GS:
1719     r = test_facility(133);
1720     break;
1721     + case KVM_CAP_S390_BPB:
1722     + r = test_facility(82);
1723     + break;
1724     default:
1725     r = 0;
1726     }
1727     @@ -2231,6 +2234,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1728     kvm_s390_set_prefix(vcpu, 0);
1729     if (test_kvm_facility(vcpu->kvm, 64))
1730     vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1731     + if (test_kvm_facility(vcpu->kvm, 82))
1732     + vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
1733     if (test_kvm_facility(vcpu->kvm, 133))
1734     vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
1735     /* fprs can be synchronized via vrs, even if the guest has no vx. With
1736     @@ -2372,6 +2377,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1737     current->thread.fpu.fpc = 0;
1738     vcpu->arch.sie_block->gbea = 1;
1739     vcpu->arch.sie_block->pp = 0;
1740     + vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
1741     vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1742     kvm_clear_async_pf_completion_queue(vcpu);
1743     if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1744     @@ -3318,6 +3324,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1745     vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
1746     vcpu->arch.gs_enabled = 1;
1747     }
1748     + if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
1749     + test_kvm_facility(vcpu->kvm, 82)) {
1750     + vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
1751     + vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
1752     + }
1753     save_access_regs(vcpu->arch.host_acrs);
1754     restore_access_regs(vcpu->run->s.regs.acrs);
1755     /* save host (userspace) fprs/vrs */
1756     @@ -3364,6 +3375,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1757     kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1758     kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1759     kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1760     + kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
1761     save_access_regs(vcpu->run->s.regs.acrs);
1762     restore_access_regs(vcpu->arch.host_acrs);
1763     /* Save guest register state */
1764     diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
1765     index a74204db759b..eb7b530d1783 100644
1766     --- a/arch/s390/kvm/vsie.c
1767     +++ b/arch/s390/kvm/vsie.c
1768     @@ -234,6 +234,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1769     memcpy(scb_o->gcr, scb_s->gcr, 128);
1770     scb_o->pp = scb_s->pp;
1771    
1772     + /* branch prediction */
1773     + if (test_kvm_facility(vcpu->kvm, 82)) {
1774     + scb_o->fpf &= ~FPF_BPBC;
1775     + scb_o->fpf |= scb_s->fpf & FPF_BPBC;
1776     + }
1777     +
1778     /* interrupt intercept */
1779     switch (scb_s->icptcode) {
1780     case ICPT_PROGI:
1781     @@ -280,6 +286,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1782     scb_s->ecb3 = 0;
1783     scb_s->ecd = 0;
1784     scb_s->fac = 0;
1785     + scb_s->fpf = 0;
1786    
1787     rc = prepare_cpuflags(vcpu, vsie_page);
1788     if (rc)
1789     @@ -339,6 +346,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1790     prefix_unmapped(vsie_page);
1791     scb_s->ecb |= ECB_TE;
1792     }
1793     + /* branch prediction */
1794     + if (test_kvm_facility(vcpu->kvm, 82))
1795     + scb_s->fpf |= scb_o->fpf & FPF_BPBC;
1796     /* SIMD */
1797     if (test_kvm_facility(vcpu->kvm, 129)) {
1798     scb_s->eca |= scb_o->eca & ECA_VX;
1799     @@ -821,6 +831,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1800     {
1801     struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1802     struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
1803     + int guest_bp_isolation;
1804     int rc;
1805    
1806     handle_last_fault(vcpu, vsie_page);
1807     @@ -831,6 +842,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1808     s390_handle_mcck();
1809    
1810     srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1811     +
1812     + /* save current guest state of bp isolation override */
1813     + guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1814     +
1815     + /*
1816     + * The guest is running with BPBC, so we have to force it on for our
1817     + * nested guest. This is done by enabling BPBC globally, so the BPBC
1818     + * control in the SCB (which the nested guest can modify) is simply
1819     + * ignored.
1820     + */
1821     + if (test_kvm_facility(vcpu->kvm, 82) &&
1822     + vcpu->arch.sie_block->fpf & FPF_BPBC)
1823     + set_thread_flag(TIF_ISOLATE_BP_GUEST);
1824     +
1825     local_irq_disable();
1826     guest_enter_irqoff();
1827     local_irq_enable();
1828     @@ -840,6 +865,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1829     local_irq_disable();
1830     guest_exit_irqoff();
1831     local_irq_enable();
1832     +
1833     + /* restore guest state for bp isolation override */
1834     + if (!guest_bp_isolation)
1835     + clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1836     +
1837     vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1838    
1839     if (rc == -EINTR) {
1840     diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
1841     index f53ccc680238..dbdd460a9958 100644
1842     --- a/drivers/acpi/acpi_video.c
1843     +++ b/drivers/acpi/acpi_video.c
1844     @@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
1845     return opregion;
1846     }
1847    
1848     +static bool dmi_is_desktop(void)
1849     +{
1850     + const char *chassis_type;
1851     +
1852     + chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
1853     + if (!chassis_type)
1854     + return false;
1855     +
1856     + if (!strcmp(chassis_type, "3") || /* 3: Desktop */
1857     + !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
1858     + !strcmp(chassis_type, "5") || /* 5: Pizza Box */
1859     + !strcmp(chassis_type, "6") || /* 6: Mini Tower */
1860     + !strcmp(chassis_type, "7") || /* 7: Tower */
1861     + !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
1862     + return true;
1863     +
1864     + return false;
1865     +}
1866     +
1867     int acpi_video_register(void)
1868     {
1869     int ret = 0;
1870     @@ -2143,8 +2162,12 @@ int acpi_video_register(void)
1871     * win8 ready (where we also prefer the native backlight driver, so
1872     * normally the acpi_video code should not register there anyways).
1873     */
1874     - if (only_lcd == -1)
1875     - only_lcd = acpi_osi_is_win8();
1876     + if (only_lcd == -1) {
1877     + if (dmi_is_desktop() && acpi_osi_is_win8())
1878     + only_lcd = true;
1879     + else
1880     + only_lcd = false;
1881     + }
1882    
1883     dmi_check_system(video_dmi_table);
1884    
1885     diff --git a/drivers/block/swim.c b/drivers/block/swim.c
1886     index 84434d3ea19b..e88d50f75a4a 100644
1887     --- a/drivers/block/swim.c
1888     +++ b/drivers/block/swim.c
1889     @@ -110,7 +110,7 @@ struct iwm {
1890     /* Select values for swim_select and swim_readbit */
1891    
1892     #define READ_DATA_0 0x074
1893     -#define TWOMEG_DRIVE 0x075
1894     +#define ONEMEG_DRIVE 0x075
1895     #define SINGLE_SIDED 0x076
1896     #define DRIVE_PRESENT 0x077
1897     #define DISK_IN 0x170
1898     @@ -118,9 +118,9 @@ struct iwm {
1899     #define TRACK_ZERO 0x172
1900     #define TACHO 0x173
1901     #define READ_DATA_1 0x174
1902     -#define MFM_MODE 0x175
1903     +#define GCR_MODE 0x175
1904     #define SEEK_COMPLETE 0x176
1905     -#define ONEMEG_MEDIA 0x177
1906     +#define TWOMEG_MEDIA 0x177
1907    
1908     /* Bits in handshake register */
1909    
1910     @@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
1911     struct floppy_struct *g;
1912     fs->disk_in = 1;
1913     fs->write_protected = swim_readbit(base, WRITE_PROT);
1914     - fs->type = swim_readbit(base, ONEMEG_MEDIA);
1915    
1916     if (swim_track00(base))
1917     printk(KERN_ERR
1918     @@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
1919    
1920     swim_track00(base);
1921    
1922     + fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
1923     + HD_MEDIA : DD_MEDIA;
1924     + fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
1925     get_floppy_geometry(fs, 0, &g);
1926     fs->total_secs = g->size;
1927     fs->secpercyl = g->head * g->sect;
1928     @@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
1929    
1930     swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
1931     udelay(10);
1932     - swim_drive(base, INTERNAL_DRIVE);
1933     + swim_drive(base, fs->location);
1934     swim_motor(base, ON);
1935     swim_action(base, SETMFM);
1936     if (fs->ejected)
1937     @@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
1938     goto out;
1939     }
1940    
1941     + set_capacity(fs->disk, fs->total_secs);
1942     +
1943     if (mode & FMODE_NDELAY)
1944     return 0;
1945    
1946     @@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
1947     if (copy_to_user((void __user *) param, (void *) &floppy_type,
1948     sizeof(struct floppy_struct)))
1949     return -EFAULT;
1950     - break;
1951     -
1952     - default:
1953     - printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
1954     - cmd);
1955     - return -ENOSYS;
1956     + return 0;
1957     }
1958     - return 0;
1959     + return -ENOTTY;
1960     }
1961    
1962     static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1963     @@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
1964     struct swim_priv *swd = data;
1965     int drive = (*part & 3);
1966    
1967     - if (drive > swd->floppy_count)
1968     + if (drive >= swd->floppy_count)
1969     return NULL;
1970    
1971     *part = 0;
1972     @@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
1973    
1974     swim_motor(base, OFF);
1975    
1976     - if (swim_readbit(base, SINGLE_SIDED))
1977     - fs->head_number = 1;
1978     - else
1979     - fs->head_number = 2;
1980     + fs->type = HD_MEDIA;
1981     + fs->head_number = 2;
1982     +
1983     fs->ref_count = 0;
1984     fs->ejected = 1;
1985    
1986     @@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
1987     /* scan floppy drives */
1988    
1989     swim_drive(base, INTERNAL_DRIVE);
1990     - if (swim_readbit(base, DRIVE_PRESENT))
1991     + if (swim_readbit(base, DRIVE_PRESENT) &&
1992     + !swim_readbit(base, ONEMEG_DRIVE))
1993     swim_add_floppy(swd, INTERNAL_DRIVE);
1994     swim_drive(base, EXTERNAL_DRIVE);
1995     - if (swim_readbit(base, DRIVE_PRESENT))
1996     + if (swim_readbit(base, DRIVE_PRESENT) &&
1997     + !swim_readbit(base, ONEMEG_DRIVE))
1998     swim_add_floppy(swd, EXTERNAL_DRIVE);
1999    
2000     /* register floppy drives */
2001     @@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
2002     &swd->lock);
2003     if (!swd->unit[drive].disk->queue) {
2004     err = -ENOMEM;
2005     - put_disk(swd->unit[drive].disk);
2006     goto exit_put_disks;
2007     }
2008     blk_queue_bounce_limit(swd->unit[drive].disk->queue,
2009     @@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
2010     goto out;
2011     }
2012    
2013     - swim_base = ioremap(res->start, resource_size(res));
2014     + swim_base = (struct swim __iomem *)res->start;
2015     if (!swim_base) {
2016     ret = -ENOMEM;
2017     goto out_release_io;
2018     @@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
2019     if (!get_swim_mode(swim_base)) {
2020     printk(KERN_INFO "SWIM device not found !\n");
2021     ret = -ENODEV;
2022     - goto out_iounmap;
2023     + goto out_release_io;
2024     }
2025    
2026     /* set platform driver data */
2027     @@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
2028     swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
2029     if (!swd) {
2030     ret = -ENOMEM;
2031     - goto out_iounmap;
2032     + goto out_release_io;
2033     }
2034     platform_set_drvdata(dev, swd);
2035    
2036     @@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
2037    
2038     out_kfree:
2039     kfree(swd);
2040     -out_iounmap:
2041     - iounmap(swim_base);
2042     out_release_io:
2043     release_mem_region(res->start, resource_size(res));
2044     out:
2045     @@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
2046     for (drive = 0; drive < swd->floppy_count; drive++)
2047     floppy_eject(&swd->unit[drive]);
2048    
2049     - iounmap(swd->base);
2050     -
2051     res = platform_get_resource(dev, IORESOURCE_MEM, 0);
2052     if (res)
2053     release_mem_region(res->start, resource_size(res));
2054     diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
2055     index 9f931f8f6b4c..0d7527c6825a 100644
2056     --- a/drivers/block/swim3.c
2057     +++ b/drivers/block/swim3.c
2058     @@ -148,7 +148,7 @@ struct swim3 {
2059     #define MOTOR_ON 2
2060     #define RELAX 3 /* also eject in progress */
2061     #define READ_DATA_0 4
2062     -#define TWOMEG_DRIVE 5
2063     +#define ONEMEG_DRIVE 5
2064     #define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
2065     #define DRIVE_PRESENT 7
2066     #define DISK_IN 8
2067     @@ -156,9 +156,9 @@ struct swim3 {
2068     #define TRACK_ZERO 10
2069     #define TACHO 11
2070     #define READ_DATA_1 12
2071     -#define MFM_MODE 13
2072     +#define GCR_MODE 13
2073     #define SEEK_COMPLETE 14
2074     -#define ONEMEG_MEDIA 15
2075     +#define TWOMEG_MEDIA 15
2076    
2077     /* Definitions of values used in writing and formatting */
2078     #define DATA_ESCAPE 0x99
2079     diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
2080     index e36d160c458f..5f7d86509f2f 100644
2081     --- a/drivers/cdrom/cdrom.c
2082     +++ b/drivers/cdrom/cdrom.c
2083     @@ -2374,7 +2374,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
2084     if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
2085     return media_changed(cdi, 1);
2086    
2087     - if ((unsigned int)arg >= cdi->capacity)
2088     + if (arg >= cdi->capacity)
2089     return -EINVAL;
2090    
2091     info = kmalloc(sizeof(*info), GFP_KERNEL);
2092     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
2093     index 1d01a8f77db1..dba5259def60 100644
2094     --- a/drivers/char/tpm/tpm-interface.c
2095     +++ b/drivers/char/tpm/tpm-interface.c
2096     @@ -369,20 +369,40 @@ static int tpm_validate_command(struct tpm_chip *chip,
2097     return -EINVAL;
2098     }
2099    
2100     -/**
2101     - * tmp_transmit - Internal kernel interface to transmit TPM commands.
2102     - *
2103     - * @chip: TPM chip to use
2104     - * @buf: TPM command buffer
2105     - * @bufsiz: length of the TPM command buffer
2106     - * @flags: tpm transmit flags - bitmap
2107     - *
2108     - * Return:
2109     - * 0 when the operation is successful.
2110     - * A negative number for system errors (errno).
2111     - */
2112     -ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
2113     - u8 *buf, size_t bufsiz, unsigned int flags)
2114     +static int tpm_request_locality(struct tpm_chip *chip)
2115     +{
2116     + int rc;
2117     +
2118     + if (!chip->ops->request_locality)
2119     + return 0;
2120     +
2121     + rc = chip->ops->request_locality(chip, 0);
2122     + if (rc < 0)
2123     + return rc;
2124     +
2125     + chip->locality = rc;
2126     +
2127     + return 0;
2128     +}
2129     +
2130     +static void tpm_relinquish_locality(struct tpm_chip *chip)
2131     +{
2132     + int rc;
2133     +
2134     + if (!chip->ops->relinquish_locality)
2135     + return;
2136     +
2137     + rc = chip->ops->relinquish_locality(chip, chip->locality);
2138     + if (rc)
2139     + dev_err(&chip->dev, "%s: : error %d\n", __func__, rc);
2140     +
2141     + chip->locality = -1;
2142     +}
2143     +
2144     +static ssize_t tpm_try_transmit(struct tpm_chip *chip,
2145     + struct tpm_space *space,
2146     + u8 *buf, size_t bufsiz,
2147     + unsigned int flags)
2148     {
2149     struct tpm_output_header *header = (void *)buf;
2150     int rc;
2151     @@ -422,8 +442,6 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
2152     if (!(flags & TPM_TRANSMIT_UNLOCKED))
2153     mutex_lock(&chip->tpm_mutex);
2154    
2155     - if (chip->dev.parent)
2156     - pm_runtime_get_sync(chip->dev.parent);
2157    
2158     if (chip->ops->clk_enable != NULL)
2159     chip->ops->clk_enable(chip, true);
2160     @@ -431,14 +449,15 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
2161     /* Store the decision as chip->locality will be changed. */
2162     need_locality = chip->locality == -1;
2163    
2164     - if (!(flags & TPM_TRANSMIT_RAW) &&
2165     - need_locality && chip->ops->request_locality) {
2166     - rc = chip->ops->request_locality(chip, 0);
2167     + if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
2168     + rc = tpm_request_locality(chip);
2169     if (rc < 0)
2170     goto out_no_locality;
2171     - chip->locality = rc;
2172     }
2173    
2174     + if (chip->dev.parent)
2175     + pm_runtime_get_sync(chip->dev.parent);
2176     +
2177     rc = tpm2_prepare_space(chip, space, ordinal, buf);
2178     if (rc)
2179     goto out;
2180     @@ -499,27 +518,83 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
2181     rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
2182    
2183     out:
2184     - if (need_locality && chip->ops->relinquish_locality) {
2185     - chip->ops->relinquish_locality(chip, chip->locality);
2186     - chip->locality = -1;
2187     - }
2188     + if (chip->dev.parent)
2189     + pm_runtime_put_sync(chip->dev.parent);
2190     +
2191     + if (need_locality)
2192     + tpm_relinquish_locality(chip);
2193     +
2194     out_no_locality:
2195     if (chip->ops->clk_enable != NULL)
2196     chip->ops->clk_enable(chip, false);
2197    
2198     - if (chip->dev.parent)
2199     - pm_runtime_put_sync(chip->dev.parent);
2200     -
2201     if (!(flags & TPM_TRANSMIT_UNLOCKED))
2202     mutex_unlock(&chip->tpm_mutex);
2203     return rc ? rc : len;
2204     }
2205    
2206     /**
2207     - * tmp_transmit_cmd - send a tpm command to the device
2208     + * tpm_transmit - Internal kernel interface to transmit TPM commands.
2209     + *
2210     + * @chip: TPM chip to use
2211     + * @space: tpm space
2212     + * @buf: TPM command buffer
2213     + * @bufsiz: length of the TPM command buffer
2214     + * @flags: tpm transmit flags - bitmap
2215     + *
2216     + * A wrapper around tpm_try_transmit that handles TPM2_RC_RETRY
2217     + * returns from the TPM and retransmits the command after a delay up
2218     + * to a maximum wait of TPM2_DURATION_LONG.
2219     + *
2220     + * Note: TPM1 never returns TPM2_RC_RETRY so the retry logic is TPM2
2221     + * only
2222     + *
2223     + * Return:
2224     + * the length of the return when the operation is successful.
2225     + * A negative number for system errors (errno).
2226     + */
2227     +ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
2228     + u8 *buf, size_t bufsiz, unsigned int flags)
2229     +{
2230     + struct tpm_output_header *header = (struct tpm_output_header *)buf;
2231     + /* space for header and handles */
2232     + u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)];
2233     + unsigned int delay_msec = TPM2_DURATION_SHORT;
2234     + u32 rc = 0;
2235     + ssize_t ret;
2236     + const size_t save_size = min(space ? sizeof(save) : TPM_HEADER_SIZE,
2237     + bufsiz);
2238     +
2239     + /*
2240     + * Subtlety here: if we have a space, the handles will be
2241     + * transformed, so when we restore the header we also have to
2242     + * restore the handles.
2243     + */
2244     + memcpy(save, buf, save_size);
2245     +
2246     + for (;;) {
2247     + ret = tpm_try_transmit(chip, space, buf, bufsiz, flags);
2248     + if (ret < 0)
2249     + break;
2250     + rc = be32_to_cpu(header->return_code);
2251     + if (rc != TPM2_RC_RETRY)
2252     + break;
2253     + delay_msec *= 2;
2254     + if (delay_msec > TPM2_DURATION_LONG) {
2255     + dev_err(&chip->dev, "TPM is in retry loop\n");
2256     + break;
2257     + }
2258     + tpm_msleep(delay_msec);
2259     + memcpy(buf, save, save_size);
2260     + }
2261     + return ret;
2262     +}
2263     +/**
2264     + * tpm_transmit_cmd - send a tpm command to the device
2265     * The function extracts tpm out header return code
2266     *
2267     * @chip: TPM chip to use
2268     + * @space: tpm space
2269     * @buf: TPM command buffer
2270     * @bufsiz: length of the buffer
2271     * @min_rsp_body_length: minimum expected length of response body
2272     diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
2273     index 0b5b499f726a..b83b30a3eea5 100644
2274     --- a/drivers/char/tpm/tpm.h
2275     +++ b/drivers/char/tpm/tpm.h
2276     @@ -106,6 +106,7 @@ enum tpm2_return_codes {
2277     TPM2_RC_COMMAND_CODE = 0x0143,
2278     TPM2_RC_TESTING = 0x090A, /* RC_WARN */
2279     TPM2_RC_REFERENCE_H0 = 0x0910,
2280     + TPM2_RC_RETRY = 0x0922,
2281     };
2282    
2283     enum tpm2_algorithms {
2284     diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
2285     index 8f0a98dea327..bb756ad7897e 100644
2286     --- a/drivers/char/tpm/tpm_crb.c
2287     +++ b/drivers/char/tpm/tpm_crb.c
2288     @@ -117,6 +117,25 @@ struct tpm2_crb_smc {
2289     u32 smc_func_id;
2290     };
2291    
2292     +static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
2293     + unsigned long timeout)
2294     +{
2295     + ktime_t start;
2296     + ktime_t stop;
2297     +
2298     + start = ktime_get();
2299     + stop = ktime_add(start, ms_to_ktime(timeout));
2300     +
2301     + do {
2302     + if ((ioread32(reg) & mask) == value)
2303     + return true;
2304     +
2305     + usleep_range(50, 100);
2306     + } while (ktime_before(ktime_get(), stop));
2307     +
2308     + return ((ioread32(reg) & mask) == value);
2309     +}
2310     +
2311     /**
2312     * crb_go_idle - request tpm crb device to go the idle state
2313     *
2314     @@ -132,37 +151,24 @@ struct tpm2_crb_smc {
2315     *
2316     * Return: 0 always
2317     */
2318     -static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
2319     +static int crb_go_idle(struct device *dev, struct crb_priv *priv)
2320     {
2321     if ((priv->flags & CRB_FL_ACPI_START) ||
2322     (priv->flags & CRB_FL_CRB_SMC_START))
2323     return 0;
2324    
2325     iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
2326     - /* we don't really care when this settles */
2327    
2328     + if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
2329     + CRB_CTRL_REQ_GO_IDLE/* mask */,
2330     + 0, /* value */
2331     + TPM2_TIMEOUT_C)) {
2332     + dev_warn(dev, "goIdle timed out\n");
2333     + return -ETIME;
2334     + }
2335     return 0;
2336     }
2337    
2338     -static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
2339     - unsigned long timeout)
2340     -{
2341     - ktime_t start;
2342     - ktime_t stop;
2343     -
2344     - start = ktime_get();
2345     - stop = ktime_add(start, ms_to_ktime(timeout));
2346     -
2347     - do {
2348     - if ((ioread32(reg) & mask) == value)
2349     - return true;
2350     -
2351     - usleep_range(50, 100);
2352     - } while (ktime_before(ktime_get(), stop));
2353     -
2354     - return false;
2355     -}
2356     -
2357     /**
2358     * crb_cmd_ready - request tpm crb device to enter ready state
2359     *
2360     @@ -177,8 +183,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
2361     *
2362     * Return: 0 on success -ETIME on timeout;
2363     */
2364     -static int __maybe_unused crb_cmd_ready(struct device *dev,
2365     - struct crb_priv *priv)
2366     +static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
2367     {
2368     if ((priv->flags & CRB_FL_ACPI_START) ||
2369     (priv->flags & CRB_FL_CRB_SMC_START))
2370     @@ -196,11 +201,11 @@ static int __maybe_unused crb_cmd_ready(struct device *dev,
2371     return 0;
2372     }
2373    
2374     -static int crb_request_locality(struct tpm_chip *chip, int loc)
2375     +static int __crb_request_locality(struct device *dev,
2376     + struct crb_priv *priv, int loc)
2377     {
2378     - struct crb_priv *priv = dev_get_drvdata(&chip->dev);
2379     u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
2380     - CRB_LOC_STATE_TPM_REG_VALID_STS;
2381     + CRB_LOC_STATE_TPM_REG_VALID_STS;
2382    
2383     if (!priv->regs_h)
2384     return 0;
2385     @@ -208,21 +213,45 @@ static int crb_request_locality(struct tpm_chip *chip, int loc)
2386     iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
2387     if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
2388     TPM2_TIMEOUT_C)) {
2389     - dev_warn(&chip->dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
2390     + dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
2391     return -ETIME;
2392     }
2393    
2394     return 0;
2395     }
2396    
2397     -static void crb_relinquish_locality(struct tpm_chip *chip, int loc)
2398     +static int crb_request_locality(struct tpm_chip *chip, int loc)
2399     {
2400     struct crb_priv *priv = dev_get_drvdata(&chip->dev);
2401    
2402     + return __crb_request_locality(&chip->dev, priv, loc);
2403     +}
2404     +
2405     +static int __crb_relinquish_locality(struct device *dev,
2406     + struct crb_priv *priv, int loc)
2407     +{
2408     + u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
2409     + CRB_LOC_STATE_TPM_REG_VALID_STS;
2410     + u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
2411     +
2412     if (!priv->regs_h)
2413     - return;
2414     + return 0;
2415    
2416     iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
2417     + if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
2418     + TPM2_TIMEOUT_C)) {
2419     + dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
2420     + return -ETIME;
2421     + }
2422     +
2423     + return 0;
2424     +}
2425     +
2426     +static int crb_relinquish_locality(struct tpm_chip *chip, int loc)
2427     +{
2428     + struct crb_priv *priv = dev_get_drvdata(&chip->dev);
2429     +
2430     + return __crb_relinquish_locality(&chip->dev, priv, loc);
2431     }
2432    
2433     static u8 crb_status(struct tpm_chip *chip)
2434     @@ -466,6 +495,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
2435     dev_warn(dev, FW_BUG "Bad ACPI memory layout");
2436     }
2437    
2438     + ret = __crb_request_locality(dev, priv, 0);
2439     + if (ret)
2440     + return ret;
2441     +
2442     priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
2443     sizeof(struct crb_regs_tail));
2444     if (IS_ERR(priv->regs_t))
2445     @@ -522,6 +555,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
2446    
2447     crb_go_idle(dev, priv);
2448    
2449     + __crb_relinquish_locality(dev, priv, 0);
2450     +
2451     return ret;
2452     }
2453    
2454     @@ -589,10 +624,14 @@ static int crb_acpi_add(struct acpi_device *device)
2455     chip->acpi_dev_handle = device->handle;
2456     chip->flags = TPM_CHIP_FLAG_TPM2;
2457    
2458     - rc = crb_cmd_ready(dev, priv);
2459     + rc = __crb_request_locality(dev, priv, 0);
2460     if (rc)
2461     return rc;
2462    
2463     + rc = crb_cmd_ready(dev, priv);
2464     + if (rc)
2465     + goto out;
2466     +
2467     pm_runtime_get_noresume(dev);
2468     pm_runtime_set_active(dev);
2469     pm_runtime_enable(dev);
2470     @@ -602,12 +641,15 @@ static int crb_acpi_add(struct acpi_device *device)
2471     crb_go_idle(dev, priv);
2472     pm_runtime_put_noidle(dev);
2473     pm_runtime_disable(dev);
2474     - return rc;
2475     + goto out;
2476     }
2477    
2478     - pm_runtime_put(dev);
2479     + pm_runtime_put_sync(dev);
2480    
2481     - return 0;
2482     +out:
2483     + __crb_relinquish_locality(dev, priv, 0);
2484     +
2485     + return rc;
2486     }
2487    
2488     static int crb_acpi_remove(struct acpi_device *device)
2489     diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
2490     index a21e31c2b952..58123df6b5f6 100644
2491     --- a/drivers/char/tpm/tpm_tis_core.c
2492     +++ b/drivers/char/tpm/tpm_tis_core.c
2493     @@ -77,11 +77,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
2494     return false;
2495     }
2496    
2497     -static void release_locality(struct tpm_chip *chip, int l)
2498     +static int release_locality(struct tpm_chip *chip, int l)
2499     {
2500     struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
2501    
2502     tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
2503     +
2504     + return 0;
2505     }
2506    
2507     static int request_locality(struct tpm_chip *chip, int l)
2508     diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
2509     index bf14214fa464..4db31b89507c 100644
2510     --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
2511     +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
2512     @@ -1634,6 +1634,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
2513     * (and possibly on the platform). So far only i.MX6Q (v1.30a) and
2514     * i.MX6DL (v1.31a) have been identified as needing the workaround, with
2515     * 4 and 1 iterations respectively.
2516     + * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
2517     + * the workaround with a single iteration.
2518     */
2519    
2520     switch (hdmi->version) {
2521     @@ -1641,6 +1643,7 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
2522     count = 4;
2523     break;
2524     case 0x131a:
2525     + case 0x201a:
2526     count = 1;
2527     break;
2528     default:
2529     diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
2530     index 345f6035599e..f1d93676b0fc 100644
2531     --- a/drivers/message/fusion/mptsas.c
2532     +++ b/drivers/message/fusion/mptsas.c
2533     @@ -1995,6 +1995,7 @@ static struct scsi_host_template mptsas_driver_template = {
2534     .cmd_per_lun = 7,
2535     .use_clustering = ENABLE_CLUSTERING,
2536     .shost_attrs = mptscsih_host_attrs,
2537     + .no_write_same = 1,
2538     };
2539    
2540     static int mptsas_get_linkerrors(struct sas_phy *phy)
2541     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2542     index 82f28ffccddf..bf3be2e6d4a8 100644
2543     --- a/drivers/net/bonding/bond_main.c
2544     +++ b/drivers/net/bonding/bond_main.c
2545     @@ -1656,8 +1656,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
2546     } /* switch(bond_mode) */
2547    
2548     #ifdef CONFIG_NET_POLL_CONTROLLER
2549     - slave_dev->npinfo = bond->dev->npinfo;
2550     - if (slave_dev->npinfo) {
2551     + if (bond->dev->npinfo) {
2552     if (slave_enable_netpoll(new_slave)) {
2553     netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2554     res = -EBUSY;
2555     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
2556     index 7ea72ef11a55..d272dc6984ac 100644
2557     --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
2558     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
2559     @@ -1321,6 +1321,10 @@
2560     #define MDIO_VEND2_AN_STAT 0x8002
2561     #endif
2562    
2563     +#ifndef MDIO_VEND2_PMA_CDR_CONTROL
2564     +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
2565     +#endif
2566     +
2567     #ifndef MDIO_CTRL1_SPEED1G
2568     #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
2569     #endif
2570     @@ -1369,6 +1373,10 @@
2571     #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
2572     #define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
2573    
2574     +#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01
2575     +#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
2576     +#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
2577     +
2578     /* Bit setting and getting macros
2579     * The get macro will extract the current bit field value from within
2580     * the variable
2581     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
2582     index 7d128be61310..b91143947ed2 100644
2583     --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
2584     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
2585     @@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
2586     "debugfs_create_file failed\n");
2587     }
2588    
2589     + if (pdata->vdata->an_cdr_workaround) {
2590     + pfile = debugfs_create_bool("an_cdr_workaround", 0600,
2591     + pdata->xgbe_debugfs,
2592     + &pdata->debugfs_an_cdr_workaround);
2593     + if (!pfile)
2594     + netdev_err(pdata->netdev,
2595     + "debugfs_create_bool failed\n");
2596     +
2597     + pfile = debugfs_create_bool("an_cdr_track_early", 0600,
2598     + pdata->xgbe_debugfs,
2599     + &pdata->debugfs_an_cdr_track_early);
2600     + if (!pfile)
2601     + netdev_err(pdata->netdev,
2602     + "debugfs_create_bool failed\n");
2603     + }
2604     +
2605     kfree(buf);
2606     }
2607    
2608     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
2609     index d91fa595be98..e31d9d1fb6a6 100644
2610     --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
2611     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
2612     @@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
2613     XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
2614    
2615     /* Call MDIO/PHY initialization routine */
2616     + pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
2617     ret = pdata->phy_if.phy_init(pdata);
2618     if (ret)
2619     return ret;
2620     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
2621     index 072b9f664597..1b45cd73a258 100644
2622     --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
2623     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
2624     @@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
2625     xgbe_an73_set(pdata, false, false);
2626     xgbe_an73_disable_interrupts(pdata);
2627    
2628     + pdata->an_start = 0;
2629     +
2630     netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
2631     }
2632    
2633     static void xgbe_an_restart(struct xgbe_prv_data *pdata)
2634     {
2635     + if (pdata->phy_if.phy_impl.an_pre)
2636     + pdata->phy_if.phy_impl.an_pre(pdata);
2637     +
2638     switch (pdata->an_mode) {
2639     case XGBE_AN_MODE_CL73:
2640     case XGBE_AN_MODE_CL73_REDRV:
2641     @@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
2642    
2643     static void xgbe_an_disable(struct xgbe_prv_data *pdata)
2644     {
2645     + if (pdata->phy_if.phy_impl.an_post)
2646     + pdata->phy_if.phy_impl.an_post(pdata);
2647     +
2648     switch (pdata->an_mode) {
2649     case XGBE_AN_MODE_CL73:
2650     case XGBE_AN_MODE_CL73_REDRV:
2651     @@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
2652     XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
2653     reg);
2654    
2655     - if (pdata->phy_if.phy_impl.kr_training_post)
2656     - pdata->phy_if.phy_impl.kr_training_post(pdata);
2657     -
2658     netif_dbg(pdata, link, pdata->netdev,
2659     "KR training initiated\n");
2660     +
2661     + if (pdata->phy_if.phy_impl.kr_training_post)
2662     + pdata->phy_if.phy_impl.kr_training_post(pdata);
2663     }
2664    
2665     return XGBE_AN_PAGE_RECEIVED;
2666     @@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
2667     return XGBE_AN_NO_LINK;
2668     }
2669    
2670     - xgbe_an73_disable(pdata);
2671     + xgbe_an_disable(pdata);
2672    
2673     xgbe_switch_mode(pdata);
2674    
2675     - xgbe_an73_restart(pdata);
2676     + xgbe_an_restart(pdata);
2677    
2678     return XGBE_AN_INCOMPAT_LINK;
2679     }
2680     @@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
2681     pdata->an_result = pdata->an_state;
2682     pdata->an_state = XGBE_AN_READY;
2683    
2684     + if (pdata->phy_if.phy_impl.an_post)
2685     + pdata->phy_if.phy_impl.an_post(pdata);
2686     +
2687     netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
2688     xgbe_state_as_string(pdata->an_result));
2689     }
2690     @@ -903,6 +914,9 @@ static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata)
2691     pdata->kx_state = XGBE_RX_BPA;
2692     pdata->an_start = 0;
2693    
2694     + if (pdata->phy_if.phy_impl.an_post)
2695     + pdata->phy_if.phy_impl.an_post(pdata);
2696     +
2697     netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
2698     xgbe_state_as_string(pdata->an_result));
2699     }
2700     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
2701     index eb23f9ba1a9a..82d1f416ee2a 100644
2702     --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
2703     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
2704     @@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
2705     .irq_reissue_support = 1,
2706     .tx_desc_prefetch = 5,
2707     .rx_desc_prefetch = 5,
2708     + .an_cdr_workaround = 1,
2709     };
2710    
2711     static const struct xgbe_version_data xgbe_v2b = {
2712     @@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
2713     .irq_reissue_support = 1,
2714     .tx_desc_prefetch = 5,
2715     .rx_desc_prefetch = 5,
2716     + .an_cdr_workaround = 1,
2717     };
2718    
2719     static const struct pci_device_id xgbe_pci_table[] = {
2720     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
2721     index 3304a291aa96..aac884314000 100644
2722     --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
2723     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
2724     @@ -147,6 +147,14 @@
2725     /* Rate-change complete wait/retry count */
2726     #define XGBE_RATECHANGE_COUNT 500
2727    
2728     +/* CDR delay values for KR support (in usec) */
2729     +#define XGBE_CDR_DELAY_INIT 10000
2730     +#define XGBE_CDR_DELAY_INC 10000
2731     +#define XGBE_CDR_DELAY_MAX 100000
2732     +
2733     +/* RRC frequency during link status check */
2734     +#define XGBE_RRC_FREQUENCY 10
2735     +
2736     enum xgbe_port_mode {
2737     XGBE_PORT_MODE_RSVD = 0,
2738     XGBE_PORT_MODE_BACKPLANE,
2739     @@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
2740     #define XGBE_SFP_BASE_VENDOR_SN 4
2741     #define XGBE_SFP_BASE_VENDOR_SN_LEN 16
2742    
2743     +#define XGBE_SFP_EXTD_OPT1 1
2744     +#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
2745     +#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)
2746     +
2747     #define XGBE_SFP_EXTD_DIAG 28
2748     #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
2749    
2750     @@ -324,6 +336,7 @@ struct xgbe_phy_data {
2751    
2752     unsigned int sfp_gpio_address;
2753     unsigned int sfp_gpio_mask;
2754     + unsigned int sfp_gpio_inputs;
2755     unsigned int sfp_gpio_rx_los;
2756     unsigned int sfp_gpio_tx_fault;
2757     unsigned int sfp_gpio_mod_absent;
2758     @@ -355,6 +368,10 @@ struct xgbe_phy_data {
2759     unsigned int redrv_addr;
2760     unsigned int redrv_lane;
2761     unsigned int redrv_model;
2762     +
2763     + /* KR AN support */
2764     + unsigned int phy_cdr_notrack;
2765     + unsigned int phy_cdr_delay;
2766     };
2767    
2768     /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
2769     @@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
2770     phy_data->sfp_phy_avail = 1;
2771     }
2772    
2773     +static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
2774     +{
2775     + u8 *sfp_extd = phy_data->sfp_eeprom.extd;
2776     +
2777     + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
2778     + return false;
2779     +
2780     + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
2781     + return false;
2782     +
2783     + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
2784     + return true;
2785     +
2786     + return false;
2787     +}
2788     +
2789     +static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
2790     +{
2791     + u8 *sfp_extd = phy_data->sfp_eeprom.extd;
2792     +
2793     + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
2794     + return false;
2795     +
2796     + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
2797     + return false;
2798     +
2799     + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
2800     + return true;
2801     +
2802     + return false;
2803     +}
2804     +
2805     +static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
2806     +{
2807     + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
2808     + return false;
2809     +
2810     + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
2811     + return true;
2812     +
2813     + return false;
2814     +}
2815     +
2816     static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
2817     {
2818     struct xgbe_phy_data *phy_data = pdata->phy_data;
2819     @@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
2820     if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
2821     return;
2822    
2823     + /* Update transceiver signals (eeprom extd/options) */
2824     + phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
2825     + phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
2826     +
2827     if (xgbe_phy_sfp_parse_quirks(pdata))
2828     return;
2829    
2830     @@ -1184,7 +1248,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
2831     static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
2832     {
2833     struct xgbe_phy_data *phy_data = pdata->phy_data;
2834     - unsigned int gpio_input;
2835     u8 gpio_reg, gpio_ports[2];
2836     int ret;
2837    
2838     @@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
2839     return;
2840     }
2841    
2842     - gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
2843     -
2844     - if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
2845     - /* No GPIO, just assume the module is present for now */
2846     - phy_data->sfp_mod_absent = 0;
2847     - } else {
2848     - if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
2849     - phy_data->sfp_mod_absent = 0;
2850     - }
2851     -
2852     - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
2853     - (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
2854     - phy_data->sfp_rx_los = 1;
2855     + phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
2856    
2857     - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) &&
2858     - (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
2859     - phy_data->sfp_tx_fault = 1;
2860     + phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
2861     }
2862    
2863     static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
2864     @@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
2865     return 1;
2866    
2867     /* No link, attempt a receiver reset cycle */
2868     - if (phy_data->rrc_count++) {
2869     + if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
2870     phy_data->rrc_count = 0;
2871     xgbe_phy_rrc(pdata);
2872     }
2873     @@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
2874     return true;
2875     }
2876    
2877     +static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
2878     +{
2879     + struct xgbe_phy_data *phy_data = pdata->phy_data;
2880     +
2881     + if (!pdata->debugfs_an_cdr_workaround)
2882     + return;
2883     +
2884     + if (!phy_data->phy_cdr_notrack)
2885     + return;
2886     +
2887     + usleep_range(phy_data->phy_cdr_delay,
2888     + phy_data->phy_cdr_delay + 500);
2889     +
2890     + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
2891     + XGBE_PMA_CDR_TRACK_EN_MASK,
2892     + XGBE_PMA_CDR_TRACK_EN_ON);
2893     +
2894     + phy_data->phy_cdr_notrack = 0;
2895     +}
2896     +
2897     +static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
2898     +{
2899     + struct xgbe_phy_data *phy_data = pdata->phy_data;
2900     +
2901     + if (!pdata->debugfs_an_cdr_workaround)
2902     + return;
2903     +
2904     + if (phy_data->phy_cdr_notrack)
2905     + return;
2906     +
2907     + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
2908     + XGBE_PMA_CDR_TRACK_EN_MASK,
2909     + XGBE_PMA_CDR_TRACK_EN_OFF);
2910     +
2911     + xgbe_phy_rrc(pdata);
2912     +
2913     + phy_data->phy_cdr_notrack = 1;
2914     +}
2915     +
2916     +static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
2917     +{
2918     + if (!pdata->debugfs_an_cdr_track_early)
2919     + xgbe_phy_cdr_track(pdata);
2920     +}
2921     +
2922     +static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
2923     +{
2924     + if (pdata->debugfs_an_cdr_track_early)
2925     + xgbe_phy_cdr_track(pdata);
2926     +}
2927     +
2928     +static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
2929     +{
2930     + struct xgbe_phy_data *phy_data = pdata->phy_data;
2931     +
2932     + switch (pdata->an_mode) {
2933     + case XGBE_AN_MODE_CL73:
2934     + case XGBE_AN_MODE_CL73_REDRV:
2935     + if (phy_data->cur_mode != XGBE_MODE_KR)
2936     + break;
2937     +
2938     + xgbe_phy_cdr_track(pdata);
2939     +
2940     + switch (pdata->an_result) {
2941     + case XGBE_AN_READY:
2942     + case XGBE_AN_COMPLETE:
2943     + break;
2944     + default:
2945     + if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
2946     + phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
2947     + else
2948     + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
2949     + break;
2950     + }
2951     + break;
2952     + default:
2953     + break;
2954     + }
2955     +}
2956     +
2957     +static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
2958     +{
2959     + struct xgbe_phy_data *phy_data = pdata->phy_data;
2960     +
2961     + switch (pdata->an_mode) {
2962     + case XGBE_AN_MODE_CL73:
2963     + case XGBE_AN_MODE_CL73_REDRV:
2964     + if (phy_data->cur_mode != XGBE_MODE_KR)
2965     + break;
2966     +
2967     + xgbe_phy_cdr_notrack(pdata);
2968     + break;
2969     + default:
2970     + break;
2971     + }
2972     +}
2973     +
2974     static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
2975     {
2976     struct xgbe_phy_data *phy_data = pdata->phy_data;
2977     @@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
2978     xgbe_phy_sfp_reset(phy_data);
2979     xgbe_phy_sfp_mod_absent(pdata);
2980    
2981     + /* Reset CDR support */
2982     + xgbe_phy_cdr_track(pdata);
2983     +
2984     /* Power off the PHY */
2985     xgbe_phy_power_off(pdata);
2986    
2987     @@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
2988     /* Start in highest supported mode */
2989     xgbe_phy_set_mode(pdata, phy_data->start_mode);
2990    
2991     + /* Reset CDR support */
2992     + xgbe_phy_cdr_track(pdata);
2993     +
2994     /* After starting the I2C controller, we can check for an SFP */
2995     switch (phy_data->port_mode) {
2996     case XGBE_PORT_MODE_SFP:
2997     @@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
2998     }
2999     }
3000    
3001     + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
3002     +
3003     /* Register for driving external PHYs */
3004     mii = devm_mdiobus_alloc(pdata->dev);
3005     if (!mii) {
3006     @@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
3007     phy_impl->an_advertising = xgbe_phy_an_advertising;
3008    
3009     phy_impl->an_outcome = xgbe_phy_an_outcome;
3010     +
3011     + phy_impl->an_pre = xgbe_phy_an_pre;
3012     + phy_impl->an_post = xgbe_phy_an_post;
3013     +
3014     + phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
3015     + phy_impl->kr_training_post = xgbe_phy_kr_training_post;
3016     }
3017     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
3018     index ad102c8bac7b..95d4b56448c6 100644
3019     --- a/drivers/net/ethernet/amd/xgbe/xgbe.h
3020     +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
3021     @@ -833,6 +833,7 @@ struct xgbe_hw_if {
3022     /* This structure represents implementation specific routines for an
3023     * implementation of a PHY. All routines are required unless noted below.
3024     * Optional routines:
3025     + * an_pre, an_post
3026     * kr_training_pre, kr_training_post
3027     */
3028     struct xgbe_phy_impl_if {
3029     @@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
3030     /* Process results of auto-negotiation */
3031     enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
3032    
3033     + /* Pre/Post auto-negotiation support */
3034     + void (*an_pre)(struct xgbe_prv_data *);
3035     + void (*an_post)(struct xgbe_prv_data *);
3036     +
3037     /* Pre/Post KR training enablement support */
3038     void (*kr_training_pre)(struct xgbe_prv_data *);
3039     void (*kr_training_post)(struct xgbe_prv_data *);
3040     @@ -989,6 +994,7 @@ struct xgbe_version_data {
3041     unsigned int irq_reissue_support;
3042     unsigned int tx_desc_prefetch;
3043     unsigned int rx_desc_prefetch;
3044     + unsigned int an_cdr_workaround;
3045     };
3046    
3047     struct xgbe_vxlan_data {
3048     @@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
3049     unsigned int debugfs_xprop_reg;
3050    
3051     unsigned int debugfs_xi2c_reg;
3052     +
3053     + bool debugfs_an_cdr_workaround;
3054     + bool debugfs_an_cdr_track_early;
3055     };
3056    
3057     /* Function prototypes*/
3058     diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3059     index e368b0237a1b..4a85a24ced1c 100644
3060     --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3061     +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
3062     @@ -2781,6 +2781,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3063     int ret = 0;
3064     struct hlist_node *h;
3065     int bkt;
3066     + u8 i;
3067    
3068     /* validate the request */
3069     if (vf_id >= pf->num_alloc_vfs) {
3070     @@ -2792,6 +2793,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3071    
3072     vf = &(pf->vf[vf_id]);
3073     vsi = pf->vsi[vf->lan_vsi_idx];
3074     +
3075     + /* When the VF is resetting wait until it is done.
3076     + * It can take up to 200 milliseconds,
3077     + * but wait for up to 300 milliseconds to be safe.
3078     + */
3079     + for (i = 0; i < 15; i++) {
3080     + if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
3081     + break;
3082     + msleep(20);
3083     + }
3084     if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3085     dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3086     vf_id);
3087     diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
3088     index a5bb7b19040e..992c43b1868f 100644
3089     --- a/drivers/net/ethernet/ti/cpsw.c
3090     +++ b/drivers/net/ethernet/ti/cpsw.c
3091     @@ -124,7 +124,7 @@ do { \
3092    
3093     #define RX_PRIORITY_MAPPING 0x76543210
3094     #define TX_PRIORITY_MAPPING 0x33221100
3095     -#define CPDMA_TX_PRIORITY_MAP 0x01234567
3096     +#define CPDMA_TX_PRIORITY_MAP 0x76543210
3097    
3098     #define CPSW_VLAN_AWARE BIT(1)
3099     #define CPSW_ALE_VLAN_AWARE 1
3100     diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
3101     index 5aa59f41bf8c..71e2aef6b7a1 100644
3102     --- a/drivers/net/ppp/pppoe.c
3103     +++ b/drivers/net/ppp/pppoe.c
3104     @@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
3105     lock_sock(sk);
3106    
3107     error = -EINVAL;
3108     +
3109     + if (sockaddr_len != sizeof(struct sockaddr_pppox))
3110     + goto end;
3111     +
3112     if (sp->sa_protocol != PX_PROTO_OE)
3113     goto end;
3114    
3115     diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
3116     index 2a366554c503..8a222ae5950e 100644
3117     --- a/drivers/net/team/team.c
3118     +++ b/drivers/net/team/team.c
3119     @@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
3120     }
3121     }
3122    
3123     +static bool __team_option_inst_tmp_find(const struct list_head *opts,
3124     + const struct team_option_inst *needle)
3125     +{
3126     + struct team_option_inst *opt_inst;
3127     +
3128     + list_for_each_entry(opt_inst, opts, tmp_list)
3129     + if (opt_inst == needle)
3130     + return true;
3131     + return false;
3132     +}
3133     +
3134     static int __team_options_register(struct team *team,
3135     const struct team_option *option,
3136     size_t option_count)
3137     @@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
3138     }
3139    
3140     #ifdef CONFIG_NET_POLL_CONTROLLER
3141     -static int team_port_enable_netpoll(struct team *team, struct team_port *port)
3142     +static int __team_port_enable_netpoll(struct team_port *port)
3143     {
3144     struct netpoll *np;
3145     int err;
3146    
3147     - if (!team->dev->npinfo)
3148     - return 0;
3149     -
3150     np = kzalloc(sizeof(*np), GFP_KERNEL);
3151     if (!np)
3152     return -ENOMEM;
3153     @@ -1082,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
3154     return err;
3155     }
3156    
3157     +static int team_port_enable_netpoll(struct team_port *port)
3158     +{
3159     + if (!port->team->dev->npinfo)
3160     + return 0;
3161     +
3162     + return __team_port_enable_netpoll(port);
3163     +}
3164     +
3165     static void team_port_disable_netpoll(struct team_port *port)
3166     {
3167     struct netpoll *np = port->np;
3168     @@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
3169     kfree(np);
3170     }
3171     #else
3172     -static int team_port_enable_netpoll(struct team *team, struct team_port *port)
3173     +static int team_port_enable_netpoll(struct team_port *port)
3174     {
3175     return 0;
3176     }
3177     @@ -1204,7 +1220,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
3178     goto err_vids_add;
3179     }
3180    
3181     - err = team_port_enable_netpoll(team, port);
3182     + err = team_port_enable_netpoll(port);
3183     if (err) {
3184     netdev_err(dev, "Failed to enable netpoll on device %s\n",
3185     portname);
3186     @@ -1901,7 +1917,7 @@ static int team_netpoll_setup(struct net_device *dev,
3187    
3188     mutex_lock(&team->lock);
3189     list_for_each_entry(port, &team->port_list, list) {
3190     - err = team_port_enable_netpoll(team, port);
3191     + err = __team_port_enable_netpoll(port);
3192     if (err) {
3193     __team_netpoll_cleanup(team);
3194     break;
3195     @@ -2561,6 +2577,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
3196     if (err)
3197     goto team_put;
3198     opt_inst->changed = true;
3199     +
3200     + /* dumb/evil user-space can send us duplicate opt,
3201     + * keep only the last one
3202     + */
3203     + if (__team_option_inst_tmp_find(&opt_inst_list,
3204     + opt_inst))
3205     + continue;
3206     +
3207     list_add(&opt_inst->tmp_list, &opt_inst_list);
3208     }
3209     if (!opt_found) {
3210     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
3211     index b0a038e6fda0..bb15b3012aa5 100644
3212     --- a/drivers/net/virtio_net.c
3213     +++ b/drivers/net/virtio_net.c
3214     @@ -116,6 +116,17 @@ struct receive_queue {
3215     char name[40];
3216     };
3217    
3218     +/* Control VQ buffers: protected by the rtnl lock */
3219     +struct control_buf {
3220     + struct virtio_net_ctrl_hdr hdr;
3221     + virtio_net_ctrl_ack status;
3222     + struct virtio_net_ctrl_mq mq;
3223     + u8 promisc;
3224     + u8 allmulti;
3225     + __virtio16 vid;
3226     + u64 offloads;
3227     +};
3228     +
3229     struct virtnet_info {
3230     struct virtio_device *vdev;
3231     struct virtqueue *cvq;
3232     @@ -164,14 +175,7 @@ struct virtnet_info {
3233     struct hlist_node node;
3234     struct hlist_node node_dead;
3235    
3236     - /* Control VQ buffers: protected by the rtnl lock */
3237     - struct virtio_net_ctrl_hdr ctrl_hdr;
3238     - virtio_net_ctrl_ack ctrl_status;
3239     - struct virtio_net_ctrl_mq ctrl_mq;
3240     - u8 ctrl_promisc;
3241     - u8 ctrl_allmulti;
3242     - u16 ctrl_vid;
3243     - u64 ctrl_offloads;
3244     + struct control_buf *ctrl;
3245    
3246     /* Ethtool settings */
3247     u8 duplex;
3248     @@ -1340,25 +1344,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
3249     /* Caller should know better */
3250     BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
3251    
3252     - vi->ctrl_status = ~0;
3253     - vi->ctrl_hdr.class = class;
3254     - vi->ctrl_hdr.cmd = cmd;
3255     + vi->ctrl->status = ~0;
3256     + vi->ctrl->hdr.class = class;
3257     + vi->ctrl->hdr.cmd = cmd;
3258     /* Add header */
3259     - sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
3260     + sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
3261     sgs[out_num++] = &hdr;
3262    
3263     if (out)
3264     sgs[out_num++] = out;
3265    
3266     /* Add return status. */
3267     - sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
3268     + sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
3269     sgs[out_num] = &stat;
3270    
3271     BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
3272     virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
3273    
3274     if (unlikely(!virtqueue_kick(vi->cvq)))
3275     - return vi->ctrl_status == VIRTIO_NET_OK;
3276     + return vi->ctrl->status == VIRTIO_NET_OK;
3277    
3278     /* Spin for a response, the kick causes an ioport write, trapping
3279     * into the hypervisor, so the request should be handled immediately.
3280     @@ -1367,7 +1371,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
3281     !virtqueue_is_broken(vi->cvq))
3282     cpu_relax();
3283    
3284     - return vi->ctrl_status == VIRTIO_NET_OK;
3285     + return vi->ctrl->status == VIRTIO_NET_OK;
3286     }
3287    
3288     static int virtnet_set_mac_address(struct net_device *dev, void *p)
3289     @@ -1478,8 +1482,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
3290     if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
3291     return 0;
3292    
3293     - vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
3294     - sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
3295     + vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
3296     + sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
3297    
3298     if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3299     VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
3300     @@ -1537,22 +1541,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
3301     if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
3302     return;
3303    
3304     - vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
3305     - vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
3306     + vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
3307     + vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
3308    
3309     - sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
3310     + sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
3311    
3312     if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3313     VIRTIO_NET_CTRL_RX_PROMISC, sg))
3314     dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
3315     - vi->ctrl_promisc ? "en" : "dis");
3316     + vi->ctrl->promisc ? "en" : "dis");
3317    
3318     - sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
3319     + sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
3320    
3321     if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3322     VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
3323     dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
3324     - vi->ctrl_allmulti ? "en" : "dis");
3325     + vi->ctrl->allmulti ? "en" : "dis");
3326    
3327     uc_count = netdev_uc_count(dev);
3328     mc_count = netdev_mc_count(dev);
3329     @@ -1598,8 +1602,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
3330     struct virtnet_info *vi = netdev_priv(dev);
3331     struct scatterlist sg;
3332    
3333     - vi->ctrl_vid = vid;
3334     - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
3335     + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
3336     + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
3337    
3338     if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3339     VIRTIO_NET_CTRL_VLAN_ADD, &sg))
3340     @@ -1613,8 +1617,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
3341     struct virtnet_info *vi = netdev_priv(dev);
3342     struct scatterlist sg;
3343    
3344     - vi->ctrl_vid = vid;
3345     - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
3346     + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
3347     + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
3348    
3349     if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3350     VIRTIO_NET_CTRL_VLAN_DEL, &sg))
3351     @@ -1912,9 +1916,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
3352     static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
3353     {
3354     struct scatterlist sg;
3355     - vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
3356     + vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3357    
3358     - sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
3359     + sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3360    
3361     if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
3362     VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
3363     @@ -2134,6 +2138,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
3364    
3365     kfree(vi->rq);
3366     kfree(vi->sq);
3367     + kfree(vi->ctrl);
3368     }
3369    
3370     static void _free_receive_bufs(struct virtnet_info *vi)
3371     @@ -2326,6 +2331,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
3372     {
3373     int i;
3374    
3375     + vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
3376     + if (!vi->ctrl)
3377     + goto err_ctrl;
3378     vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
3379     if (!vi->sq)
3380     goto err_sq;
3381     @@ -2351,6 +2359,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
3382     err_rq:
3383     kfree(vi->sq);
3384     err_sq:
3385     + kfree(vi->ctrl);
3386     +err_ctrl:
3387     return -ENOMEM;
3388     }
3389    
3390     diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
3391     index 252c2206cbb5..c1772215702a 100644
3392     --- a/drivers/net/wireless/ath/ath10k/mac.c
3393     +++ b/drivers/net/wireless/ath/ath10k/mac.c
3394     @@ -5955,9 +5955,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
3395     sta->addr, smps, err);
3396     }
3397    
3398     - if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
3399     - changed & IEEE80211_RC_NSS_CHANGED) {
3400     - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
3401     + if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
3402     + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
3403     sta->addr);
3404    
3405     err = ath10k_station_assoc(ar, arvif->vif, sta, true);
3406     diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
3407     index 72b4527d690f..71df0f70b61f 100644
3408     --- a/drivers/pinctrl/intel/pinctrl-intel.c
3409     +++ b/drivers/pinctrl/intel/pinctrl-intel.c
3410     @@ -427,18 +427,6 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
3411     writel(value, padcfg0);
3412     }
3413    
3414     -static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
3415     -{
3416     - u32 value;
3417     -
3418     - /* Put the pad into GPIO mode */
3419     - value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
3420     - /* Disable SCI/SMI/NMI generation */
3421     - value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
3422     - value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
3423     - writel(value, padcfg0);
3424     -}
3425     -
3426     static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
3427     struct pinctrl_gpio_range *range,
3428     unsigned pin)
3429     @@ -446,6 +434,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
3430     struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
3431     void __iomem *padcfg0;
3432     unsigned long flags;
3433     + u32 value;
3434    
3435     raw_spin_lock_irqsave(&pctrl->lock, flags);
3436    
3437     @@ -455,7 +444,13 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
3438     }
3439    
3440     padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
3441     - intel_gpio_set_gpio_mode(padcfg0);
3442     + /* Put the pad into GPIO mode */
3443     + value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
3444     + /* Disable SCI/SMI/NMI generation */
3445     + value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
3446     + value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
3447     + writel(value, padcfg0);
3448     +
3449     /* Disable TX buffer and enable RX (this will be input) */
3450     __intel_gpio_set_direction(padcfg0, true);
3451    
3452     @@ -940,8 +935,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
3453    
3454     raw_spin_lock_irqsave(&pctrl->lock, flags);
3455    
3456     - intel_gpio_set_gpio_mode(reg);
3457     -
3458     value = readl(reg);
3459    
3460     value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
3461     diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
3462     index 62f5f04d8f61..5e963fe0e38d 100644
3463     --- a/drivers/s390/block/dasd_alias.c
3464     +++ b/drivers/s390/block/dasd_alias.c
3465     @@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
3466     int dasd_alias_add_device(struct dasd_device *device)
3467     {
3468     struct dasd_eckd_private *private = device->private;
3469     - struct alias_lcu *lcu;
3470     + __u8 uaddr = private->uid.real_unit_addr;
3471     + struct alias_lcu *lcu = private->lcu;
3472     unsigned long flags;
3473     int rc;
3474    
3475     - lcu = private->lcu;
3476     rc = 0;
3477     spin_lock_irqsave(&lcu->lock, flags);
3478     + /*
3479     + * Check if device and lcu type differ. If so, the uac data may be
3480     + * outdated and needs to be updated.
3481     + */
3482     + if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
3483     + lcu->flags |= UPDATE_PENDING;
3484     + DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3485     + "uid type mismatch - trigger rescan");
3486     + }
3487     if (!(lcu->flags & UPDATE_PENDING)) {
3488     rc = _add_device_to_lcu(lcu, device, device);
3489     if (rc)
3490     diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
3491     index 05ac6ba15a53..ecc24a46e71a 100644
3492     --- a/drivers/s390/char/Makefile
3493     +++ b/drivers/s390/char/Makefile
3494     @@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
3495     CFLAGS_sclp_early_core.o += -march=z900
3496     endif
3497    
3498     +CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
3499     +
3500     obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
3501     sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
3502     sclp_early.o sclp_early_core.o
3503     diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
3504     index 7b0b295b2313..69687c16a150 100644
3505     --- a/drivers/s390/cio/chsc.c
3506     +++ b/drivers/s390/cio/chsc.c
3507     @@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
3508    
3509     static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
3510     {
3511     + struct channel_path *chp;
3512     struct chp_link link;
3513     struct chp_id chpid;
3514     int status;
3515     @@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
3516     chpid.id = sei_area->rsid;
3517     /* allocate a new channel path structure, if needed */
3518     status = chp_get_status(chpid);
3519     - if (status < 0)
3520     - chp_new(chpid);
3521     - else if (!status)
3522     + if (!status)
3523     return;
3524     +
3525     + if (status < 0) {
3526     + chp_new(chpid);
3527     + } else {
3528     + chp = chpid_to_chp(chpid);
3529     + mutex_lock(&chp->lock);
3530     + chp_update_desc(chp);
3531     + mutex_unlock(&chp->lock);
3532     + }
3533     memset(&link, 0, sizeof(struct chp_link));
3534     link.chpid = chpid;
3535     if ((sei_area->vf & 0xc0) != 0) {
3536     diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
3537     index 3597ef78df4d..ce74278a454a 100644
3538     --- a/include/linux/fsnotify_backend.h
3539     +++ b/include/linux/fsnotify_backend.h
3540     @@ -217,12 +217,10 @@ struct fsnotify_mark_connector {
3541     union { /* Object pointer [lock] */
3542     struct inode *inode;
3543     struct vfsmount *mnt;
3544     - };
3545     - union {
3546     - struct hlist_head list;
3547     /* Used listing heads to free after srcu period expires */
3548     struct fsnotify_mark_connector *destroy_next;
3549     };
3550     + struct hlist_head list;
3551     };
3552    
3553     /*
3554     diff --git a/include/linux/hmm.h b/include/linux/hmm.h
3555     index 8198faf16ed6..96e69979f84d 100644
3556     --- a/include/linux/hmm.h
3557     +++ b/include/linux/hmm.h
3558     @@ -498,16 +498,23 @@ struct hmm_device {
3559     struct hmm_device *hmm_device_new(void *drvdata);
3560     void hmm_device_put(struct hmm_device *hmm_device);
3561     #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
3562     +#endif /* IS_ENABLED(CONFIG_HMM) */
3563    
3564     /* Below are for HMM internal use only! Not to be used by device driver! */
3565     +#if IS_ENABLED(CONFIG_HMM_MIRROR)
3566     void hmm_mm_destroy(struct mm_struct *mm);
3567    
3568     static inline void hmm_mm_init(struct mm_struct *mm)
3569     {
3570     mm->hmm = NULL;
3571     }
3572     +#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
3573     +static inline void hmm_mm_destroy(struct mm_struct *mm) {}
3574     +static inline void hmm_mm_init(struct mm_struct *mm) {}
3575     +#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
3576     +
3577     +
3578     #else /* IS_ENABLED(CONFIG_HMM) */
3579     static inline void hmm_mm_destroy(struct mm_struct *mm) {}
3580     static inline void hmm_mm_init(struct mm_struct *mm) {}
3581     -#endif /* IS_ENABLED(CONFIG_HMM) */
3582     #endif /* LINUX_HMM_H */
3583     diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
3584     index 5e6a2d4dc366..ab927383c99d 100644
3585     --- a/include/linux/if_vlan.h
3586     +++ b/include/linux/if_vlan.h
3587     @@ -584,7 +584,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
3588     * Returns true if the skb is tagged with multiple vlan headers, regardless
3589     * of whether it is hardware accelerated or not.
3590     */
3591     -static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
3592     +static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
3593     {
3594     __be16 protocol = skb->protocol;
3595    
3596     @@ -594,6 +594,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
3597     if (likely(!eth_type_vlan(protocol)))
3598     return false;
3599    
3600     + if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
3601     + return false;
3602     +
3603     veh = (struct vlan_ethhdr *)skb->data;
3604     protocol = veh->h_vlan_encapsulated_proto;
3605     }
3606     @@ -611,7 +614,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
3607     *
3608     * Returns features without unsafe ones if the skb has multiple tags.
3609     */
3610     -static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
3611     +static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
3612     netdev_features_t features)
3613     {
3614     if (skb_vlan_tagged_multi(skb)) {
3615     diff --git a/include/linux/tpm.h b/include/linux/tpm.h
3616     index 881312d85574..2a6c3d96b31f 100644
3617     --- a/include/linux/tpm.h
3618     +++ b/include/linux/tpm.h
3619     @@ -49,7 +49,7 @@ struct tpm_class_ops {
3620     bool (*update_timeouts)(struct tpm_chip *chip,
3621     unsigned long *timeout_cap);
3622     int (*request_locality)(struct tpm_chip *chip, int loc);
3623     - void (*relinquish_locality)(struct tpm_chip *chip, int loc);
3624     + int (*relinquish_locality)(struct tpm_chip *chip, int loc);
3625     void (*clk_enable)(struct tpm_chip *chip, bool value);
3626     };
3627    
3628     diff --git a/include/net/ife.h b/include/net/ife.h
3629     index 44b9c00f7223..e117617e3c34 100644
3630     --- a/include/net/ife.h
3631     +++ b/include/net/ife.h
3632     @@ -12,7 +12,8 @@
3633     void *ife_encode(struct sk_buff *skb, u16 metalen);
3634     void *ife_decode(struct sk_buff *skb, u16 *metalen);
3635    
3636     -void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen);
3637     +void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
3638     + u16 *dlen, u16 *totlen);
3639     int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
3640     const void *dval);
3641    
3642     diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
3643     index fe994d2e5286..ea985aa7a6c5 100644
3644     --- a/include/net/llc_conn.h
3645     +++ b/include/net/llc_conn.h
3646     @@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
3647    
3648     struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
3649     struct proto *prot, int kern);
3650     +void llc_sk_stop_all_timers(struct sock *sk, bool sync);
3651     void llc_sk_free(struct sock *sk);
3652    
3653     void llc_sk_reset(struct sock *sk);
3654     diff --git a/include/net/tcp.h b/include/net/tcp.h
3655     index d323d4fa742c..fb653736f335 100644
3656     --- a/include/net/tcp.h
3657     +++ b/include/net/tcp.h
3658     @@ -1616,6 +1616,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
3659     sk_mem_reclaim(sk);
3660     tcp_clear_all_retrans_hints(tcp_sk(sk));
3661     tcp_init_send_head(sk);
3662     + tcp_sk(sk)->packets_out = 0;
3663     }
3664    
3665     static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
3666     diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
3667     index 7e99999d6236..857bad91c454 100644
3668     --- a/include/uapi/linux/kvm.h
3669     +++ b/include/uapi/linux/kvm.h
3670     @@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
3671     #define KVM_CAP_PPC_SMT_POSSIBLE 147
3672     #define KVM_CAP_HYPERV_SYNIC2 148
3673     #define KVM_CAP_HYPERV_VP_INDEX 149
3674     +#define KVM_CAP_S390_BPB 152
3675    
3676     #ifdef KVM_CAP_IRQ_ROUTING
3677    
3678     diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
3679     index e954ae3d82c0..e3a658bac10f 100644
3680     --- a/kernel/trace/trace_entries.h
3681     +++ b/kernel/trace/trace_entries.h
3682     @@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
3683     __field( unsigned int, seqnum )
3684     ),
3685    
3686     - F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n",
3687     + F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n",
3688     __entry->seqnum,
3689     __entry->tv_sec,
3690     __entry->tv_nsec,
3691     diff --git a/net/core/dev.c b/net/core/dev.c
3692     index 4be2a4047640..e7d56c5adde6 100644
3693     --- a/net/core/dev.c
3694     +++ b/net/core/dev.c
3695     @@ -2903,7 +2903,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
3696     }
3697     EXPORT_SYMBOL(passthru_features_check);
3698    
3699     -static netdev_features_t dflt_features_check(const struct sk_buff *skb,
3700     +static netdev_features_t dflt_features_check(struct sk_buff *skb,
3701     struct net_device *dev,
3702     netdev_features_t features)
3703     {
3704     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
3705     index 741ae2554190..514d697d4691 100644
3706     --- a/net/core/neighbour.c
3707     +++ b/net/core/neighbour.c
3708     @@ -55,7 +55,8 @@ static void neigh_timer_handler(unsigned long arg);
3709     static void __neigh_notify(struct neighbour *n, int type, int flags,
3710     u32 pid);
3711     static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
3712     -static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
3713     +static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
3714     + struct net_device *dev);
3715    
3716     #ifdef CONFIG_PROC_FS
3717     static const struct file_operations neigh_stat_seq_fops;
3718     @@ -291,8 +292,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
3719     {
3720     write_lock_bh(&tbl->lock);
3721     neigh_flush_dev(tbl, dev);
3722     - pneigh_ifdown(tbl, dev);
3723     - write_unlock_bh(&tbl->lock);
3724     + pneigh_ifdown_and_unlock(tbl, dev);
3725    
3726     del_timer_sync(&tbl->proxy_timer);
3727     pneigh_queue_purge(&tbl->proxy_queue);
3728     @@ -681,9 +681,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
3729     return -ENOENT;
3730     }
3731    
3732     -static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
3733     +static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
3734     + struct net_device *dev)
3735     {
3736     - struct pneigh_entry *n, **np;
3737     + struct pneigh_entry *n, **np, *freelist = NULL;
3738     u32 h;
3739    
3740     for (h = 0; h <= PNEIGH_HASHMASK; h++) {
3741     @@ -691,16 +692,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
3742     while ((n = *np) != NULL) {
3743     if (!dev || n->dev == dev) {
3744     *np = n->next;
3745     - if (tbl->pdestructor)
3746     - tbl->pdestructor(n);
3747     - if (n->dev)
3748     - dev_put(n->dev);
3749     - kfree(n);
3750     + n->next = freelist;
3751     + freelist = n;
3752     continue;
3753     }
3754     np = &n->next;
3755     }
3756     }
3757     + write_unlock_bh(&tbl->lock);
3758     + while ((n = freelist)) {
3759     + freelist = n->next;
3760     + n->next = NULL;
3761     + if (tbl->pdestructor)
3762     + tbl->pdestructor(n);
3763     + if (n->dev)
3764     + dev_put(n->dev);
3765     + kfree(n);
3766     + }
3767     return -ENOENT;
3768     }
3769    
3770     @@ -2323,12 +2331,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
3771    
3772     err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
3773     if (!err) {
3774     - if (tb[NDA_IFINDEX])
3775     + if (tb[NDA_IFINDEX]) {
3776     + if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
3777     + return -EINVAL;
3778     filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
3779     -
3780     - if (tb[NDA_MASTER])
3781     + }
3782     + if (tb[NDA_MASTER]) {
3783     + if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
3784     + return -EINVAL;
3785     filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
3786     -
3787     + }
3788     if (filter_idx || filter_master_idx)
3789     flags |= NLM_F_DUMP_FILTERED;
3790     }
3791     diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
3792     index e1d4d898a007..f0252768ecf4 100644
3793     --- a/net/dns_resolver/dns_key.c
3794     +++ b/net/dns_resolver/dns_key.c
3795     @@ -25,6 +25,7 @@
3796     #include <linux/moduleparam.h>
3797     #include <linux/slab.h>
3798     #include <linux/string.h>
3799     +#include <linux/ratelimit.h>
3800     #include <linux/kernel.h>
3801     #include <linux/keyctl.h>
3802     #include <linux/err.h>
3803     @@ -91,9 +92,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
3804    
3805     next_opt = memchr(opt, '#', end - opt) ?: end;
3806     opt_len = next_opt - opt;
3807     - if (!opt_len) {
3808     - printk(KERN_WARNING
3809     - "Empty option to dns_resolver key\n");
3810     + if (opt_len <= 0 || opt_len > 128) {
3811     + pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
3812     + opt_len);
3813     return -EINVAL;
3814     }
3815    
3816     @@ -127,10 +128,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
3817     }
3818    
3819     bad_option_value:
3820     - printk(KERN_WARNING
3821     - "Option '%*.*s' to dns_resolver key:"
3822     - " bad/missing value\n",
3823     - opt_nlen, opt_nlen, opt);
3824     + pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
3825     + opt_nlen, opt_nlen, opt);
3826     return -EINVAL;
3827     } while (opt = next_opt + 1, opt < end);
3828     }
3829     diff --git a/net/ife/ife.c b/net/ife/ife.c
3830     index 7d1ec76e7f43..13bbf8cb6a39 100644
3831     --- a/net/ife/ife.c
3832     +++ b/net/ife/ife.c
3833     @@ -69,6 +69,9 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
3834     int total_pull;
3835     u16 ifehdrln;
3836    
3837     + if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN))
3838     + return NULL;
3839     +
3840     ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len);
3841     ifehdrln = ntohs(ifehdr->metalen);
3842     total_pull = skb->dev->hard_header_len + ifehdrln;
3843     @@ -92,12 +95,43 @@ struct meta_tlvhdr {
3844     __be16 len;
3845     };
3846    
3847     +static bool __ife_tlv_meta_valid(const unsigned char *skbdata,
3848     + const unsigned char *ifehdr_end)
3849     +{
3850     + const struct meta_tlvhdr *tlv;
3851     + u16 tlvlen;
3852     +
3853     + if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end))
3854     + return false;
3855     +
3856     + tlv = (const struct meta_tlvhdr *)skbdata;
3857     + tlvlen = ntohs(tlv->len);
3858     +
3859     + /* tlv length field is inc header, check on minimum */
3860     + if (tlvlen < NLA_HDRLEN)
3861     + return false;
3862     +
3863     + /* overflow by NLA_ALIGN check */
3864     + if (NLA_ALIGN(tlvlen) < tlvlen)
3865     + return false;
3866     +
3867     + if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end))
3868     + return false;
3869     +
3870     + return true;
3871     +}
3872     +
3873     /* Caller takes care of presenting data in network order
3874     */
3875     -void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen)
3876     +void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
3877     + u16 *dlen, u16 *totlen)
3878     {
3879     - struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata;
3880     + struct meta_tlvhdr *tlv;
3881     +
3882     + if (!__ife_tlv_meta_valid(skbdata, ifehdr_end))
3883     + return NULL;
3884    
3885     + tlv = (struct meta_tlvhdr *)skbdata;
3886     *dlen = ntohs(tlv->len) - NLA_HDRLEN;
3887     *attrtype = ntohs(tlv->type);
3888    
3889     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3890     index 38b9a6276a9d..d023f879e7bb 100644
3891     --- a/net/ipv4/tcp.c
3892     +++ b/net/ipv4/tcp.c
3893     @@ -2354,7 +2354,6 @@ int tcp_disconnect(struct sock *sk, int flags)
3894     icsk->icsk_backoff = 0;
3895     tp->snd_cwnd = 2;
3896     icsk->icsk_probes_out = 0;
3897     - tp->packets_out = 0;
3898     tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
3899     tp->snd_cwnd_cnt = 0;
3900     tp->window_clamp = 0;
3901     @@ -2742,8 +2741,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
3902     #ifdef CONFIG_TCP_MD5SIG
3903     case TCP_MD5SIG:
3904     case TCP_MD5SIG_EXT:
3905     - /* Read the IP->Key mappings from userspace */
3906     - err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3907     + if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
3908     + err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3909     + else
3910     + err = -EINVAL;
3911     break;
3912     #endif
3913     case TCP_USER_TIMEOUT:
3914     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3915     index 14474acea0bb..ebbb54bcbcac 100644
3916     --- a/net/ipv4/tcp_input.c
3917     +++ b/net/ipv4/tcp_input.c
3918     @@ -3892,11 +3892,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
3919     int length = (th->doff << 2) - sizeof(*th);
3920     const u8 *ptr = (const u8 *)(th + 1);
3921    
3922     - /* If the TCP option is too short, we can short cut */
3923     - if (length < TCPOLEN_MD5SIG)
3924     - return NULL;
3925     -
3926     - while (length > 0) {
3927     + /* If not enough data remaining, we can short cut */
3928     + while (length >= TCPOLEN_MD5SIG) {
3929     int opcode = *ptr++;
3930     int opsize;
3931    
3932     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3933     index 0126d9bfa670..e04c534b573e 100644
3934     --- a/net/ipv6/route.c
3935     +++ b/net/ipv6/route.c
3936     @@ -2959,6 +2959,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
3937    
3938     static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3939     [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
3940     + [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
3941     [RTA_OIF] = { .type = NLA_U32 },
3942     [RTA_IIF] = { .type = NLA_U32 },
3943     [RTA_PRIORITY] = { .type = NLA_U32 },
3944     @@ -2970,6 +2971,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3945     [RTA_EXPIRES] = { .type = NLA_U32 },
3946     [RTA_UID] = { .type = NLA_U32 },
3947     [RTA_MARK] = { .type = NLA_U32 },
3948     + [RTA_TABLE] = { .type = NLA_U32 },
3949     };
3950    
3951     static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
3952     diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
3953     index f343e6f0fc95..5fe139484919 100644
3954     --- a/net/ipv6/seg6_iptunnel.c
3955     +++ b/net/ipv6/seg6_iptunnel.c
3956     @@ -136,7 +136,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
3957     isrh->nexthdr = proto;
3958    
3959     hdr->daddr = isrh->segments[isrh->first_segment];
3960     - set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
3961     + set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
3962    
3963     #ifdef CONFIG_IPV6_SEG6_HMAC
3964     if (sr_has_hmac(isrh)) {
3965     diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
3966     index 0c2738349442..8bef35aa8786 100644
3967     --- a/net/l2tp/l2tp_ppp.c
3968     +++ b/net/l2tp/l2tp_ppp.c
3969     @@ -591,6 +591,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
3970     lock_sock(sk);
3971    
3972     error = -EINVAL;
3973     +
3974     + if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
3975     + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
3976     + sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
3977     + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
3978     + goto end;
3979     +
3980     if (sp->sa_protocol != PX_PROTO_OL2TP)
3981     goto end;
3982    
3983     diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
3984     index c38d16f22d2a..cf41d9b4a0b8 100644
3985     --- a/net/llc/af_llc.c
3986     +++ b/net/llc/af_llc.c
3987     @@ -199,9 +199,19 @@ static int llc_ui_release(struct socket *sock)
3988     llc->laddr.lsap, llc->daddr.lsap);
3989     if (!llc_send_disc(sk))
3990     llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
3991     - if (!sock_flag(sk, SOCK_ZAPPED))
3992     + if (!sock_flag(sk, SOCK_ZAPPED)) {
3993     + struct llc_sap *sap = llc->sap;
3994     +
3995     + /* Hold this for release_sock(), so that llc_backlog_rcv()
3996     + * could still use it.
3997     + */
3998     + llc_sap_hold(sap);
3999     llc_sap_remove_socket(llc->sap, sk);
4000     - release_sock(sk);
4001     + release_sock(sk);
4002     + llc_sap_put(sap);
4003     + } else {
4004     + release_sock(sk);
4005     + }
4006     if (llc->dev)
4007     dev_put(llc->dev);
4008     sock_put(sk);
4009     diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
4010     index ea225bd2672c..f8d4ab8ca1a5 100644
4011     --- a/net/llc/llc_c_ac.c
4012     +++ b/net/llc/llc_c_ac.c
4013     @@ -1096,14 +1096,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
4014    
4015     int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
4016     {
4017     - struct llc_sock *llc = llc_sk(sk);
4018     -
4019     - del_timer(&llc->pf_cycle_timer.timer);
4020     - del_timer(&llc->ack_timer.timer);
4021     - del_timer(&llc->rej_sent_timer.timer);
4022     - del_timer(&llc->busy_state_timer.timer);
4023     - llc->ack_must_be_send = 0;
4024     - llc->ack_pf = 0;
4025     + llc_sk_stop_all_timers(sk, false);
4026     return 0;
4027     }
4028    
4029     diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
4030     index 5e91b47f0d2a..9a42448eb182 100644
4031     --- a/net/llc/llc_conn.c
4032     +++ b/net/llc/llc_conn.c
4033     @@ -951,6 +951,26 @@ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct pr
4034     return sk;
4035     }
4036    
4037     +void llc_sk_stop_all_timers(struct sock *sk, bool sync)
4038     +{
4039     + struct llc_sock *llc = llc_sk(sk);
4040     +
4041     + if (sync) {
4042     + del_timer_sync(&llc->pf_cycle_timer.timer);
4043     + del_timer_sync(&llc->ack_timer.timer);
4044     + del_timer_sync(&llc->rej_sent_timer.timer);
4045     + del_timer_sync(&llc->busy_state_timer.timer);
4046     + } else {
4047     + del_timer(&llc->pf_cycle_timer.timer);
4048     + del_timer(&llc->ack_timer.timer);
4049     + del_timer(&llc->rej_sent_timer.timer);
4050     + del_timer(&llc->busy_state_timer.timer);
4051     + }
4052     +
4053     + llc->ack_must_be_send = 0;
4054     + llc->ack_pf = 0;
4055     +}
4056     +
4057     /**
4058     * llc_sk_free - Frees a LLC socket
4059     * @sk - socket to free
4060     @@ -963,7 +983,7 @@ void llc_sk_free(struct sock *sk)
4061    
4062     llc->state = LLC_CONN_OUT_OF_SVC;
4063     /* Stop all (possibly) running timers */
4064     - llc_conn_ac_stop_all_timers(sk, NULL);
4065     + llc_sk_stop_all_timers(sk, true);
4066     #ifdef DEBUG_LLC_CONN_ALLOC
4067     printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
4068     skb_queue_len(&llc->pdu_unack_q),
4069     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4070     index f4a0587b7d5e..3994b71f8197 100644
4071     --- a/net/packet/af_packet.c
4072     +++ b/net/packet/af_packet.c
4073     @@ -331,11 +331,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
4074     skb_set_queue_mapping(skb, queue_index);
4075     }
4076    
4077     -/* register_prot_hook must be invoked with the po->bind_lock held,
4078     +/* __register_prot_hook must be invoked through register_prot_hook
4079     * or from a context in which asynchronous accesses to the packet
4080     * socket is not possible (packet_create()).
4081     */
4082     -static void register_prot_hook(struct sock *sk)
4083     +static void __register_prot_hook(struct sock *sk)
4084     {
4085     struct packet_sock *po = pkt_sk(sk);
4086    
4087     @@ -350,8 +350,13 @@ static void register_prot_hook(struct sock *sk)
4088     }
4089     }
4090    
4091     -/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
4092     - * held. If the sync parameter is true, we will temporarily drop
4093     +static void register_prot_hook(struct sock *sk)
4094     +{
4095     + lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
4096     + __register_prot_hook(sk);
4097     +}
4098     +
4099     +/* If the sync parameter is true, we will temporarily drop
4100     * the po->bind_lock and do a synchronize_net to make sure no
4101     * asynchronous packet processing paths still refer to the elements
4102     * of po->prot_hook. If the sync parameter is false, it is the
4103     @@ -361,6 +366,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
4104     {
4105     struct packet_sock *po = pkt_sk(sk);
4106    
4107     + lockdep_assert_held_once(&po->bind_lock);
4108     +
4109     po->running = 0;
4110    
4111     if (po->fanout)
4112     @@ -3017,6 +3024,7 @@ static int packet_release(struct socket *sock)
4113    
4114     packet_flush_mclist(sk);
4115    
4116     + lock_sock(sk);
4117     if (po->rx_ring.pg_vec) {
4118     memset(&req_u, 0, sizeof(req_u));
4119     packet_set_ring(sk, &req_u, 1, 0);
4120     @@ -3026,6 +3034,7 @@ static int packet_release(struct socket *sock)
4121     memset(&req_u, 0, sizeof(req_u));
4122     packet_set_ring(sk, &req_u, 1, 1);
4123     }
4124     + release_sock(sk);
4125    
4126     f = fanout_release(sk);
4127    
4128     @@ -3259,7 +3268,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
4129    
4130     if (proto) {
4131     po->prot_hook.type = proto;
4132     - register_prot_hook(sk);
4133     + __register_prot_hook(sk);
4134     }
4135    
4136     mutex_lock(&net->packet.sklist_lock);
4137     @@ -3654,6 +3663,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
4138     union tpacket_req_u req_u;
4139     int len;
4140    
4141     + lock_sock(sk);
4142     switch (po->tp_version) {
4143     case TPACKET_V1:
4144     case TPACKET_V2:
4145     @@ -3664,12 +3674,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
4146     len = sizeof(req_u.req3);
4147     break;
4148     }
4149     - if (optlen < len)
4150     - return -EINVAL;
4151     - if (copy_from_user(&req_u.req, optval, len))
4152     - return -EFAULT;
4153     - return packet_set_ring(sk, &req_u, 0,
4154     - optname == PACKET_TX_RING);
4155     + if (optlen < len) {
4156     + ret = -EINVAL;
4157     + } else {
4158     + if (copy_from_user(&req_u.req, optval, len))
4159     + ret = -EFAULT;
4160     + else
4161     + ret = packet_set_ring(sk, &req_u, 0,
4162     + optname == PACKET_TX_RING);
4163     + }
4164     + release_sock(sk);
4165     + return ret;
4166     }
4167     case PACKET_COPY_THRESH:
4168     {
4169     @@ -3735,12 +3750,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
4170    
4171     if (optlen != sizeof(val))
4172     return -EINVAL;
4173     - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
4174     - return -EBUSY;
4175     if (copy_from_user(&val, optval, sizeof(val)))
4176     return -EFAULT;
4177     - po->tp_loss = !!val;
4178     - return 0;
4179     +
4180     + lock_sock(sk);
4181     + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
4182     + ret = -EBUSY;
4183     + } else {
4184     + po->tp_loss = !!val;
4185     + ret = 0;
4186     + }
4187     + release_sock(sk);
4188     + return ret;
4189     }
4190     case PACKET_AUXDATA:
4191     {
4192     @@ -3751,7 +3772,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
4193     if (copy_from_user(&val, optval, sizeof(val)))
4194     return -EFAULT;
4195    
4196     + lock_sock(sk);
4197     po->auxdata = !!val;
4198     + release_sock(sk);
4199     return 0;
4200     }
4201     case PACKET_ORIGDEV:
4202     @@ -3763,7 +3786,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
4203     if (copy_from_user(&val, optval, sizeof(val)))
4204     return -EFAULT;
4205    
4206     + lock_sock(sk);
4207     po->origdev = !!val;
4208     + release_sock(sk);
4209     return 0;
4210     }
4211     case PACKET_VNET_HDR:
4212     @@ -3772,15 +3797,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
4213    
4214     if (sock->type != SOCK_RAW)
4215     return -EINVAL;
4216     - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
4217     - return -EBUSY;
4218     if (optlen < sizeof(val))
4219     return -EINVAL;
4220     if (copy_from_user(&val, optval, sizeof(val)))
4221     return -EFAULT;
4222    
4223     - po->has_vnet_hdr = !!val;
4224     - return 0;
4225     + lock_sock(sk);
4226     + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
4227     + ret = -EBUSY;
4228     + } else {
4229     + po->has_vnet_hdr = !!val;
4230     + ret = 0;
4231     + }
4232     + release_sock(sk);
4233     + return ret;
4234     }
4235     case PACKET_TIMESTAMP:
4236     {
4237     @@ -3818,11 +3848,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
4238    
4239     if (optlen != sizeof(val))
4240     return -EINVAL;
4241     - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
4242     - return -EBUSY;
4243     if (copy_from_user(&val, optval, sizeof(val)))
4244     return -EFAULT;
4245     - po->tp_tx_has_off = !!val;
4246     +
4247     + lock_sock(sk);
4248     + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
4249     + ret = -EBUSY;
4250     + } else {
4251     + po->tp_tx_has_off = !!val;
4252     + ret = 0;
4253     + }
4254     + release_sock(sk);
4255     return 0;
4256     }
4257     case PACKET_QDISC_BYPASS:
4258     @@ -4219,8 +4255,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4259     /* Added to avoid minimal code churn */
4260     struct tpacket_req *req = &req_u->req;
4261    
4262     - lock_sock(sk);
4263     -
4264     rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4265     rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4266    
4267     @@ -4358,7 +4392,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4268     if (pg_vec)
4269     free_pg_vec(pg_vec, order, req->tp_block_nr);
4270     out:
4271     - release_sock(sk);
4272     return err;
4273     }
4274    
4275     diff --git a/net/packet/internal.h b/net/packet/internal.h
4276     index a1d2b2319ae9..3bb7c5fb3bff 100644
4277     --- a/net/packet/internal.h
4278     +++ b/net/packet/internal.h
4279     @@ -112,10 +112,12 @@ struct packet_sock {
4280     int copy_thresh;
4281     spinlock_t bind_lock;
4282     struct mutex pg_vec_lock;
4283     - unsigned int running:1, /* prot_hook is attached*/
4284     - auxdata:1,
4285     + unsigned int running; /* bind_lock must be held */
4286     + unsigned int auxdata:1, /* writer must hold sock lock */
4287     origdev:1,
4288     - has_vnet_hdr:1;
4289     + has_vnet_hdr:1,
4290     + tp_loss:1,
4291     + tp_tx_has_off:1;
4292     int pressure;
4293     int ifindex; /* bound device */
4294     __be16 num;
4295     @@ -125,8 +127,6 @@ struct packet_sock {
4296     enum tpacket_versions tp_version;
4297     unsigned int tp_hdrlen;
4298     unsigned int tp_reserve;
4299     - unsigned int tp_loss:1;
4300     - unsigned int tp_tx_has_off:1;
4301     unsigned int tp_tstamp;
4302     struct net_device __rcu *cached_dev;
4303     int (*xmit)(struct sk_buff *skb);
4304     diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
4305     index 8ccd35825b6b..85757af7f150 100644
4306     --- a/net/sched/act_ife.c
4307     +++ b/net/sched/act_ife.c
4308     @@ -605,7 +605,7 @@ static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
4309     }
4310     }
4311    
4312     - return 0;
4313     + return -ENOENT;
4314     }
4315    
4316     static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
4317     @@ -639,7 +639,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
4318     u16 mtype;
4319     u16 dlen;
4320    
4321     - curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL);
4322     + curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
4323     + &dlen, NULL);
4324     + if (!curr_data) {
4325     + qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
4326     + return TC_ACT_SHOT;
4327     + }
4328    
4329     if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
4330     /* abuse overlimits to count when we receive metadata
4331     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
4332     index 08b5705e7381..7219a1c041f7 100644
4333     --- a/net/sctp/ipv6.c
4334     +++ b/net/sctp/ipv6.c
4335     @@ -521,46 +521,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
4336     addr->v6.sin6_scope_id = 0;
4337     }
4338    
4339     -/* Compare addresses exactly.
4340     - * v4-mapped-v6 is also in consideration.
4341     - */
4342     -static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
4343     - const union sctp_addr *addr2)
4344     +static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
4345     + const union sctp_addr *addr2)
4346     {
4347     if (addr1->sa.sa_family != addr2->sa.sa_family) {
4348     if (addr1->sa.sa_family == AF_INET &&
4349     addr2->sa.sa_family == AF_INET6 &&
4350     - ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
4351     - if (addr2->v6.sin6_port == addr1->v4.sin_port &&
4352     - addr2->v6.sin6_addr.s6_addr32[3] ==
4353     - addr1->v4.sin_addr.s_addr)
4354     - return 1;
4355     - }
4356     + ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
4357     + addr2->v6.sin6_addr.s6_addr32[3] ==
4358     + addr1->v4.sin_addr.s_addr)
4359     + return 1;
4360     +
4361     if (addr2->sa.sa_family == AF_INET &&
4362     addr1->sa.sa_family == AF_INET6 &&
4363     - ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
4364     - if (addr1->v6.sin6_port == addr2->v4.sin_port &&
4365     - addr1->v6.sin6_addr.s6_addr32[3] ==
4366     - addr2->v4.sin_addr.s_addr)
4367     - return 1;
4368     - }
4369     + ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
4370     + addr1->v6.sin6_addr.s6_addr32[3] ==
4371     + addr2->v4.sin_addr.s_addr)
4372     + return 1;
4373     +
4374     return 0;
4375     }
4376     - if (addr1->v6.sin6_port != addr2->v6.sin6_port)
4377     - return 0;
4378     +
4379     if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
4380     return 0;
4381     +
4382     /* If this is a linklocal address, compare the scope_id. */
4383     - if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
4384     - if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
4385     - (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
4386     - return 0;
4387     - }
4388     - }
4389     + if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
4390     + addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
4391     + addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
4392     + return 0;
4393    
4394     return 1;
4395     }
4396    
4397     +/* Compare addresses exactly.
4398     + * v4-mapped-v6 is also in consideration.
4399     + */
4400     +static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
4401     + const union sctp_addr *addr2)
4402     +{
4403     + return __sctp_v6_cmp_addr(addr1, addr2) &&
4404     + addr1->v6.sin6_port == addr2->v6.sin6_port;
4405     +}
4406     +
4407     /* Initialize addr struct to INADDR_ANY. */
4408     static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
4409     {
4410     @@ -845,8 +848,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
4411     const union sctp_addr *addr2,
4412     struct sctp_sock *opt)
4413     {
4414     - struct sctp_af *af1, *af2;
4415     struct sock *sk = sctp_opt2sk(opt);
4416     + struct sctp_af *af1, *af2;
4417    
4418     af1 = sctp_get_af_specific(addr1->sa.sa_family);
4419     af2 = sctp_get_af_specific(addr2->sa.sa_family);
4420     @@ -862,10 +865,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
4421     if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
4422     return 1;
4423    
4424     - if (addr1->sa.sa_family != addr2->sa.sa_family)
4425     - return 0;
4426     -
4427     - return af1->cmp_addr(addr1, addr2);
4428     + return __sctp_v6_cmp_addr(addr1, addr2);
4429     }
4430    
4431     /* Verify that the provided sockaddr looks bindable. Common verification,
4432     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
4433     index a6d604fd9695..f9c289e05707 100644
4434     --- a/net/smc/af_smc.c
4435     +++ b/net/smc/af_smc.c
4436     @@ -1203,14 +1203,12 @@ static int smc_shutdown(struct socket *sock, int how)
4437     rc = smc_close_shutdown_write(smc);
4438     break;
4439     case SHUT_RD:
4440     - if (sk->sk_state == SMC_LISTEN)
4441     - rc = smc_close_active(smc);
4442     - else
4443     - rc = 0;
4444     - /* nothing more to do because peer is not involved */
4445     + rc = 0;
4446     + /* nothing more to do because peer is not involved */
4447     break;
4448     }
4449     - rc1 = kernel_sock_shutdown(smc->clcsock, how);
4450     + if (smc->clcsock)
4451     + rc1 = kernel_sock_shutdown(smc->clcsock, how);
4452     /* map sock_shutdown_cmd constants to sk_shutdown value range */
4453     sk->sk_shutdown |= how + 1;
4454    
4455     diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
4456     index 4a3a3f1331ee..c741365f77da 100644
4457     --- a/net/strparser/strparser.c
4458     +++ b/net/strparser/strparser.c
4459     @@ -67,7 +67,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
4460    
4461     static void strp_start_timer(struct strparser *strp, long timeo)
4462     {
4463     - if (timeo)
4464     + if (timeo && timeo != LONG_MAX)
4465     mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
4466     }
4467    
4468     @@ -296,9 +296,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
4469     strp_start_timer(strp, timeo);
4470     }
4471    
4472     + stm->accum_len += cand_len;
4473     strp->need_bytes = stm->strp.full_len -
4474     stm->accum_len;
4475     - stm->accum_len += cand_len;
4476     stm->early_eaten = cand_len;
4477     STRP_STATS_ADD(strp->stats.bytes, cand_len);
4478     desc->count = 0; /* Stop reading socket */
4479     @@ -321,6 +321,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
4480     /* Hurray, we have a new message! */
4481     cancel_delayed_work(&strp->msg_timer_work);
4482     strp->skb_head = NULL;
4483     + strp->need_bytes = 0;
4484     STRP_STATS_INCR(strp->stats.msgs);
4485    
4486     /* Give skb to upper layer */
4487     @@ -410,9 +411,7 @@ void strp_data_ready(struct strparser *strp)
4488     return;
4489    
4490     if (strp->need_bytes) {
4491     - if (strp_peek_len(strp) >= strp->need_bytes)
4492     - strp->need_bytes = 0;
4493     - else
4494     + if (strp_peek_len(strp) < strp->need_bytes)
4495     return;
4496     }
4497    
4498     diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
4499     index b76f13f6fea1..d4e0bbeee727 100644
4500     --- a/net/tipc/netlink.c
4501     +++ b/net/tipc/netlink.c
4502     @@ -79,7 +79,8 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
4503    
4504     const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
4505     [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
4506     - [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
4507     + [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
4508     + [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
4509     };
4510    
4511     const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
4512     diff --git a/security/commoncap.c b/security/commoncap.c
4513     index 7b01431d1e19..1c1f64582bb5 100644
4514     --- a/security/commoncap.c
4515     +++ b/security/commoncap.c
4516     @@ -449,6 +449,8 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
4517     magic |= VFS_CAP_FLAGS_EFFECTIVE;
4518     memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
4519     cap->magic_etc = cpu_to_le32(magic);
4520     + } else {
4521     + size = -ENOMEM;
4522     }
4523     }
4524     kfree(tmpbuf);