Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0238-4.9.139-all-fixes.patch

Parent Directory | Revision Log


Revision 3291 - (hide annotations) (download)
Tue Mar 12 10:43:02 2019 UTC (5 years, 3 months ago) by niro
File size: 78298 byte(s)
-linux-4.9.139
1 niro 3291 diff --git a/.gitignore b/.gitignore
2     index c2ed4ecb0acd..0c39aa20b6ba 100644
3     --- a/.gitignore
4     +++ b/.gitignore
5     @@ -33,6 +33,7 @@
6     *.lzo
7     *.patch
8     *.gcno
9     +*.ll
10     modules.builtin
11     Module.symvers
12     *.dwo
13     diff --git a/Kbuild b/Kbuild
14     index 3d0ae152af7c..94c752762bc2 100644
15     --- a/Kbuild
16     +++ b/Kbuild
17     @@ -7,31 +7,6 @@
18     # 4) Check for missing system calls
19     # 5) Generate constants.py (may need bounds.h)
20    
21     -# Default sed regexp - multiline due to syntax constraints
22     -define sed-y
23     - "/^->/{s:->#\(.*\):/* \1 */:; \
24     - s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
25     - s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
26     - s:->::; p;}"
27     -endef
28     -
29     -# Use filechk to avoid rebuilds when a header changes, but the resulting file
30     -# does not
31     -define filechk_offsets
32     - (set -e; \
33     - echo "#ifndef $2"; \
34     - echo "#define $2"; \
35     - echo "/*"; \
36     - echo " * DO NOT MODIFY."; \
37     - echo " *"; \
38     - echo " * This file was generated by Kbuild"; \
39     - echo " */"; \
40     - echo ""; \
41     - sed -ne $(sed-y); \
42     - echo ""; \
43     - echo "#endif" )
44     -endef
45     -
46     #####
47     # 1) Generate bounds.h
48    
49     diff --git a/Makefile b/Makefile
50     index ccf2602f664d..a6959d96316d 100644
51     --- a/Makefile
52     +++ b/Makefile
53     @@ -1,6 +1,6 @@
54     VERSION = 4
55     PATCHLEVEL = 9
56     -SUBLEVEL = 138
57     +SUBLEVEL = 139
58     EXTRAVERSION =
59     NAME = Roaring Lionus
60    
61     @@ -303,7 +303,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
62    
63     HOSTCC = gcc
64     HOSTCXX = g++
65     -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
66     +HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
67     HOSTCXXFLAGS = -O2
68    
69     ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
70     @@ -394,7 +394,7 @@ LINUXINCLUDE += $(filter-out $(LINUXINCLUDE),$(USERINCLUDE))
71    
72     KBUILD_AFLAGS := -D__ASSEMBLY__
73     KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
74     - -fno-strict-aliasing -fno-common \
75     + -fno-strict-aliasing -fno-common -fshort-wchar \
76     -Werror-implicit-function-declaration \
77     -Wno-format-security \
78     -std=gnu89
79     @@ -644,7 +644,8 @@ KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
80     endif
81    
82     ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
83     -KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
84     +KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
85     +KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
86     else
87     ifdef CONFIG_PROFILE_ALL_BRANCHES
88     KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
89     @@ -704,11 +705,20 @@ endif
90     KBUILD_CFLAGS += $(stackp-flag)
91    
92     ifeq ($(cc-name),clang)
93     +ifneq ($(CROSS_COMPILE),)
94     +CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
95     +GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
96     +endif
97     +ifneq ($(GCC_TOOLCHAIN),)
98     +CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
99     +endif
100     +KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
101     +KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
102     KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
103     -KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,)
104     KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
105     KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
106     KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
107     +KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
108     # Quiet clang warning: comparison of unsigned expression < 0 is always false
109     KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
110     # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
111     @@ -716,6 +726,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
112     # See modpost pattern 2
113     KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
114     KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
115     +KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
116     +KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
117     else
118    
119     # These warnings generated too much noise in a regular build.
120     @@ -1379,6 +1391,8 @@ help:
121     @echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
122     @echo ' dir/ - Build all files in dir and below'
123     @echo ' dir/file.[ois] - Build specified target only'
124     + @echo ' dir/file.ll - Build the LLVM assembly file'
125     + @echo ' (requires compiler support for LLVM assembly generation)'
126     @echo ' dir/file.lst - Build specified mixed source/assembly target only'
127     @echo ' (requires a recent binutils and recent build (System.map))'
128     @echo ' dir/file.ko - Build module including final link'
129     @@ -1563,6 +1577,7 @@ clean: $(clean-dirs)
130     -o -name '*.symtypes' -o -name 'modules.order' \
131     -o -name modules.builtin -o -name '.tmp_*.o.*' \
132     -o -name '*.c.[012]*.*' \
133     + -o -name '*.ll' \
134     -o -name '*.gcno' \) -type f -print | xargs rm -f
135    
136     # Generate tags for editors
137     @@ -1666,6 +1681,8 @@ endif
138     $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
139     %.symtypes: %.c prepare scripts FORCE
140     $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
141     +%.ll: %.c prepare scripts FORCE
142     + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
143    
144     # Modules
145     /: prepare scripts FORCE
146     diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
147     index 3aed4492c9a7..e616f61f859d 100644
148     --- a/arch/arm/include/asm/assembler.h
149     +++ b/arch/arm/include/asm/assembler.h
150     @@ -445,11 +445,23 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
151     .size \name , . - \name
152     .endm
153    
154     + .macro csdb
155     +#ifdef CONFIG_THUMB2_KERNEL
156     + .inst.w 0xf3af8014
157     +#else
158     + .inst 0xe320f014
159     +#endif
160     + .endm
161     +
162     .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
163     #ifndef CONFIG_CPU_USE_DOMAINS
164     adds \tmp, \addr, #\size - 1
165     sbcccs \tmp, \tmp, \limit
166     bcs \bad
167     +#ifdef CONFIG_CPU_SPECTRE
168     + movcs \addr, #0
169     + csdb
170     +#endif
171     #endif
172     .endm
173    
174     diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
175     index f5d698182d50..513e03d138ea 100644
176     --- a/arch/arm/include/asm/barrier.h
177     +++ b/arch/arm/include/asm/barrier.h
178     @@ -16,6 +16,12 @@
179     #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
180     #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
181     #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
182     +#ifdef CONFIG_THUMB2_KERNEL
183     +#define CSDB ".inst.w 0xf3af8014"
184     +#else
185     +#define CSDB ".inst 0xe320f014"
186     +#endif
187     +#define csdb() __asm__ __volatile__(CSDB : : : "memory")
188     #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
189     #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
190     : : "r" (0) : "memory")
191     @@ -36,6 +42,13 @@
192     #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
193     #endif
194    
195     +#ifndef CSDB
196     +#define CSDB
197     +#endif
198     +#ifndef csdb
199     +#define csdb()
200     +#endif
201     +
202     #ifdef CONFIG_ARM_HEAVY_MB
203     extern void (*soc_mb)(void);
204     extern void arm_heavy_mb(void);
205     @@ -62,6 +75,25 @@ extern void arm_heavy_mb(void);
206     #define __smp_rmb() __smp_mb()
207     #define __smp_wmb() dmb(ishst)
208    
209     +#ifdef CONFIG_CPU_SPECTRE
210     +static inline unsigned long array_index_mask_nospec(unsigned long idx,
211     + unsigned long sz)
212     +{
213     + unsigned long mask;
214     +
215     + asm volatile(
216     + "cmp %1, %2\n"
217     + " sbc %0, %1, %1\n"
218     + CSDB
219     + : "=r" (mask)
220     + : "r" (idx), "Ir" (sz)
221     + : "cc");
222     +
223     + return mask;
224     +}
225     +#define array_index_mask_nospec array_index_mask_nospec
226     +#endif
227     +
228     #include <asm-generic/barrier.h>
229    
230     #endif /* !__ASSEMBLY__ */
231     diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
232     index a97f1ea708d1..73a99c72a930 100644
233     --- a/arch/arm/include/asm/bugs.h
234     +++ b/arch/arm/include/asm/bugs.h
235     @@ -10,12 +10,14 @@
236     #ifndef __ASM_BUGS_H
237     #define __ASM_BUGS_H
238    
239     -#ifdef CONFIG_MMU
240     extern void check_writebuffer_bugs(void);
241    
242     -#define check_bugs() check_writebuffer_bugs()
243     +#ifdef CONFIG_MMU
244     +extern void check_bugs(void);
245     +extern void check_other_bugs(void);
246     #else
247     #define check_bugs() do { } while (0)
248     +#define check_other_bugs() do { } while (0)
249     #endif
250    
251     #endif
252     diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
253     index dbdbce1b3a72..b74b174ac9fc 100644
254     --- a/arch/arm/include/asm/cp15.h
255     +++ b/arch/arm/include/asm/cp15.h
256     @@ -64,6 +64,9 @@
257     #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
258     #define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
259    
260     +#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
261     +#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
262     +
263     extern unsigned long cr_alignment; /* defined in entry-armv.S */
264    
265     static inline unsigned long get_cr(void)
266     diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
267     index b62eaeb147aa..c55db1e22f0c 100644
268     --- a/arch/arm/include/asm/cputype.h
269     +++ b/arch/arm/include/asm/cputype.h
270     @@ -76,8 +76,16 @@
271     #define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
272     #define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
273     #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
274     +#define ARM_CPU_PART_CORTEX_A53 0x4100d030
275     +#define ARM_CPU_PART_CORTEX_A57 0x4100d070
276     +#define ARM_CPU_PART_CORTEX_A72 0x4100d080
277     +#define ARM_CPU_PART_CORTEX_A73 0x4100d090
278     +#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
279     #define ARM_CPU_PART_MASK 0xff00fff0
280    
281     +/* Broadcom cores */
282     +#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
283     +
284     /* DEC implemented cores */
285     #define ARM_CPU_PART_SA1100 0x4400a110
286    
287     diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
288     index 8ef05381984b..24f3ec7c9fbe 100644
289     --- a/arch/arm/include/asm/kvm_asm.h
290     +++ b/arch/arm/include/asm/kvm_asm.h
291     @@ -61,8 +61,6 @@ struct kvm_vcpu;
292     extern char __kvm_hyp_init[];
293     extern char __kvm_hyp_init_end[];
294    
295     -extern char __kvm_hyp_vector[];
296     -
297     extern void __kvm_flush_vm_context(void);
298     extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
299     extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
300     diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
301     index 0833d8a1dbbb..2fda7e905754 100644
302     --- a/arch/arm/include/asm/kvm_host.h
303     +++ b/arch/arm/include/asm/kvm_host.h
304     @@ -21,6 +21,7 @@
305    
306     #include <linux/types.h>
307     #include <linux/kvm_types.h>
308     +#include <asm/cputype.h>
309     #include <asm/kvm.h>
310     #include <asm/kvm_asm.h>
311     #include <asm/kvm_mmio.h>
312     @@ -323,8 +324,17 @@ static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
313    
314     static inline bool kvm_arm_harden_branch_predictor(void)
315     {
316     - /* No way to detect it yet, pretend it is not there. */
317     - return false;
318     + switch(read_cpuid_part()) {
319     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
320     + case ARM_CPU_PART_BRAHMA_B15:
321     + case ARM_CPU_PART_CORTEX_A12:
322     + case ARM_CPU_PART_CORTEX_A15:
323     + case ARM_CPU_PART_CORTEX_A17:
324     + return true;
325     +#endif
326     + default:
327     + return false;
328     + }
329     }
330    
331     #define KVM_SSBD_UNKNOWN -1
332     diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
333     index e2f05cedaf97..d26395754b56 100644
334     --- a/arch/arm/include/asm/kvm_mmu.h
335     +++ b/arch/arm/include/asm/kvm_mmu.h
336     @@ -248,7 +248,28 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
337    
338     static inline void *kvm_get_hyp_vector(void)
339     {
340     - return kvm_ksym_ref(__kvm_hyp_vector);
341     + switch(read_cpuid_part()) {
342     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
343     + case ARM_CPU_PART_CORTEX_A12:
344     + case ARM_CPU_PART_CORTEX_A17:
345     + {
346     + extern char __kvm_hyp_vector_bp_inv[];
347     + return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
348     + }
349     +
350     + case ARM_CPU_PART_BRAHMA_B15:
351     + case ARM_CPU_PART_CORTEX_A15:
352     + {
353     + extern char __kvm_hyp_vector_ic_inv[];
354     + return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
355     + }
356     +#endif
357     + default:
358     + {
359     + extern char __kvm_hyp_vector[];
360     + return kvm_ksym_ref(__kvm_hyp_vector);
361     + }
362     + }
363     }
364    
365     static inline int kvm_map_vectors(void)
366     diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
367     index 8877ad5ffe10..f379f5f849a9 100644
368     --- a/arch/arm/include/asm/proc-fns.h
369     +++ b/arch/arm/include/asm/proc-fns.h
370     @@ -36,6 +36,10 @@ extern struct processor {
371     * Set up any processor specifics
372     */
373     void (*_proc_init)(void);
374     + /*
375     + * Check for processor bugs
376     + */
377     + void (*check_bugs)(void);
378     /*
379     * Disable any processor specifics
380     */
381     diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
382     index a3d61ad984af..1fed41440af9 100644
383     --- a/arch/arm/include/asm/system_misc.h
384     +++ b/arch/arm/include/asm/system_misc.h
385     @@ -7,6 +7,7 @@
386     #include <linux/linkage.h>
387     #include <linux/irqflags.h>
388     #include <linux/reboot.h>
389     +#include <linux/percpu.h>
390    
391     extern void cpu_init(void);
392    
393     @@ -14,6 +15,20 @@ void soft_restart(unsigned long);
394     extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
395     extern void (*arm_pm_idle)(void);
396    
397     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
398     +typedef void (*harden_branch_predictor_fn_t)(void);
399     +DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
400     +static inline void harden_branch_predictor(void)
401     +{
402     + harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
403     + smp_processor_id());
404     + if (fn)
405     + fn();
406     +}
407     +#else
408     +#define harden_branch_predictor() do { } while (0)
409     +#endif
410     +
411     #define UDBG_UNDEFINED (1 << 0)
412     #define UDBG_SYSCALL (1 << 1)
413     #define UDBG_BADABORT (1 << 2)
414     diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
415     index 776757d1604a..57d2ad9c75ca 100644
416     --- a/arch/arm/include/asm/thread_info.h
417     +++ b/arch/arm/include/asm/thread_info.h
418     @@ -126,8 +126,8 @@ struct user_vfp_exc;
419    
420     extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
421     struct user_vfp_exc __user *);
422     -extern int vfp_restore_user_hwstate(struct user_vfp __user *,
423     - struct user_vfp_exc __user *);
424     +extern int vfp_restore_user_hwstate(struct user_vfp *,
425     + struct user_vfp_exc *);
426     #endif
427    
428     /*
429     diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
430     index b7e0125c0bbf..7b17460127fd 100644
431     --- a/arch/arm/include/asm/uaccess.h
432     +++ b/arch/arm/include/asm/uaccess.h
433     @@ -114,6 +114,13 @@ static inline void set_fs(mm_segment_t fs)
434     : "cc"); \
435     flag; })
436    
437     +/*
438     + * This is a type: either unsigned long, if the argument fits into
439     + * that type, or otherwise unsigned long long.
440     + */
441     +#define __inttype(x) \
442     + __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
443     +
444     /*
445     * Single-value transfer routines. They automatically use the right
446     * size if we just have the right pointer type. Note that the functions
447     @@ -183,7 +190,7 @@ extern int __get_user_64t_4(void *);
448     ({ \
449     unsigned long __limit = current_thread_info()->addr_limit - 1; \
450     register const typeof(*(p)) __user *__p asm("r0") = (p);\
451     - register typeof(x) __r2 asm("r2"); \
452     + register __inttype(x) __r2 asm("r2"); \
453     register unsigned long __l asm("r1") = __limit; \
454     register int __e asm("r0"); \
455     unsigned int __ua_flags = uaccess_save_and_enable(); \
456     @@ -273,6 +280,16 @@ static inline void set_fs(mm_segment_t fs)
457     #define user_addr_max() \
458     (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
459    
460     +#ifdef CONFIG_CPU_SPECTRE
461     +/*
462     + * When mitigating Spectre variant 1, it is not worth fixing the non-
463     + * verifying accessors, because we need to add verification of the
464     + * address space there. Force these to use the standard get_user()
465     + * version instead.
466     + */
467     +#define __get_user(x, ptr) get_user(x, ptr)
468     +#else
469     +
470     /*
471     * The "__xxx" versions of the user access functions do not verify the
472     * address space - it must have been done previously with a separate
473     @@ -289,12 +306,6 @@ static inline void set_fs(mm_segment_t fs)
474     __gu_err; \
475     })
476    
477     -#define __get_user_error(x, ptr, err) \
478     -({ \
479     - __get_user_err((x), (ptr), err); \
480     - (void) 0; \
481     -})
482     -
483     #define __get_user_err(x, ptr, err) \
484     do { \
485     unsigned long __gu_addr = (unsigned long)(ptr); \
486     @@ -354,6 +365,7 @@ do { \
487    
488     #define __get_user_asm_word(x, addr, err) \
489     __get_user_asm(x, addr, err, ldr)
490     +#endif
491    
492    
493     #define __put_user_switch(x, ptr, __err, __fn) \
494     diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
495     index ad325a8c7e1e..adb9add28b6f 100644
496     --- a/arch/arm/kernel/Makefile
497     +++ b/arch/arm/kernel/Makefile
498     @@ -30,6 +30,7 @@ else
499     obj-y += entry-armv.o
500     endif
501    
502     +obj-$(CONFIG_MMU) += bugs.o
503     obj-$(CONFIG_CPU_IDLE) += cpuidle.o
504     obj-$(CONFIG_ISA_DMA_API) += dma.o
505     obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
506     diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
507     new file mode 100644
508     index 000000000000..7be511310191
509     --- /dev/null
510     +++ b/arch/arm/kernel/bugs.c
511     @@ -0,0 +1,18 @@
512     +// SPDX-Identifier: GPL-2.0
513     +#include <linux/init.h>
514     +#include <asm/bugs.h>
515     +#include <asm/proc-fns.h>
516     +
517     +void check_other_bugs(void)
518     +{
519     +#ifdef MULTI_CPU
520     + if (processor.check_bugs)
521     + processor.check_bugs();
522     +#endif
523     +}
524     +
525     +void __init check_bugs(void)
526     +{
527     + check_writebuffer_bugs();
528     + check_other_bugs();
529     +}
530     diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
531     index 10c3283d6c19..56be67ecf0fa 100644
532     --- a/arch/arm/kernel/entry-common.S
533     +++ b/arch/arm/kernel/entry-common.S
534     @@ -223,9 +223,7 @@ local_restart:
535     tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
536     bne __sys_trace
537    
538     - cmp scno, #NR_syscalls @ check upper syscall limit
539     - badr lr, ret_fast_syscall @ return address
540     - ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
541     + invoke_syscall tbl, scno, r10, ret_fast_syscall
542    
543     add r1, sp, #S_OFF
544     2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
545     @@ -258,14 +256,8 @@ __sys_trace:
546     mov r1, scno
547     add r0, sp, #S_OFF
548     bl syscall_trace_enter
549     -
550     - badr lr, __sys_trace_return @ return address
551     - mov scno, r0 @ syscall number (possibly new)
552     - add r1, sp, #S_R0 + S_OFF @ pointer to regs
553     - cmp scno, #NR_syscalls @ check upper syscall limit
554     - ldmccia r1, {r0 - r6} @ have to reload r0 - r6
555     - stmccia sp, {r4, r5} @ and update the stack args
556     - ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
557     + mov scno, r0
558     + invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
559     cmp scno, #-1 @ skip the syscall?
560     bne 2b
561     add sp, sp, #S_OFF @ restore stack
562     @@ -317,6 +309,10 @@ sys_syscall:
563     bic scno, r0, #__NR_OABI_SYSCALL_BASE
564     cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
565     cmpne scno, #NR_syscalls @ check range
566     +#ifdef CONFIG_CPU_SPECTRE
567     + movhs scno, #0
568     + csdb
569     +#endif
570     stmloia sp, {r5, r6} @ shuffle args
571     movlo r0, r1
572     movlo r1, r2
573     diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
574     index e056c9a9aa9d..fa7c6e5c17e7 100644
575     --- a/arch/arm/kernel/entry-header.S
576     +++ b/arch/arm/kernel/entry-header.S
577     @@ -377,6 +377,31 @@
578     #endif
579     .endm
580    
581     + .macro invoke_syscall, table, nr, tmp, ret, reload=0
582     +#ifdef CONFIG_CPU_SPECTRE
583     + mov \tmp, \nr
584     + cmp \tmp, #NR_syscalls @ check upper syscall limit
585     + movcs \tmp, #0
586     + csdb
587     + badr lr, \ret @ return address
588     + .if \reload
589     + add r1, sp, #S_R0 + S_OFF @ pointer to regs
590     + ldmccia r1, {r0 - r6} @ reload r0-r6
591     + stmccia sp, {r4, r5} @ update stack arguments
592     + .endif
593     + ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
594     +#else
595     + cmp \nr, #NR_syscalls @ check upper syscall limit
596     + badr lr, \ret @ return address
597     + .if \reload
598     + add r1, sp, #S_R0 + S_OFF @ pointer to regs
599     + ldmccia r1, {r0 - r6} @ reload r0-r6
600     + stmccia sp, {r4, r5} @ update stack arguments
601     + .endif
602     + ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
603     +#endif
604     + .endm
605     +
606     /*
607     * These are the registers used in the syscall handler, and allow us to
608     * have in theory up to 7 arguments to a function - r0 to r6.
609     diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
610     index 7b8f2141427b..6bee5c9b1133 100644
611     --- a/arch/arm/kernel/signal.c
612     +++ b/arch/arm/kernel/signal.c
613     @@ -107,21 +107,20 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
614     return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
615     }
616    
617     -static int restore_vfp_context(struct vfp_sigframe __user *frame)
618     +static int restore_vfp_context(struct vfp_sigframe __user *auxp)
619     {
620     - unsigned long magic;
621     - unsigned long size;
622     - int err = 0;
623     + struct vfp_sigframe frame;
624     + int err;
625    
626     - __get_user_error(magic, &frame->magic, err);
627     - __get_user_error(size, &frame->size, err);
628     + err = __copy_from_user(&frame, (char __user *) auxp, sizeof(frame));
629    
630     if (err)
631     - return -EFAULT;
632     - if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
633     + return err;
634     +
635     + if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
636     return -EINVAL;
637    
638     - return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
639     + return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
640     }
641    
642     #endif
643     @@ -141,6 +140,7 @@ struct rt_sigframe {
644    
645     static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
646     {
647     + struct sigcontext context;
648     struct aux_sigframe __user *aux;
649     sigset_t set;
650     int err;
651     @@ -149,23 +149,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
652     if (err == 0)
653     set_current_blocked(&set);
654    
655     - __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
656     - __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
657     - __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
658     - __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
659     - __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
660     - __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
661     - __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
662     - __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
663     - __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
664     - __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
665     - __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
666     - __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
667     - __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
668     - __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
669     - __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
670     - __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
671     - __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
672     + err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
673     + if (err == 0) {
674     + regs->ARM_r0 = context.arm_r0;
675     + regs->ARM_r1 = context.arm_r1;
676     + regs->ARM_r2 = context.arm_r2;
677     + regs->ARM_r3 = context.arm_r3;
678     + regs->ARM_r4 = context.arm_r4;
679     + regs->ARM_r5 = context.arm_r5;
680     + regs->ARM_r6 = context.arm_r6;
681     + regs->ARM_r7 = context.arm_r7;
682     + regs->ARM_r8 = context.arm_r8;
683     + regs->ARM_r9 = context.arm_r9;
684     + regs->ARM_r10 = context.arm_r10;
685     + regs->ARM_fp = context.arm_fp;
686     + regs->ARM_ip = context.arm_ip;
687     + regs->ARM_sp = context.arm_sp;
688     + regs->ARM_lr = context.arm_lr;
689     + regs->ARM_pc = context.arm_pc;
690     + regs->ARM_cpsr = context.arm_cpsr;
691     + }
692    
693     err |= !valid_user_regs(regs);
694    
695     diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
696     index 7dd14e8395e6..d2ce37da87d8 100644
697     --- a/arch/arm/kernel/smp.c
698     +++ b/arch/arm/kernel/smp.c
699     @@ -29,6 +29,7 @@
700     #include <linux/irq_work.h>
701    
702     #include <linux/atomic.h>
703     +#include <asm/bugs.h>
704     #include <asm/smp.h>
705     #include <asm/cacheflush.h>
706     #include <asm/cpu.h>
707     @@ -400,6 +401,9 @@ asmlinkage void secondary_start_kernel(void)
708     * before we continue - which happens after __cpu_up returns.
709     */
710     set_cpu_online(cpu, true);
711     +
712     + check_other_bugs();
713     +
714     complete(&cpu_running);
715    
716     local_irq_enable();
717     diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
718     index 9a2f882a0a2d..134f0d432610 100644
719     --- a/arch/arm/kernel/suspend.c
720     +++ b/arch/arm/kernel/suspend.c
721     @@ -1,6 +1,7 @@
722     #include <linux/init.h>
723     #include <linux/slab.h>
724    
725     +#include <asm/bugs.h>
726     #include <asm/cacheflush.h>
727     #include <asm/idmap.h>
728     #include <asm/pgalloc.h>
729     @@ -34,6 +35,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
730     cpu_switch_mm(mm->pgd, mm);
731     local_flush_bp_all();
732     local_flush_tlb_all();
733     + check_other_bugs();
734     }
735    
736     return ret;
737     diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
738     index 5f221acd21ae..640748e27035 100644
739     --- a/arch/arm/kernel/sys_oabi-compat.c
740     +++ b/arch/arm/kernel/sys_oabi-compat.c
741     @@ -328,9 +328,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
742     return -ENOMEM;
743     err = 0;
744     for (i = 0; i < nsops; i++) {
745     - __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
746     - __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
747     - __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
748     + struct oabi_sembuf osb;
749     + err |= __copy_from_user(&osb, tsops, sizeof(osb));
750     + sops[i].sem_num = osb.sem_num;
751     + sops[i].sem_op = osb.sem_op;
752     + sops[i].sem_flg = osb.sem_flg;
753     tsops++;
754     }
755     if (timeout) {
756     diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
757     index 96beb53934c9..64d4a39f4b4b 100644
758     --- a/arch/arm/kvm/hyp/hyp-entry.S
759     +++ b/arch/arm/kvm/hyp/hyp-entry.S
760     @@ -16,6 +16,7 @@
761     * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
762     */
763    
764     +#include <linux/arm-smccc.h>
765     #include <linux/linkage.h>
766     #include <asm/kvm_arm.h>
767     #include <asm/kvm_asm.h>
768     @@ -71,6 +72,90 @@ __kvm_hyp_vector:
769     W(b) hyp_irq
770     W(b) hyp_fiq
771    
772     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
773     + .align 5
774     +__kvm_hyp_vector_ic_inv:
775     + .global __kvm_hyp_vector_ic_inv
776     +
777     + /*
778     + * We encode the exception entry in the bottom 3 bits of
779     + * SP, and we have to guarantee to be 8 bytes aligned.
780     + */
781     + W(add) sp, sp, #1 /* Reset 7 */
782     + W(add) sp, sp, #1 /* Undef 6 */
783     + W(add) sp, sp, #1 /* Syscall 5 */
784     + W(add) sp, sp, #1 /* Prefetch abort 4 */
785     + W(add) sp, sp, #1 /* Data abort 3 */
786     + W(add) sp, sp, #1 /* HVC 2 */
787     + W(add) sp, sp, #1 /* IRQ 1 */
788     + W(nop) /* FIQ 0 */
789     +
790     + mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
791     + isb
792     +
793     + b decode_vectors
794     +
795     + .align 5
796     +__kvm_hyp_vector_bp_inv:
797     + .global __kvm_hyp_vector_bp_inv
798     +
799     + /*
800     + * We encode the exception entry in the bottom 3 bits of
801     + * SP, and we have to guarantee to be 8 bytes aligned.
802     + */
803     + W(add) sp, sp, #1 /* Reset 7 */
804     + W(add) sp, sp, #1 /* Undef 6 */
805     + W(add) sp, sp, #1 /* Syscall 5 */
806     + W(add) sp, sp, #1 /* Prefetch abort 4 */
807     + W(add) sp, sp, #1 /* Data abort 3 */
808     + W(add) sp, sp, #1 /* HVC 2 */
809     + W(add) sp, sp, #1 /* IRQ 1 */
810     + W(nop) /* FIQ 0 */
811     +
812     + mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
813     + isb
814     +
815     +decode_vectors:
816     +
817     +#ifdef CONFIG_THUMB2_KERNEL
818     + /*
819     + * Yet another silly hack: Use VPIDR as a temp register.
820     + * Thumb2 is really a pain, as SP cannot be used with most
821     + * of the bitwise instructions. The vect_br macro ensures
822     + * things gets cleaned-up.
823     + */
824     + mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
825     + mov r0, sp
826     + and r0, r0, #7
827     + sub sp, sp, r0
828     + push {r1, r2}
829     + mov r1, r0
830     + mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
831     + mrc p15, 0, r2, c0, c0, 0 /* MIDR */
832     + mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
833     +#endif
834     +
835     +.macro vect_br val, targ
836     +ARM( eor sp, sp, #\val )
837     +ARM( tst sp, #7 )
838     +ARM( eorne sp, sp, #\val )
839     +
840     +THUMB( cmp r1, #\val )
841     +THUMB( popeq {r1, r2} )
842     +
843     + beq \targ
844     +.endm
845     +
846     + vect_br 0, hyp_fiq
847     + vect_br 1, hyp_irq
848     + vect_br 2, hyp_hvc
849     + vect_br 3, hyp_dabt
850     + vect_br 4, hyp_pabt
851     + vect_br 5, hyp_svc
852     + vect_br 6, hyp_undef
853     + vect_br 7, hyp_reset
854     +#endif
855     +
856     .macro invalid_vector label, cause
857     .align
858     \label: mov r0, #\cause
859     @@ -118,7 +203,7 @@ hyp_hvc:
860     lsr r2, r2, #16
861     and r2, r2, #0xff
862     cmp r2, #0
863     - bne guest_trap @ Guest called HVC
864     + bne guest_hvc_trap @ Guest called HVC
865    
866     /*
867     * Getting here means host called HVC, we shift parameters and branch
868     @@ -131,7 +216,14 @@ hyp_hvc:
869     mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
870     beq 1f
871    
872     - push {lr}
873     + /*
874     + * Pushing r2 here is just a way of keeping the stack aligned to
875     + * 8 bytes on any path that can trigger a HYP exception. Here,
876     + * we may well be about to jump into the guest, and the guest
877     + * exit would otherwise be badly decoded by our fancy
878     + * "decode-exception-without-a-branch" code...
879     + */
880     + push {r2, lr}
881    
882     mov lr, r0
883     mov r0, r1
884     @@ -141,9 +233,23 @@ hyp_hvc:
885     THUMB( orr lr, #1)
886     blx lr @ Call the HYP function
887    
888     - pop {lr}
889     + pop {r2, lr}
890     1: eret
891    
892     +guest_hvc_trap:
893     + movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
894     + movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
895     + ldr r0, [sp] @ Guest's r0
896     + teq r0, r2
897     + bne guest_trap
898     + add sp, sp, #12
899     + @ Returns:
900     + @ r0 = 0
901     + @ r1 = HSR value (perfectly predictable)
902     + @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
903     + mov r0, #0
904     + eret
905     +
906     guest_trap:
907     load_vcpu r0 @ Load VCPU pointer to r0
908    
909     diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
910     index 7a4b06049001..a826df3d3814 100644
911     --- a/arch/arm/lib/copy_from_user.S
912     +++ b/arch/arm/lib/copy_from_user.S
913     @@ -90,6 +90,15 @@
914     .text
915    
916     ENTRY(arm_copy_from_user)
917     +#ifdef CONFIG_CPU_SPECTRE
918     + get_thread_info r3
919     + ldr r3, [r3, #TI_ADDR_LIMIT]
920     + adds ip, r1, r2 @ ip=addr+size
921     + sub r3, r3, #1 @ addr_limit - 1
922     + cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
923     + movcs r1, #0 @ addr = NULL
924     + csdb
925     +#endif
926    
927     #include "copy_template.S"
928    
929     diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
930     index c1799dd1d0d9..7f3760fa9c15 100644
931     --- a/arch/arm/mm/Kconfig
932     +++ b/arch/arm/mm/Kconfig
933     @@ -396,6 +396,7 @@ config CPU_V7
934     select CPU_CP15_MPU if !MMU
935     select CPU_HAS_ASID if MMU
936     select CPU_PABRT_V7
937     + select CPU_SPECTRE if MMU
938     select CPU_TLB_V7 if MMU
939    
940     # ARMv7M
941     @@ -800,6 +801,28 @@ config CPU_BPREDICT_DISABLE
942     help
943     Say Y here to disable branch prediction. If unsure, say N.
944    
945     +config CPU_SPECTRE
946     + bool
947     +
948     +config HARDEN_BRANCH_PREDICTOR
949     + bool "Harden the branch predictor against aliasing attacks" if EXPERT
950     + depends on CPU_SPECTRE
951     + default y
952     + help
953     + Speculation attacks against some high-performance processors rely
954     + on being able to manipulate the branch predictor for a victim
955     + context by executing aliasing branches in the attacker context.
956     + Such attacks can be partially mitigated against by clearing
957     + internal branch predictor state and limiting the prediction
958     + logic in some situations.
959     +
960     + This config option will take CPU-specific actions to harden
961     + the branch predictor against aliasing attacks and may rely on
962     + specific instruction sequences or control bits being set by
963     + the system firmware.
964     +
965     + If unsure, say Y.
966     +
967     config TLS_REG_EMUL
968     bool
969     select NEED_KUSER_HELPERS
970     diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
971     index e8698241ece9..92d47c8cbbc3 100644
972     --- a/arch/arm/mm/Makefile
973     +++ b/arch/arm/mm/Makefile
974     @@ -94,7 +94,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
975     obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
976     obj-$(CONFIG_CPU_V6) += proc-v6.o
977     obj-$(CONFIG_CPU_V6K) += proc-v6.o
978     -obj-$(CONFIG_CPU_V7) += proc-v7.o
979     +obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
980     obj-$(CONFIG_CPU_V7M) += proc-v7m.o
981    
982     AFLAGS_proc-v6.o :=-Wa,-march=armv6
983     diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
984     index f7861dc83182..5ca207ada852 100644
985     --- a/arch/arm/mm/fault.c
986     +++ b/arch/arm/mm/fault.c
987     @@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
988     {
989     struct siginfo si;
990    
991     + if (addr > TASK_SIZE)
992     + harden_branch_predictor();
993     +
994     #ifdef CONFIG_DEBUG_USER
995     if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
996     ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
997     diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
998     index 0d40c285bd86..7d9176c4a21d 100644
999     --- a/arch/arm/mm/proc-macros.S
1000     +++ b/arch/arm/mm/proc-macros.S
1001     @@ -274,13 +274,14 @@
1002     mcr p15, 0, ip, c7, c10, 4 @ data write barrier
1003     .endm
1004    
1005     -.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
1006     +.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
1007     .type \name\()_processor_functions, #object
1008     .align 2
1009     ENTRY(\name\()_processor_functions)
1010     .word \dabort
1011     .word \pabort
1012     .word cpu_\name\()_proc_init
1013     + .word \bugs
1014     .word cpu_\name\()_proc_fin
1015     .word cpu_\name\()_reset
1016     .word cpu_\name\()_do_idle
1017     diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
1018     index c6141a5435c3..f8d45ad2a515 100644
1019     --- a/arch/arm/mm/proc-v7-2level.S
1020     +++ b/arch/arm/mm/proc-v7-2level.S
1021     @@ -41,11 +41,6 @@
1022     * even on Cortex-A8 revisions not affected by 430973.
1023     * If IBE is not set, the flush BTAC/BTB won't do anything.
1024     */
1025     -ENTRY(cpu_ca8_switch_mm)
1026     -#ifdef CONFIG_MMU
1027     - mov r2, #0
1028     - mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
1029     -#endif
1030     ENTRY(cpu_v7_switch_mm)
1031     #ifdef CONFIG_MMU
1032     mmid r1, r1 @ get mm->context.id
1033     @@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
1034     #endif
1035     bx lr
1036     ENDPROC(cpu_v7_switch_mm)
1037     -ENDPROC(cpu_ca8_switch_mm)
1038    
1039     /*
1040     * cpu_v7_set_pte_ext(ptep, pte)
1041     diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
1042     new file mode 100644
1043     index 000000000000..5544b82a2e7a
1044     --- /dev/null
1045     +++ b/arch/arm/mm/proc-v7-bugs.c
1046     @@ -0,0 +1,174 @@
1047     +// SPDX-License-Identifier: GPL-2.0
1048     +#include <linux/arm-smccc.h>
1049     +#include <linux/kernel.h>
1050     +#include <linux/psci.h>
1051     +#include <linux/smp.h>
1052     +
1053     +#include <asm/cp15.h>
1054     +#include <asm/cputype.h>
1055     +#include <asm/proc-fns.h>
1056     +#include <asm/system_misc.h>
1057     +
1058     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1059     +DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
1060     +
1061     +extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
1062     +extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
1063     +extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
1064     +extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
1065     +
1066     +static void harden_branch_predictor_bpiall(void)
1067     +{
1068     + write_sysreg(0, BPIALL);
1069     +}
1070     +
1071     +static void harden_branch_predictor_iciallu(void)
1072     +{
1073     + write_sysreg(0, ICIALLU);
1074     +}
1075     +
1076     +static void __maybe_unused call_smc_arch_workaround_1(void)
1077     +{
1078     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
1079     +}
1080     +
1081     +static void __maybe_unused call_hvc_arch_workaround_1(void)
1082     +{
1083     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
1084     +}
1085     +
1086     +static void cpu_v7_spectre_init(void)
1087     +{
1088     + const char *spectre_v2_method = NULL;
1089     + int cpu = smp_processor_id();
1090     +
1091     + if (per_cpu(harden_branch_predictor_fn, cpu))
1092     + return;
1093     +
1094     + switch (read_cpuid_part()) {
1095     + case ARM_CPU_PART_CORTEX_A8:
1096     + case ARM_CPU_PART_CORTEX_A9:
1097     + case ARM_CPU_PART_CORTEX_A12:
1098     + case ARM_CPU_PART_CORTEX_A17:
1099     + case ARM_CPU_PART_CORTEX_A73:
1100     + case ARM_CPU_PART_CORTEX_A75:
1101     + if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
1102     + goto bl_error;
1103     + per_cpu(harden_branch_predictor_fn, cpu) =
1104     + harden_branch_predictor_bpiall;
1105     + spectre_v2_method = "BPIALL";
1106     + break;
1107     +
1108     + case ARM_CPU_PART_CORTEX_A15:
1109     + case ARM_CPU_PART_BRAHMA_B15:
1110     + if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
1111     + goto bl_error;
1112     + per_cpu(harden_branch_predictor_fn, cpu) =
1113     + harden_branch_predictor_iciallu;
1114     + spectre_v2_method = "ICIALLU";
1115     + break;
1116     +
1117     +#ifdef CONFIG_ARM_PSCI
1118     + default:
1119     + /* Other ARM CPUs require no workaround */
1120     + if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
1121     + break;
1122     + /* fallthrough */
1123     + /* Cortex A57/A72 require firmware workaround */
1124     + case ARM_CPU_PART_CORTEX_A57:
1125     + case ARM_CPU_PART_CORTEX_A72: {
1126     + struct arm_smccc_res res;
1127     +
1128     + if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
1129     + break;
1130     +
1131     + switch (psci_ops.conduit) {
1132     + case PSCI_CONDUIT_HVC:
1133     + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1134     + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1135     + if ((int)res.a0 != 0)
1136     + break;
1137     + if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
1138     + goto bl_error;
1139     + per_cpu(harden_branch_predictor_fn, cpu) =
1140     + call_hvc_arch_workaround_1;
1141     + processor.switch_mm = cpu_v7_hvc_switch_mm;
1142     + spectre_v2_method = "hypervisor";
1143     + break;
1144     +
1145     + case PSCI_CONDUIT_SMC:
1146     + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1147     + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1148     + if ((int)res.a0 != 0)
1149     + break;
1150     + if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
1151     + goto bl_error;
1152     + per_cpu(harden_branch_predictor_fn, cpu) =
1153     + call_smc_arch_workaround_1;
1154     + processor.switch_mm = cpu_v7_smc_switch_mm;
1155     + spectre_v2_method = "firmware";
1156     + break;
1157     +
1158     + default:
1159     + break;
1160     + }
1161     + }
1162     +#endif
1163     + }
1164     +
1165     + if (spectre_v2_method)
1166     + pr_info("CPU%u: Spectre v2: using %s workaround\n",
1167     + smp_processor_id(), spectre_v2_method);
1168     + return;
1169     +
1170     +bl_error:
1171     + pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
1172     + cpu);
1173     +}
1174     +#else
1175     +static void cpu_v7_spectre_init(void)
1176     +{
1177     +}
1178     +#endif
1179     +
1180     +static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
1181     + u32 mask, const char *msg)
1182     +{
1183     + u32 aux_cr;
1184     +
1185     + asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
1186     +
1187     + if ((aux_cr & mask) != mask) {
1188     + if (!*warned)
1189     + pr_err("CPU%u: %s", smp_processor_id(), msg);
1190     + *warned = true;
1191     + return false;
1192     + }
1193     + return true;
1194     +}
1195     +
1196     +static DEFINE_PER_CPU(bool, spectre_warned);
1197     +
1198     +static bool check_spectre_auxcr(bool *warned, u32 bit)
1199     +{
1200     + return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
1201     + cpu_v7_check_auxcr_set(warned, bit,
1202     + "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
1203     +}
1204     +
1205     +void cpu_v7_ca8_ibe(void)
1206     +{
1207     + if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
1208     + cpu_v7_spectre_init();
1209     +}
1210     +
1211     +void cpu_v7_ca15_ibe(void)
1212     +{
1213     + if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
1214     + cpu_v7_spectre_init();
1215     +}
1216     +
1217     +void cpu_v7_bugs_init(void)
1218     +{
1219     + cpu_v7_spectre_init();
1220     +}
1221     diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
1222     index d00d52c9de3e..850c22bca19c 100644
1223     --- a/arch/arm/mm/proc-v7.S
1224     +++ b/arch/arm/mm/proc-v7.S
1225     @@ -9,6 +9,7 @@
1226     *
1227     * This is the "shell" of the ARMv7 processor support.
1228     */
1229     +#include <linux/arm-smccc.h>
1230     #include <linux/init.h>
1231     #include <linux/linkage.h>
1232     #include <asm/assembler.h>
1233     @@ -88,6 +89,37 @@ ENTRY(cpu_v7_dcache_clean_area)
1234     ret lr
1235     ENDPROC(cpu_v7_dcache_clean_area)
1236    
1237     +#ifdef CONFIG_ARM_PSCI
1238     + .arch_extension sec
1239     +ENTRY(cpu_v7_smc_switch_mm)
1240     + stmfd sp!, {r0 - r3}
1241     + movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
1242     + movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
1243     + smc #0
1244     + ldmfd sp!, {r0 - r3}
1245     + b cpu_v7_switch_mm
1246     +ENDPROC(cpu_v7_smc_switch_mm)
1247     + .arch_extension virt
1248     +ENTRY(cpu_v7_hvc_switch_mm)
1249     + stmfd sp!, {r0 - r3}
1250     + movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
1251     + movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
1252     + hvc #0
1253     + ldmfd sp!, {r0 - r3}
1254     + b cpu_v7_switch_mm
1255     +ENDPROC(cpu_v7_hvc_switch_mm)
1256     +#endif
1257     +ENTRY(cpu_v7_iciallu_switch_mm)
1258     + mov r3, #0
1259     + mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
1260     + b cpu_v7_switch_mm
1261     +ENDPROC(cpu_v7_iciallu_switch_mm)
1262     +ENTRY(cpu_v7_bpiall_switch_mm)
1263     + mov r3, #0
1264     + mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
1265     + b cpu_v7_switch_mm
1266     +ENDPROC(cpu_v7_bpiall_switch_mm)
1267     +
1268     string cpu_v7_name, "ARMv7 Processor"
1269     .align
1270    
1271     @@ -153,31 +185,6 @@ ENTRY(cpu_v7_do_resume)
1272     ENDPROC(cpu_v7_do_resume)
1273     #endif
1274    
1275     -/*
1276     - * Cortex-A8
1277     - */
1278     - globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
1279     - globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
1280     - globl_equ cpu_ca8_reset, cpu_v7_reset
1281     - globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
1282     - globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
1283     - globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
1284     - globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
1285     -#ifdef CONFIG_ARM_CPU_SUSPEND
1286     - globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
1287     - globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
1288     -#endif
1289     -
1290     -/*
1291     - * Cortex-A9 processor functions
1292     - */
1293     - globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
1294     - globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
1295     - globl_equ cpu_ca9mp_reset, cpu_v7_reset
1296     - globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
1297     - globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
1298     - globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
1299     - globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
1300     .globl cpu_ca9mp_suspend_size
1301     .equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
1302     #ifdef CONFIG_ARM_CPU_SUSPEND
1303     @@ -541,12 +548,79 @@ __v7_setup_stack:
1304    
1305     __INITDATA
1306    
1307     + .weak cpu_v7_bugs_init
1308     +
1309     @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
1310     - define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
1311     + define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
1312     +
1313     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1314     + @ generic v7 bpiall on context switch
1315     + globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
1316     + globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
1317     + globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
1318     + globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
1319     + globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
1320     + globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
1321     + globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
1322     +#ifdef CONFIG_ARM_CPU_SUSPEND
1323     + globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
1324     + globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
1325     +#endif
1326     + define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
1327     +
1328     +#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
1329     +#else
1330     +#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
1331     +#endif
1332     +
1333     #ifndef CONFIG_ARM_LPAE
1334     - define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
1335     - define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
1336     + @ Cortex-A8 - always needs bpiall switch_mm implementation
1337     + globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
1338     + globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
1339     + globl_equ cpu_ca8_reset, cpu_v7_reset
1340     + globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
1341     + globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
1342     + globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
1343     + globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
1344     + globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
1345     +#ifdef CONFIG_ARM_CPU_SUSPEND
1346     + globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
1347     + globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
1348     #endif
1349     + define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
1350     +
1351     + @ Cortex-A9 - needs more registers preserved across suspend/resume
1352     + @ and bpiall switch_mm for hardening
1353     + globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
1354     + globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
1355     + globl_equ cpu_ca9mp_reset, cpu_v7_reset
1356     + globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
1357     + globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
1358     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1359     + globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
1360     +#else
1361     + globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
1362     +#endif
1363     + globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
1364     + define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
1365     +#endif
1366     +
1367     + @ Cortex-A15 - needs iciallu switch_mm for hardening
1368     + globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
1369     + globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
1370     + globl_equ cpu_ca15_reset, cpu_v7_reset
1371     + globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
1372     + globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
1373     +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1374     + globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
1375     +#else
1376     + globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
1377     +#endif
1378     + globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
1379     + globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
1380     + globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
1381     + globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
1382     + define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
1383     #ifdef CONFIG_CPU_PJ4B
1384     define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
1385     #endif
1386     @@ -653,7 +727,7 @@ __v7_ca7mp_proc_info:
1387     __v7_ca12mp_proc_info:
1388     .long 0x410fc0d0
1389     .long 0xff0ffff0
1390     - __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
1391     + __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
1392     .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
1393    
1394     /*
1395     @@ -663,7 +737,7 @@ __v7_ca12mp_proc_info:
1396     __v7_ca15mp_proc_info:
1397     .long 0x410fc0f0
1398     .long 0xff0ffff0
1399     - __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
1400     + __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
1401     .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
1402    
1403     /*
1404     @@ -673,7 +747,7 @@ __v7_ca15mp_proc_info:
1405     __v7_b15mp_proc_info:
1406     .long 0x420f00f0
1407     .long 0xff0ffff0
1408     - __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
1409     + __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions
1410     .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
1411    
1412     /*
1413     @@ -683,9 +757,25 @@ __v7_b15mp_proc_info:
1414     __v7_ca17mp_proc_info:
1415     .long 0x410fc0e0
1416     .long 0xff0ffff0
1417     - __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
1418     + __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
1419     .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
1420    
1421     + /* ARM Ltd. Cortex A73 processor */
1422     + .type __v7_ca73_proc_info, #object
1423     +__v7_ca73_proc_info:
1424     + .long 0x410fd090
1425     + .long 0xff0ffff0
1426     + __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
1427     + .size __v7_ca73_proc_info, . - __v7_ca73_proc_info
1428     +
1429     + /* ARM Ltd. Cortex A75 processor */
1430     + .type __v7_ca75_proc_info, #object
1431     +__v7_ca75_proc_info:
1432     + .long 0x410fd0a0
1433     + .long 0xff0ffff0
1434     + __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
1435     + .size __v7_ca75_proc_info, . - __v7_ca75_proc_info
1436     +
1437     /*
1438     * Qualcomm Inc. Krait processors.
1439     */
1440     diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
1441     index 5629d7580973..8e5e97989fda 100644
1442     --- a/arch/arm/vfp/vfpmodule.c
1443     +++ b/arch/arm/vfp/vfpmodule.c
1444     @@ -597,13 +597,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
1445     }
1446    
1447     /* Sanitise and restore the current VFP state from the provided structures. */
1448     -int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
1449     - struct user_vfp_exc __user *ufp_exc)
1450     +int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
1451     {
1452     struct thread_info *thread = current_thread_info();
1453     struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
1454     unsigned long fpexc;
1455     - int err = 0;
1456    
1457     /* Disable VFP to avoid corrupting the new thread state. */
1458     vfp_flush_hwstate(thread);
1459     @@ -612,17 +610,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
1460     * Copy the floating point registers. There can be unused
1461     * registers see asm/hwcap.h for details.
1462     */
1463     - err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
1464     - sizeof(hwstate->fpregs));
1465     + memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
1466     /*
1467     * Copy the status and control register.
1468     */
1469     - __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
1470     + hwstate->fpscr = ufp->fpscr;
1471    
1472     /*
1473     * Sanitise and restore the exception registers.
1474     */
1475     - __get_user_error(fpexc, &ufp_exc->fpexc, err);
1476     + fpexc = ufp_exc->fpexc;
1477    
1478     /* Ensure the VFP is enabled. */
1479     fpexc |= FPEXC_EN;
1480     @@ -631,10 +628,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
1481     fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
1482     hwstate->fpexc = fpexc;
1483    
1484     - __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
1485     - __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
1486     + hwstate->fpinst = ufp_exc->fpinst;
1487     + hwstate->fpinst2 = ufp_exc->fpinst2;
1488    
1489     - return err ? -EFAULT : 0;
1490     + return 0;
1491     }
1492    
1493     /*
1494     diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
1495     index c98e7e849f06..8550408735a0 100644
1496     --- a/arch/arm64/crypto/sha1-ce-core.S
1497     +++ b/arch/arm64/crypto/sha1-ce-core.S
1498     @@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform)
1499     ldr dgb, [x0, #16]
1500    
1501     /* load sha1_ce_state::finalize */
1502     - ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
1503     + ldr_l w4, sha1_ce_offsetof_finalize, x4
1504     + ldr w4, [x0, x4]
1505    
1506     /* load input */
1507     0: ld1 {v8.4s-v11.4s}, [x1], #64
1508     @@ -132,7 +133,8 @@ CPU_LE( rev32 v11.16b, v11.16b )
1509     * the padding is handled by the C code in that case.
1510     */
1511     cbz x4, 3f
1512     - ldr x4, [x0, #:lo12:sha1_ce_offsetof_count]
1513     + ldr_l w4, sha1_ce_offsetof_count, x4
1514     + ldr x4, [x0, x4]
1515     movi v9.2d, #0
1516     mov x8, #0x80000000
1517     movi v10.2d, #0
1518     diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
1519     index aefda9868627..ea319c055f5d 100644
1520     --- a/arch/arm64/crypto/sha1-ce-glue.c
1521     +++ b/arch/arm64/crypto/sha1-ce-glue.c
1522     @@ -17,9 +17,6 @@
1523     #include <linux/crypto.h>
1524     #include <linux/module.h>
1525    
1526     -#define ASM_EXPORT(sym, val) \
1527     - asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
1528     -
1529     MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
1530     MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
1531     MODULE_LICENSE("GPL v2");
1532     @@ -32,6 +29,9 @@ struct sha1_ce_state {
1533     asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
1534     int blocks);
1535    
1536     +const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
1537     +const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
1538     +
1539     static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
1540     unsigned int len)
1541     {
1542     @@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
1543     struct sha1_ce_state *sctx = shash_desc_ctx(desc);
1544     bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
1545    
1546     - ASM_EXPORT(sha1_ce_offsetof_count,
1547     - offsetof(struct sha1_ce_state, sst.count));
1548     - ASM_EXPORT(sha1_ce_offsetof_finalize,
1549     - offsetof(struct sha1_ce_state, finalize));
1550     -
1551     /*
1552     * Allow the asm code to perform the finalization if there is no
1553     * partial data and the input is a round multiple of the block size.
1554     diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
1555     index 01cfee066837..679c6c002f4f 100644
1556     --- a/arch/arm64/crypto/sha2-ce-core.S
1557     +++ b/arch/arm64/crypto/sha2-ce-core.S
1558     @@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform)
1559     ld1 {dgav.4s, dgbv.4s}, [x0]
1560    
1561     /* load sha256_ce_state::finalize */
1562     - ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
1563     + ldr_l w4, sha256_ce_offsetof_finalize, x4
1564     + ldr w4, [x0, x4]
1565    
1566     /* load input */
1567     0: ld1 {v16.4s-v19.4s}, [x1], #64
1568     @@ -136,7 +137,8 @@ CPU_LE( rev32 v19.16b, v19.16b )
1569     * the padding is handled by the C code in that case.
1570     */
1571     cbz x4, 3f
1572     - ldr x4, [x0, #:lo12:sha256_ce_offsetof_count]
1573     + ldr_l w4, sha256_ce_offsetof_count, x4
1574     + ldr x4, [x0, x4]
1575     movi v17.2d, #0
1576     mov x8, #0x80000000
1577     movi v18.2d, #0
1578     diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
1579     index 7cd587564a41..0ed9486f75dd 100644
1580     --- a/arch/arm64/crypto/sha2-ce-glue.c
1581     +++ b/arch/arm64/crypto/sha2-ce-glue.c
1582     @@ -17,9 +17,6 @@
1583     #include <linux/crypto.h>
1584     #include <linux/module.h>
1585    
1586     -#define ASM_EXPORT(sym, val) \
1587     - asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
1588     -
1589     MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
1590     MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
1591     MODULE_LICENSE("GPL v2");
1592     @@ -32,6 +29,11 @@ struct sha256_ce_state {
1593     asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
1594     int blocks);
1595    
1596     +const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
1597     + sst.count);
1598     +const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
1599     + finalize);
1600     +
1601     static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
1602     unsigned int len)
1603     {
1604     @@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
1605     struct sha256_ce_state *sctx = shash_desc_ctx(desc);
1606     bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
1607    
1608     - ASM_EXPORT(sha256_ce_offsetof_count,
1609     - offsetof(struct sha256_ce_state, sst.count));
1610     - ASM_EXPORT(sha256_ce_offsetof_finalize,
1611     - offsetof(struct sha256_ce_state, finalize));
1612     -
1613     /*
1614     * Allow the asm code to perform the finalization if there is no
1615     * partial data and the input is a round multiple of the block size.
1616     diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
1617     index a9e54aad15ef..65615820155e 100644
1618     --- a/arch/arm64/include/asm/efi.h
1619     +++ b/arch/arm64/include/asm/efi.h
1620     @@ -54,6 +54,9 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
1621     #define alloc_screen_info(x...) &screen_info
1622     #define free_screen_info(x...)
1623    
1624     +/* redeclare as 'hidden' so the compiler will generate relative references */
1625     +extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
1626     +
1627     static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
1628     {
1629     }
1630     diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
1631     index 1d047d6c421b..f5cd96c60eb9 100644
1632     --- a/arch/arm64/include/asm/uaccess.h
1633     +++ b/arch/arm64/include/asm/uaccess.h
1634     @@ -198,7 +198,7 @@ do { \
1635     (err), ARM64_HAS_UAO); \
1636     break; \
1637     case 8: \
1638     - __get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
1639     + __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
1640     (err), ARM64_HAS_UAO); \
1641     break; \
1642     default: \
1643     @@ -272,7 +272,7 @@ do { \
1644     (err), ARM64_HAS_UAO); \
1645     break; \
1646     case 8: \
1647     - __put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
1648     + __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
1649     (err), ARM64_HAS_UAO); \
1650     break; \
1651     default: \
1652     diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
1653     index 3686d6abafde..9edda5466020 100644
1654     --- a/arch/ia64/kernel/Makefile
1655     +++ b/arch/ia64/kernel/Makefile
1656     @@ -50,32 +50,10 @@ CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
1657     # The gate DSO image is built using a special linker script.
1658     include $(src)/Makefile.gate
1659    
1660     -# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
1661     -define sed-y
1662     - "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
1663     -endef
1664     -quiet_cmd_nr_irqs = GEN $@
1665     -define cmd_nr_irqs
1666     - (set -e; \
1667     - echo "#ifndef __ASM_NR_IRQS_H__"; \
1668     - echo "#define __ASM_NR_IRQS_H__"; \
1669     - echo "/*"; \
1670     - echo " * DO NOT MODIFY."; \
1671     - echo " *"; \
1672     - echo " * This file was generated by Kbuild"; \
1673     - echo " *"; \
1674     - echo " */"; \
1675     - echo ""; \
1676     - sed -ne $(sed-y) $<; \
1677     - echo ""; \
1678     - echo "#endif" ) > $@
1679     -endef
1680     -
1681     # We use internal kbuild rules to avoid the "is up to date" message from make
1682     arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
1683     $(Q)mkdir -p $(dir $@)
1684     $(call if_changed_dep,cc_s_c)
1685    
1686     -include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
1687     - $(Q)mkdir -p $(dir $@)
1688     - $(call cmd,nr_irqs)
1689     +include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s FORCE
1690     + $(call filechk,offsets,__ASM_NR_IRQS_H__)
1691     diff --git a/arch/x86/Makefile b/arch/x86/Makefile
1692     index f408babdf746..b5226a009973 100644
1693     --- a/arch/x86/Makefile
1694     +++ b/arch/x86/Makefile
1695     @@ -11,6 +11,16 @@ else
1696     KBUILD_DEFCONFIG := $(ARCH)_defconfig
1697     endif
1698    
1699     +# For gcc stack alignment is specified with -mpreferred-stack-boundary,
1700     +# clang has the option -mstack-alignment for that purpose.
1701     +ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
1702     + cc_stack_align4 := -mpreferred-stack-boundary=2
1703     + cc_stack_align8 := -mpreferred-stack-boundary=3
1704     +else ifneq ($(call cc-option, -mstack-alignment=16),)
1705     + cc_stack_align4 := -mstack-alignment=4
1706     + cc_stack_align8 := -mstack-alignment=8
1707     +endif
1708     +
1709     # How to compile the 16-bit code. Note we always compile for -march=i386;
1710     # that way we can complain to the user if the CPU is insufficient.
1711     #
1712     @@ -24,10 +34,11 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
1713     -DDISABLE_BRANCH_PROFILING \
1714     -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
1715     -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
1716     - -mno-mmx -mno-sse \
1717     - $(call cc-option, -ffreestanding) \
1718     - $(call cc-option, -fno-stack-protector) \
1719     - $(call cc-option, -mpreferred-stack-boundary=2)
1720     + -mno-mmx -mno-sse
1721     +
1722     +REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
1723     +REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
1724     +REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
1725     export REALMODE_CFLAGS
1726    
1727     # BITS is used as extension for files which are available in a 32 bit
1728     @@ -64,8 +75,10 @@ ifeq ($(CONFIG_X86_32),y)
1729     # with nonstandard options
1730     KBUILD_CFLAGS += -fno-pic
1731    
1732     - # prevent gcc from keeping the stack 16 byte aligned
1733     - KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
1734     + # Align the stack to the register width instead of using the default
1735     + # alignment of 16 bytes. This reduces stack usage and the number of
1736     + # alignment instructions.
1737     + KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
1738    
1739     # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
1740     # a lot more stack due to the lack of sharing of stacklots:
1741     @@ -88,17 +101,23 @@ else
1742     KBUILD_CFLAGS += -m64
1743    
1744     # Align jump targets to 1 byte, not the default 16 bytes:
1745     - KBUILD_CFLAGS += -falign-jumps=1
1746     + KBUILD_CFLAGS += $(call cc-option,-falign-jumps=1)
1747    
1748     # Pack loops tightly as well:
1749     - KBUILD_CFLAGS += -falign-loops=1
1750     + KBUILD_CFLAGS += $(call cc-option,-falign-loops=1)
1751    
1752     # Don't autogenerate traditional x87 instructions
1753     KBUILD_CFLAGS += $(call cc-option,-mno-80387)
1754     KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
1755    
1756     - # Use -mpreferred-stack-boundary=3 if supported.
1757     - KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
1758     + # By default gcc and clang use a stack alignment of 16 bytes for x86.
1759     + # However the standard kernel entry on x86-64 leaves the stack on an
1760     + # 8-byte boundary. If the compiler isn't informed about the actual
1761     + # alignment it will generate extra alignment instructions for the
1762     + # default alignment which keep the stack *mis*aligned.
1763     + # Furthermore an alignment to the register width reduces stack usage
1764     + # and the number of alignment instructions.
1765     + KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align8))
1766    
1767     # Use -mskip-rax-setup if supported.
1768     KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
1769     diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
1770     index 9e240fcba784..08dfce02362c 100644
1771     --- a/arch/x86/boot/string.c
1772     +++ b/arch/x86/boot/string.c
1773     @@ -16,6 +16,15 @@
1774     #include "ctype.h"
1775     #include "string.h"
1776    
1777     +/*
1778     + * Undef these macros so that the functions that we provide
1779     + * here will have the correct names regardless of how string.h
1780     + * may have chosen to #define them.
1781     + */
1782     +#undef memcpy
1783     +#undef memset
1784     +#undef memcmp
1785     +
1786     int memcmp(const void *s1, const void *s2, size_t len)
1787     {
1788     bool diff;
1789     diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
1790     index a916c4a61165..5f6a5af9c489 100644
1791     --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
1792     +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
1793     @@ -65,7 +65,6 @@
1794     #include <linux/linkage.h>
1795     #include <asm/inst.h>
1796    
1797     -#define CONCAT(a,b) a##b
1798     #define VMOVDQ vmovdqu
1799    
1800     #define xdata0 %xmm0
1801     @@ -92,8 +91,6 @@
1802     #define num_bytes %r8
1803    
1804     #define tmp %r10
1805     -#define DDQ(i) CONCAT(ddq_add_,i)
1806     -#define XMM(i) CONCAT(%xmm, i)
1807     #define DDQ_DATA 0
1808     #define XDATA 1
1809     #define KEY_128 1
1810     @@ -131,12 +128,12 @@ ddq_add_8:
1811     /* generate a unique variable for ddq_add_x */
1812    
1813     .macro setddq n
1814     - var_ddq_add = DDQ(\n)
1815     + var_ddq_add = ddq_add_\n
1816     .endm
1817    
1818     /* generate a unique variable for xmm register */
1819     .macro setxdata n
1820     - var_xdata = XMM(\n)
1821     + var_xdata = %xmm\n
1822     .endm
1823    
1824     /* club the numeric 'id' to the symbol 'name' */
1825     diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
1826     index 5e23e2d305e7..2cd9496eb696 100644
1827     --- a/drivers/firmware/efi/libstub/Makefile
1828     +++ b/drivers/firmware/efi/libstub/Makefile
1829     @@ -10,8 +10,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
1830     -fPIC -fno-strict-aliasing -mno-red-zone \
1831     -mno-mmx -mno-sse
1832    
1833     -cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
1834     -cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
1835     +cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
1836     +cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
1837     -fno-builtin -fpic -mno-single-pic-base
1838    
1839     cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
1840     @@ -60,7 +60,7 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
1841     extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y)
1842     lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y))
1843    
1844     -STUBCOPY_FLAGS-y := -R .debug* -R *ksymtab* -R *kcrctab*
1845     +STUBCOPY_RM-y := -R *ksymtab* -R *kcrctab*
1846     STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
1847     --prefix-symbols=__efistub_
1848     STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
1849     @@ -68,17 +68,25 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
1850     $(obj)/%.stub.o: $(obj)/%.o FORCE
1851     $(call if_changed,stubcopy)
1852    
1853     +#
1854     +# Strip debug sections and some other sections that may legally contain
1855     +# absolute relocations, so that we can inspect the remaining sections for
1856     +# such relocations. If none are found, regenerate the output object, but
1857     +# this time, use objcopy and leave all sections in place.
1858     +#
1859     quiet_cmd_stubcopy = STUBCPY $@
1860     - cmd_stubcopy = if $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; then \
1861     - $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y) \
1862     - && (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
1863     - rm -f $@; /bin/false); else /bin/false; fi
1864     + cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
1865     + then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
1866     + then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
1867     + rm -f $@; /bin/false); \
1868     + else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \
1869     + else /bin/false; fi
1870    
1871     #
1872     # ARM discards the .data section because it disallows r/w data in the
1873     # decompressor. So move our .data to .data.efistub, which is preserved
1874     # explicitly by the decompressor linker script.
1875     #
1876     -STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
1877     - -R ___ksymtab+sort -R ___kcrctab+sort
1878     +STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
1879     +STUBCOPY_RM-$(CONFIG_ARM) += -R ___ksymtab+sort -R ___kcrctab+sort
1880     STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
1881     diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
1882     index eae693eb3e91..959d9b8d4845 100644
1883     --- a/drivers/firmware/efi/libstub/arm64-stub.c
1884     +++ b/drivers/firmware/efi/libstub/arm64-stub.c
1885     @@ -9,9 +9,17 @@
1886     * published by the Free Software Foundation.
1887     *
1888     */
1889     +
1890     +/*
1891     + * To prevent the compiler from emitting GOT-indirected (and thus absolute)
1892     + * references to the section markers, override their visibility as 'hidden'
1893     + */
1894     +#pragma GCC visibility push(hidden)
1895     +#include <asm/sections.h>
1896     +#pragma GCC visibility pop
1897     +
1898     #include <linux/efi.h>
1899     #include <asm/efi.h>
1900     -#include <asm/sections.h>
1901     #include <asm/sysreg.h>
1902    
1903     #include "efistub.h"
1904     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1905     index 6250989c83d8..c069a04a6e7e 100644
1906     --- a/drivers/net/ethernet/broadcom/tg3.c
1907     +++ b/drivers/net/ethernet/broadcom/tg3.c
1908     @@ -12389,6 +12389,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
1909     {
1910     struct tg3 *tp = netdev_priv(dev);
1911     int i, irq_sync = 0, err = 0;
1912     + bool reset_phy = false;
1913    
1914     if ((ering->rx_pending > tp->rx_std_ring_mask) ||
1915     (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
1916     @@ -12420,7 +12421,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
1917    
1918     if (netif_running(dev)) {
1919     tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1920     - err = tg3_restart_hw(tp, false);
1921     + /* Reset PHY to avoid PHY lock up */
1922     + if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
1923     + tg3_asic_rev(tp) == ASIC_REV_5719 ||
1924     + tg3_asic_rev(tp) == ASIC_REV_5720)
1925     + reset_phy = true;
1926     +
1927     + err = tg3_restart_hw(tp, reset_phy);
1928     if (!err)
1929     tg3_netif_start(tp);
1930     }
1931     @@ -12454,6 +12461,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
1932     {
1933     struct tg3 *tp = netdev_priv(dev);
1934     int err = 0;
1935     + bool reset_phy = false;
1936    
1937     if (tp->link_config.autoneg == AUTONEG_ENABLE)
1938     tg3_warn_mgmt_link_flap(tp);
1939     @@ -12544,7 +12552,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
1940    
1941     if (netif_running(dev)) {
1942     tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1943     - err = tg3_restart_hw(tp, false);
1944     + /* Reset PHY to avoid PHY lock up */
1945     + if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
1946     + tg3_asic_rev(tp) == ASIC_REV_5719 ||
1947     + tg3_asic_rev(tp) == ASIC_REV_5720)
1948     + reset_phy = true;
1949     +
1950     + err = tg3_restart_hw(tp, reset_phy);
1951     if (!err)
1952     tg3_netif_start(tp);
1953     }
1954     diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
1955     index a167116ceeee..e29f4c0767eb 100644
1956     --- a/drivers/net/usb/smsc95xx.c
1957     +++ b/drivers/net/usb/smsc95xx.c
1958     @@ -1590,6 +1590,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
1959     return ret;
1960     }
1961    
1962     + cancel_delayed_work_sync(&pdata->carrier_check);
1963     +
1964     if (pdata->suspend_flags) {
1965     netdev_warn(dev->net, "error during last resume\n");
1966     pdata->suspend_flags = 0;
1967     @@ -1832,6 +1834,11 @@ done:
1968     */
1969     if (ret && PMSG_IS_AUTO(message))
1970     usbnet_resume(intf);
1971     +
1972     + if (ret)
1973     + schedule_delayed_work(&pdata->carrier_check,
1974     + CARRIER_CHECK_DELAY);
1975     +
1976     return ret;
1977     }
1978    
1979     diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
1980     index 8feab810aed9..7f188b8d0c67 100644
1981     --- a/drivers/xen/Makefile
1982     +++ b/drivers/xen/Makefile
1983     @@ -7,9 +7,6 @@ obj-y += xenbus/
1984     nostackp := $(call cc-option, -fno-stack-protector)
1985     CFLAGS_features.o := $(nostackp)
1986    
1987     -CFLAGS_efi.o += -fshort-wchar
1988     -LDFLAGS += $(call ld-option, --no-wchar-size-warning)
1989     -
1990     dom0-$(CONFIG_ARM64) += arm-device.o
1991     dom0-$(CONFIG_PCI) += pci.o
1992     dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
1993     diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h
1994     index 22a72198c14b..4e80f3a9ad58 100644
1995     --- a/include/linux/kbuild.h
1996     +++ b/include/linux/kbuild.h
1997     @@ -2,14 +2,14 @@
1998     #define __LINUX_KBUILD_H
1999    
2000     #define DEFINE(sym, val) \
2001     - asm volatile("\n->" #sym " %0 " #val : : "i" (val))
2002     + asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
2003    
2004     -#define BLANK() asm volatile("\n->" : : )
2005     +#define BLANK() asm volatile("\n.ascii \"->\"" : : )
2006    
2007     #define OFFSET(sym, str, mem) \
2008     DEFINE(sym, offsetof(struct str, mem))
2009    
2010     #define COMMENT(x) \
2011     - asm volatile("\n->#" x)
2012     + asm volatile("\n.ascii \"->#" x "\"")
2013    
2014     #endif
2015     diff --git a/include/linux/module.h b/include/linux/module.h
2016     index d2224a09b4b5..fd9e121c7b3f 100644
2017     --- a/include/linux/module.h
2018     +++ b/include/linux/module.h
2019     @@ -127,13 +127,13 @@ extern void cleanup_module(void);
2020    
2021     /* Each module must use one module_init(). */
2022     #define module_init(initfn) \
2023     - static inline initcall_t __inittest(void) \
2024     + static inline initcall_t __maybe_unused __inittest(void) \
2025     { return initfn; } \
2026     int init_module(void) __attribute__((alias(#initfn)));
2027    
2028     /* This is only required if you want to be unloadable. */
2029     #define module_exit(exitfn) \
2030     - static inline exitcall_t __exittest(void) \
2031     + static inline exitcall_t __maybe_unused __exittest(void) \
2032     { return exitfn; } \
2033     void cleanup_module(void) __attribute__((alias(#exitfn)));
2034    
2035     diff --git a/net/core/dev.c b/net/core/dev.c
2036     index 15e3bb94156b..071c589f7994 100644
2037     --- a/net/core/dev.c
2038     +++ b/net/core/dev.c
2039     @@ -4756,6 +4756,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2040     skb->vlan_tci = 0;
2041     skb->dev = napi->dev;
2042     skb->skb_iif = 0;
2043     +
2044     + /* eth_type_trans() assumes pkt_type is PACKET_HOST */
2045     + skb->pkt_type = PACKET_HOST;
2046     +
2047     skb->encapsulation = 0;
2048     skb_shinfo(skb)->gso_type = 0;
2049     skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
2050     diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
2051     index 862d63ec56e4..ab7c50026cae 100644
2052     --- a/net/core/flow_dissector.c
2053     +++ b/net/core/flow_dissector.c
2054     @@ -538,8 +538,8 @@ ip_proto_again:
2055     break;
2056     }
2057    
2058     - if (dissector_uses_key(flow_dissector,
2059     - FLOW_DISSECTOR_KEY_PORTS)) {
2060     + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
2061     + !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
2062     key_ports = skb_flow_dissector_target(flow_dissector,
2063     FLOW_DISSECTOR_KEY_PORTS,
2064     target_container);
2065     diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
2066     index 8323d33c0ce2..5a8c26c9872d 100644
2067     --- a/net/ipv4/inet_fragment.c
2068     +++ b/net/ipv4/inet_fragment.c
2069     @@ -180,21 +180,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
2070     }
2071    
2072     static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
2073     - void *arg)
2074     + void *arg,
2075     + struct inet_frag_queue **prev)
2076     {
2077     struct inet_frags *f = nf->f;
2078     struct inet_frag_queue *q;
2079     - int err;
2080    
2081     q = inet_frag_alloc(nf, f, arg);
2082     - if (!q)
2083     + if (!q) {
2084     + *prev = ERR_PTR(-ENOMEM);
2085     return NULL;
2086     -
2087     + }
2088     mod_timer(&q->timer, jiffies + nf->timeout);
2089    
2090     - err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
2091     - f->rhash_params);
2092     - if (err < 0) {
2093     + *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
2094     + &q->node, f->rhash_params);
2095     + if (*prev) {
2096     q->flags |= INET_FRAG_COMPLETE;
2097     inet_frag_kill(q);
2098     inet_frag_destroy(q);
2099     @@ -207,17 +208,18 @@ EXPORT_SYMBOL(inet_frag_create);
2100     /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
2101     struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
2102     {
2103     - struct inet_frag_queue *fq;
2104     + struct inet_frag_queue *fq = NULL, *prev;
2105    
2106     rcu_read_lock();
2107     - fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
2108     - if (fq) {
2109     + prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
2110     + if (!prev)
2111     + fq = inet_frag_create(nf, key, &prev);
2112     + if (prev && !IS_ERR(prev)) {
2113     + fq = prev;
2114     if (!atomic_inc_not_zero(&fq->refcnt))
2115     fq = NULL;
2116     - rcu_read_unlock();
2117     - return fq;
2118     }
2119     rcu_read_unlock();
2120     - return inet_frag_create(nf, key);
2121     + return fq;
2122     }
2123     EXPORT_SYMBOL(inet_frag_find);
2124     diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
2125     index 0fd1976ab63b..2220a1b396af 100644
2126     --- a/net/ipv4/ip_tunnel_core.c
2127     +++ b/net/ipv4/ip_tunnel_core.c
2128     @@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
2129    
2130     iph->version = 4;
2131     iph->ihl = sizeof(struct iphdr) >> 2;
2132     - iph->frag_off = df;
2133     + iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
2134     iph->protocol = proto;
2135     iph->tos = tos;
2136     iph->daddr = dst;
2137     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2138     index 4cc12eeca7ab..0db120d2a4fe 100644
2139     --- a/net/ipv6/route.c
2140     +++ b/net/ipv6/route.c
2141     @@ -1439,10 +1439,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2142    
2143     void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2144     {
2145     + int oif = sk->sk_bound_dev_if;
2146     struct dst_entry *dst;
2147    
2148     - ip6_update_pmtu(skb, sock_net(sk), mtu,
2149     - sk->sk_bound_dev_if, sk->sk_mark);
2150     + if (!oif && skb->dev)
2151     + oif = l3mdev_master_ifindex(skb->dev);
2152     +
2153     + ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark);
2154    
2155     dst = __sk_dst_get(sk);
2156     if (!dst || !dst->obsolete ||
2157     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2158     index 9827ba4b9f74..93e60068800b 100644
2159     --- a/net/sctp/socket.c
2160     +++ b/net/sctp/socket.c
2161     @@ -3732,32 +3732,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
2162     unsigned int optlen)
2163     {
2164     struct sctp_assoc_value params;
2165     - struct sctp_association *asoc;
2166     - int retval = -EINVAL;
2167    
2168     if (optlen != sizeof(params))
2169     - goto out;
2170     -
2171     - if (copy_from_user(&params, optval, optlen)) {
2172     - retval = -EFAULT;
2173     - goto out;
2174     - }
2175     -
2176     - asoc = sctp_id2assoc(sk, params.assoc_id);
2177     - if (asoc) {
2178     - asoc->prsctp_enable = !!params.assoc_value;
2179     - } else if (!params.assoc_id) {
2180     - struct sctp_sock *sp = sctp_sk(sk);
2181     + return -EINVAL;
2182    
2183     - sp->ep->prsctp_enable = !!params.assoc_value;
2184     - } else {
2185     - goto out;
2186     - }
2187     + if (copy_from_user(&params, optval, optlen))
2188     + return -EFAULT;
2189    
2190     - retval = 0;
2191     + sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
2192    
2193     -out:
2194     - return retval;
2195     + return 0;
2196     }
2197    
2198     static int sctp_setsockopt_default_prinfo(struct sock *sk,
2199     diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
2200     index 8f8965608ee3..123840d827e8 100644
2201     --- a/scripts/Kbuild.include
2202     +++ b/scripts/Kbuild.include
2203     @@ -109,6 +109,11 @@ as-option = $(call try-run,\
2204     as-instr = $(call try-run,\
2205     printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
2206    
2207     +# __cc-option
2208     +# Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
2209     +__cc-option = $(call try-run,\
2210     + $(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4))
2211     +
2212     # Do not attempt to build with gcc plugins during cc-option tests.
2213     # (And this uses delayed resolution so the flags will be up to date.)
2214     CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
2215     @@ -116,13 +121,18 @@ CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
2216     # cc-option
2217     # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
2218    
2219     -cc-option = $(call try-run,\
2220     - $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
2221     +cc-option = $(call __cc-option, $(CC),\
2222     + $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS),$(1),$(2))
2223     +
2224     +# hostcc-option
2225     +# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
2226     +hostcc-option = $(call __cc-option, $(HOSTCC),\
2227     + $(HOSTCFLAGS) $(HOST_EXTRACFLAGS),$(1),$(2))
2228    
2229     # cc-option-yn
2230     # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
2231     cc-option-yn = $(call try-run,\
2232     - $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
2233     + $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
2234    
2235     # cc-option-align
2236     # Prefix align with either -falign or -malign
2237     @@ -132,7 +142,7 @@ cc-option-align = $(subst -functions=0,,\
2238     # cc-disable-warning
2239     # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
2240     cc-disable-warning = $(call try-run,\
2241     - $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
2242     + $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
2243    
2244     # cc-name
2245     # Expands to either gcc or clang
2246     diff --git a/scripts/Makefile.build b/scripts/Makefile.build
2247     index abfd4f4b66dd..6228a83156ea 100644
2248     --- a/scripts/Makefile.build
2249     +++ b/scripts/Makefile.build
2250     @@ -176,6 +176,14 @@ cmd_cc_symtypes_c = \
2251     $(obj)/%.symtypes : $(src)/%.c FORCE
2252     $(call cmd,cc_symtypes_c)
2253    
2254     +# LLVM assembly
2255     +# Generate .ll files from .c
2256     +quiet_cmd_cc_ll_c = CC $(quiet_modtag) $@
2257     + cmd_cc_ll_c = $(CC) $(c_flags) -emit-llvm -S -o $@ $<
2258     +
2259     +$(obj)/%.ll: $(src)/%.c FORCE
2260     + $(call if_changed_dep,cc_ll_c)
2261     +
2262     # C (.c) files
2263     # The C file is compiled and updated dependency information is generated.
2264     # (See cmd_cc_o_c + relevant part of rule_cc_o_c)
2265     diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
2266     index 7c321a603b07..fb3522fd8702 100644
2267     --- a/scripts/Makefile.extrawarn
2268     +++ b/scripts/Makefile.extrawarn
2269     @@ -64,7 +64,6 @@ ifeq ($(cc-name),clang)
2270     KBUILD_CFLAGS += $(call cc-disable-warning, initializer-overrides)
2271     KBUILD_CFLAGS += $(call cc-disable-warning, unused-value)
2272     KBUILD_CFLAGS += $(call cc-disable-warning, format)
2273     -KBUILD_CFLAGS += $(call cc-disable-warning, unknown-warning-option)
2274     KBUILD_CFLAGS += $(call cc-disable-warning, sign-compare)
2275     KBUILD_CFLAGS += $(call cc-disable-warning, format-zero-length)
2276     KBUILD_CFLAGS += $(call cc-disable-warning, uninitialized)
2277     diff --git a/scripts/Makefile.host b/scripts/Makefile.host
2278     index 45b5b1aaedbd..9cfd5c84d76f 100644
2279     --- a/scripts/Makefile.host
2280     +++ b/scripts/Makefile.host
2281     @@ -20,12 +20,6 @@
2282     # Will compile qconf as a C++ program, and menu as a C program.
2283     # They are linked as C++ code to the executable qconf
2284    
2285     -# hostcc-option
2286     -# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
2287     -
2288     -hostcc-option = $(call try-run,\
2289     - $(HOSTCC) $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
2290     -
2291     __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
2292     host-cshlib := $(sort $(hostlibs-y) $(hostlibs-m))
2293     host-cxxshlib := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
2294     diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
2295     index c954040c3cf2..4e02d51dfc62 100644
2296     --- a/scripts/Makefile.lib
2297     +++ b/scripts/Makefile.lib
2298     @@ -408,3 +408,34 @@ quiet_cmd_xzmisc = XZMISC $@
2299     cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
2300     xz --check=crc32 --lzma2=dict=1MiB) > $@ || \
2301     (rm -f $@ ; false)
2302     +
2303     +# ASM offsets
2304     +# ---------------------------------------------------------------------------
2305     +
2306     +# Default sed regexp - multiline due to syntax constraints
2307     +#
2308     +# Use [:space:] because LLVM's integrated assembler inserts <tab> around
2309     +# the .ascii directive whereas GCC keeps the <space> as-is.
2310     +define sed-offsets
2311     + 's:^[[:space:]]*\.ascii[[:space:]]*"\(.*\)".*:\1:; \
2312     + /^->/{s:->#\(.*\):/* \1 */:; \
2313     + s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
2314     + s:->::; p;}'
2315     +endef
2316     +
2317     +# Use filechk to avoid rebuilds when a header changes, but the resulting file
2318     +# does not
2319     +define filechk_offsets
2320     + (set -e; \
2321     + echo "#ifndef $2"; \
2322     + echo "#define $2"; \
2323     + echo "/*"; \
2324     + echo " * DO NOT MODIFY."; \
2325     + echo " *"; \
2326     + echo " * This file was generated by Kbuild"; \
2327     + echo " */"; \
2328     + echo ""; \
2329     + sed -ne $(sed-offsets); \
2330     + echo ""; \
2331     + echo "#endif" )
2332     +endef
2333     diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile
2334     index 19d9bcadc0cc..b497d9764dcf 100644
2335     --- a/scripts/mod/Makefile
2336     +++ b/scripts/mod/Makefile
2337     @@ -7,32 +7,8 @@ modpost-objs := modpost.o file2alias.o sumversion.o
2338    
2339     devicetable-offsets-file := devicetable-offsets.h
2340    
2341     -define sed-y
2342     - "/^->/{s:->#\(.*\):/* \1 */:; \
2343     - s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
2344     - s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
2345     - s:->::; p;}"
2346     -endef
2347     -
2348     -quiet_cmd_offsets = GEN $@
2349     -define cmd_offsets
2350     - (set -e; \
2351     - echo "#ifndef __DEVICETABLE_OFFSETS_H__"; \
2352     - echo "#define __DEVICETABLE_OFFSETS_H__"; \
2353     - echo "/*"; \
2354     - echo " * DO NOT MODIFY."; \
2355     - echo " *"; \
2356     - echo " * This file was generated by Kbuild"; \
2357     - echo " *"; \
2358     - echo " */"; \
2359     - echo ""; \
2360     - sed -ne $(sed-y) $<; \
2361     - echo ""; \
2362     - echo "#endif" ) > $@
2363     -endef
2364     -
2365     -$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s
2366     - $(call if_changed,offsets)
2367     +$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s FORCE
2368     + $(call filechk,offsets,__DEVICETABLE_OFFSETS_H__)
2369    
2370     targets += $(devicetable-offsets-file) devicetable-offsets.s
2371