Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0268-4.9.169-all-fixes.patch

Parent Directory | Revision Log


Revision 3345
Tue Jun 18 09:41:59 2019 UTC by niro
File size: 97398 bytes
-linux-4.9.169
1 diff --git a/Makefile b/Makefile
2 index f44094d2b147..23cc23c47adf 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 168
9 +SUBLEVEL = 169
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 @@ -507,7 +507,7 @@ endif
14 ifeq ($(cc-name),clang)
15 ifneq ($(CROSS_COMPILE),)
16 CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
17 -GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
18 +GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
19 CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
20 GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
21 endif
22 diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
23 index 8a394f336003..ee65702f9645 100644
24 --- a/arch/arm/boot/dts/sama5d2-pinfunc.h
25 +++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
26 @@ -517,7 +517,7 @@
27 #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
28 #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
29 #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
30 -#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
31 +#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
32 #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
33 #define PIN_PC10 74
34 #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
35 diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
36 index 2a5090fb9113..d7116f5935fb 100644
37 --- a/arch/arm64/include/asm/futex.h
38 +++ b/arch/arm64/include/asm/futex.h
39 @@ -33,8 +33,8 @@
40 " prfm pstl1strm, %2\n" \
41 "1: ldxr %w1, %2\n" \
42 insn "\n" \
43 -"2: stlxr %w3, %w0, %2\n" \
44 -" cbnz %w3, 1b\n" \
45 +"2: stlxr %w0, %w3, %2\n" \
46 +" cbnz %w0, 1b\n" \
47 " dmb ish\n" \
48 "3:\n" \
49 " .pushsection .fixup,\"ax\"\n" \
50 @@ -53,29 +53,29 @@
51 static inline int
52 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
53 {
54 - int oldval = 0, ret, tmp;
55 + int oldval, ret, tmp;
56
57 pagefault_disable();
58
59 switch (op) {
60 case FUTEX_OP_SET:
61 - __futex_atomic_op("mov %w0, %w4",
62 + __futex_atomic_op("mov %w3, %w4",
63 ret, oldval, uaddr, tmp, oparg);
64 break;
65 case FUTEX_OP_ADD:
66 - __futex_atomic_op("add %w0, %w1, %w4",
67 + __futex_atomic_op("add %w3, %w1, %w4",
68 ret, oldval, uaddr, tmp, oparg);
69 break;
70 case FUTEX_OP_OR:
71 - __futex_atomic_op("orr %w0, %w1, %w4",
72 + __futex_atomic_op("orr %w3, %w1, %w4",
73 ret, oldval, uaddr, tmp, oparg);
74 break;
75 case FUTEX_OP_ANDN:
76 - __futex_atomic_op("and %w0, %w1, %w4",
77 + __futex_atomic_op("and %w3, %w1, %w4",
78 ret, oldval, uaddr, tmp, ~oparg);
79 break;
80 case FUTEX_OP_XOR:
81 - __futex_atomic_op("eor %w0, %w1, %w4",
82 + __futex_atomic_op("eor %w3, %w1, %w4",
83 ret, oldval, uaddr, tmp, oparg);
84 break;
85 default:
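With this change the asm operands map as %w0 = ret, %w1 = oldval, %w3 = tmp: the op result is computed into tmp and stored, while ret carries only the store-exclusive status (and the fixup error code), so an operation whose computed value happens to be non-zero is no longer misreported as a failure. A C-level sketch of the required semantics — names are illustrative, and user-space atomics stand in for the kernel's ldxr/stlxr retry loop and exception-table fault handling:

#include <stdatomic.h>

/* Sketch only: a futex op must return 0 on success and hand the
 * *old* value back via oval; the computed new value is never the
 * return code. The kernel uses ldxr/stlxr so user page faults can
 * be caught via the exception table.
 */
static int futex_op_add(_Atomic int *uaddr, int oparg, int *oval)
{
        *oval = atomic_fetch_add(uaddr, oparg);
        return 0; /* status only, independent of the result value */
}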
86 diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
87 index fa6b2fad7a3d..5d3df68272f5 100644
88 --- a/arch/arm64/mm/init.c
89 +++ b/arch/arm64/mm/init.c
90 @@ -272,7 +272,7 @@ void __init arm64_memblock_init(void)
91 * memory spans, randomize the linear region as well.
92 */
93 if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
94 - range = range / ARM64_MEMSTART_ALIGN + 1;
95 + range /= ARM64_MEMSTART_ALIGN;
96 memstart_addr -= ARM64_MEMSTART_ALIGN *
97 ((range * memstart_offset_seed) >> 16);
98 }
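The expression change above reserves one ARM64_MEMSTART_ALIGN of headroom: with the old rounding-up form the maximum randomized offset could consume the entire range, while the new floor division keeps the offset at least one alignment unit short of it. A small self-checking illustration — the align and range values are made-up round numbers, and the seed is the 16-bit maximum:

#include <assert.h>

int main(void)
{
        unsigned long align = 0x8000000;        /* stand-in value */
        unsigned long range = 4 * align;
        unsigned long seed  = 0xffff;           /* max 16-bit seed */

        unsigned long old_r = range / align + 1;  /* old expression */
        unsigned long new_r = range / align;      /* new expression */

        /* old form: offset can reach the full range (no headroom) */
        assert(align * ((old_r * seed) >> 16) == range);
        /* new form: at least one alignment unit is left spare */
        assert(align * ((new_r * seed) >> 16) == range - align);
        return 0;
}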
99 diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
100 index 2e674e13e005..656984ec1958 100644
101 --- a/arch/parisc/include/asm/processor.h
102 +++ b/arch/parisc/include/asm/processor.h
103 @@ -323,6 +323,8 @@ extern int _parisc_requires_coherency;
104 #define parisc_requires_coherency() (0)
105 #endif
106
107 +extern int running_on_qemu;
108 +
109 #endif /* __ASSEMBLY__ */
110
111 #endif /* __ASM_PARISC_PROCESSOR_H */
112 diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
113 index c3a532abac03..2e5216c28bb1 100644
114 --- a/arch/parisc/kernel/process.c
115 +++ b/arch/parisc/kernel/process.c
116 @@ -206,12 +206,6 @@ void __cpuidle arch_cpu_idle(void)
117
118 static int __init parisc_idle_init(void)
119 {
120 - const char *marker;
121 -
122 - /* check QEMU/SeaBIOS marker in PAGE0 */
123 - marker = (char *) &PAGE0->pad0;
124 - running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
125 -
126 if (!running_on_qemu)
127 cpu_idle_poll_ctrl(1);
128
129 diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
130 index 2e66a887788e..581b0c66e521 100644
131 --- a/arch/parisc/kernel/setup.c
132 +++ b/arch/parisc/kernel/setup.c
133 @@ -403,6 +403,9 @@ void start_parisc(void)
134 int ret, cpunum;
135 struct pdc_coproc_cfg coproc_cfg;
136
137 + /* check QEMU/SeaBIOS marker in PAGE0 */
138 + running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
139 +
140 cpunum = smp_processor_id();
141
142 set_firmware_width_unlocked();
143 diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
144 index 47ef8fdcd382..22754e0c3bda 100644
145 --- a/arch/parisc/kernel/time.c
146 +++ b/arch/parisc/kernel/time.c
147 @@ -299,7 +299,7 @@ static int __init init_cr16_clocksource(void)
148 * The cr16 interval timers are not syncronized across CPUs, so mark
149 * them unstable and lower rating on SMP systems.
150 */
151 - if (num_online_cpus() > 1) {
152 + if (num_online_cpus() > 1 && !running_on_qemu) {
153 clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
154 clocksource_cr16.rating = 0;
155 }
156 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
157 index 0a6bb48854e3..fa8f2aa88189 100644
158 --- a/arch/powerpc/Kconfig
159 +++ b/arch/powerpc/Kconfig
160 @@ -128,7 +128,7 @@ config PPC
161 select ARCH_HAS_GCOV_PROFILE_ALL
162 select GENERIC_SMP_IDLE_THREAD
163 select GENERIC_CMOS_UPDATE
164 - select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64
165 + select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC
166 select GENERIC_TIME_VSYSCALL_OLD
167 select GENERIC_CLOCKEVENTS
168 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
169 @@ -164,6 +164,11 @@ config PPC
170 select HAVE_ARCH_HARDENED_USERCOPY
171 select HAVE_KERNEL_GZIP
172
173 +config PPC_BARRIER_NOSPEC
174 + bool
175 + default y
176 + depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
177 +
178 config GENERIC_CSUM
179 def_bool CPU_LITTLE_ENDIAN
180
181 diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
182 index e0baba1535e6..f3daa175f86c 100644
183 --- a/arch/powerpc/include/asm/asm-prototypes.h
184 +++ b/arch/powerpc/include/asm/asm-prototypes.h
185 @@ -121,4 +121,10 @@ extern s64 __ashrdi3(s64, int);
186 extern int __cmpdi2(s64, s64);
187 extern int __ucmpdi2(u64, u64);
188
189 +/* Patch sites */
190 +extern s32 patch__call_flush_count_cache;
191 +extern s32 patch__flush_count_cache_return;
192 +
193 +extern long flush_count_cache;
194 +
195 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
196 diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
197 index 798ab37c9930..80024c4f2093 100644
198 --- a/arch/powerpc/include/asm/barrier.h
199 +++ b/arch/powerpc/include/asm/barrier.h
200 @@ -77,6 +77,27 @@ do { \
201
202 #define smp_mb__before_spinlock() smp_mb()
203
204 +#ifdef CONFIG_PPC_BOOK3S_64
205 +#define NOSPEC_BARRIER_SLOT nop
206 +#elif defined(CONFIG_PPC_FSL_BOOK3E)
207 +#define NOSPEC_BARRIER_SLOT nop; nop
208 +#endif
209 +
210 +#ifdef CONFIG_PPC_BARRIER_NOSPEC
211 +/*
212 + * Prevent execution of subsequent instructions until preceding branches have
213 + * been fully resolved and are no longer executing speculatively.
214 + */
215 +#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
216 +
217 +// This also acts as a compiler barrier due to the memory clobber.
218 +#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
219 +
220 +#else /* !CONFIG_PPC_BARRIER_NOSPEC */
221 +#define barrier_nospec_asm
222 +#define barrier_nospec()
223 +#endif /* CONFIG_PPC_BARRIER_NOSPEC */
224 +
225 #include <asm-generic/barrier.h>
226
227 #endif /* _ASM_POWERPC_BARRIER_H */
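In use, barrier_nospec() sits between a bounds check on an untrusted value and the dependent load — exactly where the uaccess.h and syscall-entry hunks later in this patch place it. A minimal sketch of the pattern (the table, its size, and the function name are hypothetical, not from this patch):

#define NR_ENTRIES 16
static int table[NR_ENTRIES];

static int lookup(unsigned long idx)    /* idx is user-controlled */
{
        if (idx >= NR_ENTRIES)
                return -EINVAL;
        /*
         * Hold the load below until the bounds check above has
         * resolved, so it cannot execute speculatively with an
         * out-of-bounds idx.
         */
        barrier_nospec();
        return table[idx];
}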
228 diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
229 new file mode 100644
230 index 000000000000..ed7b1448493a
231 --- /dev/null
232 +++ b/arch/powerpc/include/asm/code-patching-asm.h
233 @@ -0,0 +1,18 @@
234 +/* SPDX-License-Identifier: GPL-2.0+ */
235 +/*
236 + * Copyright 2018, Michael Ellerman, IBM Corporation.
237 + */
238 +#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
239 +#define _ASM_POWERPC_CODE_PATCHING_ASM_H
240 +
241 +/* Define a "site" that can be patched */
242 +.macro patch_site label name
243 + .pushsection ".rodata"
244 + .balign 4
245 + .global \name
246 +\name:
247 + .4byte \label - .
248 + .popsection
249 +.endm
250 +
251 +#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
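The .4byte \label - . directive stores a self-relative offset, so a site entry needs no relocation: the address to patch is recovered by adding the stored value back to the entry's own address. That is precisely the computation performed by the patch_instruction_site()/patch_branch_site() helpers this patch adds to lib/code-patching.c; a standalone restatement (helper name illustrative):

#include <stdint.h>

/* Recover the target address from a site entry holding
 * (label - &entry), mirroring the new code-patching helpers. */
static inline uint32_t *site_addr(int32_t *site)
{
        return (uint32_t *)((uintptr_t)site + *site);
}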
252 diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
253 index b4ab1f497335..ab934f8232bd 100644
254 --- a/arch/powerpc/include/asm/code-patching.h
255 +++ b/arch/powerpc/include/asm/code-patching.h
256 @@ -28,6 +28,8 @@ unsigned int create_cond_branch(const unsigned int *addr,
257 unsigned long target, int flags);
258 int patch_branch(unsigned int *addr, unsigned long target, int flags);
259 int patch_instruction(unsigned int *addr, unsigned int instr);
260 +int patch_instruction_site(s32 *addr, unsigned int instr);
261 +int patch_branch_site(s32 *site, unsigned long target, int flags);
262
263 int instr_is_relative_branch(unsigned int instr);
264 int instr_is_relative_link_branch(unsigned int instr);
265 diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
266 index 0bf8202feca6..175128e19025 100644
267 --- a/arch/powerpc/include/asm/feature-fixups.h
268 +++ b/arch/powerpc/include/asm/feature-fixups.h
269 @@ -213,6 +213,25 @@ void setup_feature_keys(void);
270 FTR_ENTRY_OFFSET 951b-952b; \
271 .popsection;
272
273 +#define NOSPEC_BARRIER_FIXUP_SECTION \
274 +953: \
275 + .pushsection __barrier_nospec_fixup,"a"; \
276 + .align 2; \
277 +954: \
278 + FTR_ENTRY_OFFSET 953b-954b; \
279 + .popsection;
280 +
281 +#define START_BTB_FLUSH_SECTION \
282 +955: \
283 +
284 +#define END_BTB_FLUSH_SECTION \
285 +956: \
286 + .pushsection __btb_flush_fixup,"a"; \
287 + .align 2; \
288 +957: \
289 + FTR_ENTRY_OFFSET 955b-957b; \
290 + FTR_ENTRY_OFFSET 956b-957b; \
291 + .popsection;
292
293 #ifndef __ASSEMBLY__
294
295 @@ -220,6 +239,8 @@ extern long stf_barrier_fallback;
296 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
297 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
298 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
299 +extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
300 +extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
301
302 #endif
303
304 diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
305 index 9d978102bf0d..9587d301db55 100644
306 --- a/arch/powerpc/include/asm/hvcall.h
307 +++ b/arch/powerpc/include/asm/hvcall.h
308 @@ -316,10 +316,12 @@
309 #define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
310 #define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
311 #define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
312 +#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
313
314 #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
315 #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
316 #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
317 +#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
318
319 #ifndef __ASSEMBLY__
320 #include <linux/types.h>
321 diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
322 index c73750b0d9fa..bbd35ba36a22 100644
323 --- a/arch/powerpc/include/asm/ppc_asm.h
324 +++ b/arch/powerpc/include/asm/ppc_asm.h
325 @@ -437,7 +437,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
326 .machine push ; \
327 .machine "power4" ; \
328 lis scratch,0x60000000@h; \
329 - dcbt r0,scratch,0b01010; \
330 + dcbt 0,scratch,0b01010; \
331 .machine pop
332
333 /*
334 @@ -780,4 +780,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
335 .long 0x2400004c /* rfid */
336 #endif /* !CONFIG_PPC_BOOK3E */
337 #endif /* __ASSEMBLY__ */
338 +
339 +/*
340 + * Helper macro for exception table entries
341 + */
342 +#define EX_TABLE(_fault, _target) \
343 + stringify_in_c(.section __ex_table,"a";)\
344 + stringify_in_c(.balign 4;) \
345 + stringify_in_c(.long (_fault) - . ;) \
346 + stringify_in_c(.long (_target) - . ;) \
347 + stringify_in_c(.previous)
348 +
349 +#ifdef CONFIG_PPC_FSL_BOOK3E
350 +#define BTB_FLUSH(reg) \
351 + lis reg,BUCSR_INIT@h; \
352 + ori reg,reg,BUCSR_INIT@l; \
353 + mtspr SPRN_BUCSR,reg; \
354 + isync;
355 +#else
356 +#define BTB_FLUSH(reg)
357 +#endif /* CONFIG_PPC_FSL_BOOK3E */
358 +
359 #endif /* _ASM_POWERPC_PPC_ASM_H */
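The EX_TABLE() helper emits a relative exception-table entry from C inline assembly. A condensed sketch of the usual shape — modelled on the kernel's put_user assembly, abbreviated and with an illustrative macro name — where a fault at label 1 resumes at fixup label 3:

#define my_put_user(x, addr, err)                               \
        __asm__ __volatile__(                                   \
                "1:     stw %1,0(%2)\n"                         \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     li %0,%3\n"                             \
                "       b 2b\n"                                 \
                ".previous\n"                                   \
                EX_TABLE(1b, 3b)                                \
                : "=r" (err)                                    \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))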
360 diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
361 index 44989b22383c..759597bf0fd8 100644
362 --- a/arch/powerpc/include/asm/security_features.h
363 +++ b/arch/powerpc/include/asm/security_features.h
364 @@ -22,6 +22,7 @@ enum stf_barrier_type {
365
366 void setup_stf_barrier(void);
367 void do_stf_barrier_fixups(enum stf_barrier_type types);
368 +void setup_count_cache_flush(void);
369
370 static inline void security_ftr_set(unsigned long feature)
371 {
372 @@ -59,6 +60,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
373 // Indirect branch prediction cache disabled
374 #define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
375
376 +// bcctr 2,0,0 triggers a hardware assisted count cache flush
377 +#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull
378 +
379
380 // Features indicating need for Spectre/Meltdown mitigations
381
382 @@ -74,6 +78,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
383 // Firmware configuration indicates user favours security over performance
384 #define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
385
386 +// Software required to flush count cache on context switch
387 +#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
388 +
389
390 // Features enabled by default
391 #define SEC_FTR_DEFAULT \
392 diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
393 index 3f160cd20107..862ebce3ae54 100644
394 --- a/arch/powerpc/include/asm/setup.h
395 +++ b/arch/powerpc/include/asm/setup.h
396 @@ -8,6 +8,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
397
398 extern unsigned int rtas_data;
399 extern unsigned long long memory_limit;
400 +extern bool init_mem_is_free;
401 extern unsigned long klimit;
402 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
403
404 @@ -50,6 +51,26 @@ enum l1d_flush_type {
405
406 void setup_rfi_flush(enum l1d_flush_type, bool enable);
407 void do_rfi_flush_fixups(enum l1d_flush_type types);
408 +#ifdef CONFIG_PPC_BARRIER_NOSPEC
409 +void setup_barrier_nospec(void);
410 +#else
411 +static inline void setup_barrier_nospec(void) { };
412 +#endif
413 +void do_barrier_nospec_fixups(bool enable);
414 +extern bool barrier_nospec_enabled;
415 +
416 +#ifdef CONFIG_PPC_BARRIER_NOSPEC
417 +void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
418 +#else
419 +static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
420 +#endif
421 +
422 +#ifdef CONFIG_PPC_FSL_BOOK3E
423 +void setup_spectre_v2(void);
424 +#else
425 +static inline void setup_spectre_v2(void) {};
426 +#endif
427 +void do_btb_flush_fixups(void);
428
429 #endif /* !__ASSEMBLY__ */
430
431 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
432 index 31913b3ac7ab..da852153c1f8 100644
433 --- a/arch/powerpc/include/asm/uaccess.h
434 +++ b/arch/powerpc/include/asm/uaccess.h
435 @@ -269,6 +269,7 @@ do { \
436 __chk_user_ptr(ptr); \
437 if (!is_kernel_addr((unsigned long)__gu_addr)) \
438 might_fault(); \
439 + barrier_nospec(); \
440 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
441 (x) = (__typeof__(*(ptr)))__gu_val; \
442 __gu_err; \
443 @@ -280,8 +281,10 @@ do { \
444 unsigned long __gu_val = 0; \
445 __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
446 might_fault(); \
447 - if (access_ok(VERIFY_READ, __gu_addr, (size))) \
448 + if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
449 + barrier_nospec(); \
450 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
451 + } \
452 (x) = (__force __typeof__(*(ptr)))__gu_val; \
453 __gu_err; \
454 })
455 @@ -292,6 +295,7 @@ do { \
456 unsigned long __gu_val; \
457 __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
458 __chk_user_ptr(ptr); \
459 + barrier_nospec(); \
460 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
461 (x) = (__force __typeof__(*(ptr)))__gu_val; \
462 __gu_err; \
463 @@ -348,15 +352,19 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
464
465 switch (n) {
466 case 1:
467 + barrier_nospec();
468 __get_user_size(*(u8 *)to, from, 1, ret);
469 break;
470 case 2:
471 + barrier_nospec();
472 __get_user_size(*(u16 *)to, from, 2, ret);
473 break;
474 case 4:
475 + barrier_nospec();
476 __get_user_size(*(u32 *)to, from, 4, ret);
477 break;
478 case 8:
479 + barrier_nospec();
480 __get_user_size(*(u64 *)to, from, 8, ret);
481 break;
482 }
483 @@ -366,6 +374,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
484
485 check_object_size(to, n, false);
486
487 + barrier_nospec();
488 return __copy_tofrom_user((__force void __user *)to, from, n);
489 }
490
491 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
492 index 13885786282b..d80fbf0884ff 100644
493 --- a/arch/powerpc/kernel/Makefile
494 +++ b/arch/powerpc/kernel/Makefile
495 @@ -44,9 +44,10 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
496 obj-$(CONFIG_VDSO32) += vdso32/
497 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
498 obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
499 -obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
500 +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
501 obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
502 obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
503 +obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
504 obj-$(CONFIG_PPC64) += vdso64/
505 obj-$(CONFIG_ALTIVEC) += vecemu.o
506 obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
507 diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
508 index 370645687cc7..bdd88f9d7926 100644
509 --- a/arch/powerpc/kernel/entry_32.S
510 +++ b/arch/powerpc/kernel/entry_32.S
511 @@ -34,6 +34,7 @@
512 #include <asm/ftrace.h>
513 #include <asm/ptrace.h>
514 #include <asm/export.h>
515 +#include <asm/barrier.h>
516
517 /*
518 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
519 @@ -347,6 +348,15 @@ syscall_dotrace_cont:
520 ori r10,r10,sys_call_table@l
521 slwi r0,r0,2
522 bge- 66f
523 +
524 + barrier_nospec_asm
525 + /*
526 + * Prevent the load of the handler below (based on the user-passed
527 + * system call number) being speculatively executed until the test
528 + * against NR_syscalls and branch to .66f above has
529 + * committed.
530 + */
531 +
532 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
533 mtlr r10
534 addi r9,r1,STACK_FRAME_OVERHEAD
535 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
536 index e24ae0fa80ed..390ebf4ef384 100644
537 --- a/arch/powerpc/kernel/entry_64.S
538 +++ b/arch/powerpc/kernel/entry_64.S
539 @@ -26,6 +26,7 @@
540 #include <asm/page.h>
541 #include <asm/mmu.h>
542 #include <asm/thread_info.h>
543 +#include <asm/code-patching-asm.h>
544 #include <asm/ppc_asm.h>
545 #include <asm/asm-offsets.h>
546 #include <asm/cputable.h>
547 @@ -38,6 +39,7 @@
548 #include <asm/context_tracking.h>
549 #include <asm/tm.h>
550 #include <asm/ppc-opcode.h>
551 +#include <asm/barrier.h>
552 #include <asm/export.h>
553 #ifdef CONFIG_PPC_BOOK3S
554 #include <asm/exception-64s.h>
555 @@ -78,6 +80,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
556 std r0,GPR0(r1)
557 std r10,GPR1(r1)
558 beq 2f /* if from kernel mode */
559 +#ifdef CONFIG_PPC_FSL_BOOK3E
560 +START_BTB_FLUSH_SECTION
561 + BTB_FLUSH(r10)
562 +END_BTB_FLUSH_SECTION
563 +#endif
564 ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
565 2: std r2,GPR2(r1)
566 std r3,GPR3(r1)
567 @@ -180,6 +187,15 @@ system_call: /* label this so stack traces look sane */
568 clrldi r8,r8,32
569 15:
570 slwi r0,r0,4
571 +
572 + barrier_nospec_asm
573 + /*
574 + * Prevent the load of the handler below (based on the user-passed
575 + * system call number) being speculatively executed until the test
576 + * against NR_syscalls and branch to .Lsyscall_enosys above has
577 + * committed.
578 + */
579 +
580 ldx r12,r11,r0 /* Fetch system call handler [ptr] */
581 mtctr r12
582 bctrl /* Call handler */
583 @@ -473,6 +489,57 @@ _GLOBAL(ret_from_kernel_thread)
584 li r3,0
585 b .Lsyscall_exit
586
587 +#ifdef CONFIG_PPC_BOOK3S_64
588 +
589 +#define FLUSH_COUNT_CACHE \
590 +1: nop; \
591 + patch_site 1b, patch__call_flush_count_cache
592 +
593 +
594 +#define BCCTR_FLUSH .long 0x4c400420
595 +
596 +.macro nops number
597 + .rept \number
598 + nop
599 + .endr
600 +.endm
601 +
602 +.balign 32
603 +.global flush_count_cache
604 +flush_count_cache:
605 + /* Save LR into r9 */
606 + mflr r9
607 +
608 + .rept 64
609 + bl .+4
610 + .endr
611 + b 1f
612 + nops 6
613 +
614 + .balign 32
615 + /* Restore LR */
616 +1: mtlr r9
617 + li r9,0x7fff
618 + mtctr r9
619 +
620 + BCCTR_FLUSH
621 +
622 +2: nop
623 + patch_site 2b patch__flush_count_cache_return
624 +
625 + nops 3
626 +
627 + .rept 278
628 + .balign 32
629 + BCCTR_FLUSH
630 + nops 7
631 + .endr
632 +
633 + blr
634 +#else
635 +#define FLUSH_COUNT_CACHE
636 +#endif /* CONFIG_PPC_BOOK3S_64 */
637 +
638 /*
639 * This routine switches between two different tasks. The process
640 * state of one is saved on its kernel stack. Then the state
641 @@ -504,6 +571,8 @@ _GLOBAL(_switch)
642 std r23,_CCR(r1)
643 std r1,KSP(r3) /* Set old stack pointer */
644
645 + FLUSH_COUNT_CACHE
646 +
647 #ifdef CONFIG_SMP
648 /* We need a sync somewhere here to make sure that if the
649 * previous task gets rescheduled on another CPU, it sees all
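The BCCTR_FLUSH constant above is the raw encoding of bcctr 2,0,0, the form the hvcall and security-feature definitions earlier in this patch describe as triggering a hardware-assisted count cache flush. A quick self-check of the 0x4c400420 value against the standard bcctr encoding (opcode 19, extended opcode 528, LK = 0; helper name illustrative):

#include <assert.h>
#include <stdint.h>

static uint32_t ppc_bcctr(uint32_t bo, uint32_t bi, uint32_t bh)
{
        return (19u << 26) | (bo << 21) | (bi << 16) |
               (bh << 11) | (528u << 1);
}

int main(void)
{
        assert(ppc_bcctr(2, 0, 0) == 0x4c400420);  /* BCCTR_FLUSH */
        return 0;
}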
650 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
651 index ca03eb229a9a..423b5257d3a1 100644
652 --- a/arch/powerpc/kernel/exceptions-64e.S
653 +++ b/arch/powerpc/kernel/exceptions-64e.S
654 @@ -295,7 +295,8 @@ ret_from_mc_except:
655 andi. r10,r11,MSR_PR; /* save stack pointer */ \
656 beq 1f; /* branch around if supervisor */ \
657 ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\
658 -1: cmpdi cr1,r1,0; /* check if SP makes sense */ \
659 +1: type##_BTB_FLUSH \
660 + cmpdi cr1,r1,0; /* check if SP makes sense */ \
661 bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
662 mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
663
664 @@ -327,6 +328,30 @@ ret_from_mc_except:
665 #define SPRN_MC_SRR0 SPRN_MCSRR0
666 #define SPRN_MC_SRR1 SPRN_MCSRR1
667
668 +#ifdef CONFIG_PPC_FSL_BOOK3E
669 +#define GEN_BTB_FLUSH \
670 + START_BTB_FLUSH_SECTION \
671 + beq 1f; \
672 + BTB_FLUSH(r10) \
673 + 1: \
674 + END_BTB_FLUSH_SECTION
675 +
676 +#define CRIT_BTB_FLUSH \
677 + START_BTB_FLUSH_SECTION \
678 + BTB_FLUSH(r10) \
679 + END_BTB_FLUSH_SECTION
680 +
681 +#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
682 +#define MC_BTB_FLUSH CRIT_BTB_FLUSH
683 +#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
684 +#else
685 +#define GEN_BTB_FLUSH
686 +#define CRIT_BTB_FLUSH
687 +#define DBG_BTB_FLUSH
688 +#define MC_BTB_FLUSH
689 +#define GDBELL_BTB_FLUSH
690 +#endif
691 +
692 #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
693 EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
694
695 diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
696 index a620203f7de3..7b98c7351f6c 100644
697 --- a/arch/powerpc/kernel/head_booke.h
698 +++ b/arch/powerpc/kernel/head_booke.h
699 @@ -31,6 +31,16 @@
700 */
701 #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
702
703 +#ifdef CONFIG_PPC_FSL_BOOK3E
704 +#define BOOKE_CLEAR_BTB(reg) \
705 +START_BTB_FLUSH_SECTION \
706 + BTB_FLUSH(reg) \
707 +END_BTB_FLUSH_SECTION
708 +#else
709 +#define BOOKE_CLEAR_BTB(reg)
710 +#endif
711 +
712 +
713 #define NORMAL_EXCEPTION_PROLOG(intno) \
714 mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
715 mfspr r10, SPRN_SPRG_THREAD; \
716 @@ -42,6 +52,7 @@
717 andi. r11, r11, MSR_PR; /* check whether user or kernel */\
718 mr r11, r1; \
719 beq 1f; \
720 + BOOKE_CLEAR_BTB(r11) \
721 /* if from user, start at top of this thread's kernel stack */ \
722 lwz r11, THREAD_INFO-THREAD(r10); \
723 ALLOC_STACK_FRAME(r11, THREAD_SIZE); \
724 @@ -127,6 +138,7 @@
725 stw r9,_CCR(r8); /* save CR on stack */\
726 mfspr r11,exc_level_srr1; /* check whether user or kernel */\
727 DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
728 + BOOKE_CLEAR_BTB(r10) \
729 andi. r11,r11,MSR_PR; \
730 mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
731 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
732 diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
733 index bf4c6021515f..60a0aeefc4a7 100644
734 --- a/arch/powerpc/kernel/head_fsl_booke.S
735 +++ b/arch/powerpc/kernel/head_fsl_booke.S
736 @@ -452,6 +452,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
737 mfcr r13
738 stw r13, THREAD_NORMSAVE(3)(r10)
739 DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
740 +START_BTB_FLUSH_SECTION
741 + mfspr r11, SPRN_SRR1
742 + andi. r10,r11,MSR_PR
743 + beq 1f
744 + BTB_FLUSH(r10)
745 +1:
746 +END_BTB_FLUSH_SECTION
747 mfspr r10, SPRN_DEAR /* Get faulting address */
748
749 /* If we are faulting a kernel address, we have to use the
750 @@ -546,6 +553,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
751 mfcr r13
752 stw r13, THREAD_NORMSAVE(3)(r10)
753 DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
754 +START_BTB_FLUSH_SECTION
755 + mfspr r11, SPRN_SRR1
756 + andi. r10,r11,MSR_PR
757 + beq 1f
758 + BTB_FLUSH(r10)
759 +1:
760 +END_BTB_FLUSH_SECTION
761 +
762 mfspr r10, SPRN_SRR0 /* Get faulting address */
763
764 /* If we are faulting a kernel address, we have to use the
765 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
766 index 30b89d5cbb03..3b1c3bb91025 100644
767 --- a/arch/powerpc/kernel/module.c
768 +++ b/arch/powerpc/kernel/module.c
769 @@ -72,7 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr,
770 do_feature_fixups(powerpc_firmware_features,
771 (void *)sect->sh_addr,
772 (void *)sect->sh_addr + sect->sh_size);
773 -#endif
774 +#endif /* CONFIG_PPC64 */
775 +
776 +#ifdef CONFIG_PPC_BARRIER_NOSPEC
777 + sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
778 + if (sect != NULL)
779 + do_barrier_nospec_fixups_range(barrier_nospec_enabled,
780 + (void *)sect->sh_addr,
781 + (void *)sect->sh_addr + sect->sh_size);
782 +#endif /* CONFIG_PPC_BARRIER_NOSPEC */
783
784 sect = find_section(hdr, sechdrs, "__lwsync_fixup");
785 if (sect != NULL)
786 diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
787 index 2277df84ef6e..30542e833ebe 100644
788 --- a/arch/powerpc/kernel/security.c
789 +++ b/arch/powerpc/kernel/security.c
790 @@ -9,11 +9,121 @@
791 #include <linux/device.h>
792 #include <linux/seq_buf.h>
793
794 +#include <asm/asm-prototypes.h>
795 +#include <asm/code-patching.h>
796 +#include <asm/debug.h>
797 #include <asm/security_features.h>
798 +#include <asm/setup.h>
799
800
801 unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
802
803 +enum count_cache_flush_type {
804 + COUNT_CACHE_FLUSH_NONE = 0x1,
805 + COUNT_CACHE_FLUSH_SW = 0x2,
806 + COUNT_CACHE_FLUSH_HW = 0x4,
807 +};
808 +static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
809 +
810 +bool barrier_nospec_enabled;
811 +static bool no_nospec;
812 +static bool btb_flush_enabled;
813 +#ifdef CONFIG_PPC_FSL_BOOK3E
814 +static bool no_spectrev2;
815 +#endif
816 +
817 +static void enable_barrier_nospec(bool enable)
818 +{
819 + barrier_nospec_enabled = enable;
820 + do_barrier_nospec_fixups(enable);
821 +}
822 +
823 +void setup_barrier_nospec(void)
824 +{
825 + bool enable;
826 +
827 + /*
828 + * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
829 + * But there's a good reason not to. The two flags we check below are
830 + * both are enabled by default in the kernel, so if the hcall is not
831 + * functional they will be enabled.
832 + * On a system where the host firmware has been updated (so the ori
833 + * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
834 + * not been updated, we would like to enable the barrier. Dropping the
835 + * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
836 + * we potentially enable the barrier on systems where the host firmware
837 + * is not updated, but that's harmless as it's a no-op.
838 + */
839 + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
840 + security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
841 +
842 + if (!no_nospec)
843 + enable_barrier_nospec(enable);
844 +}
845 +
846 +static int __init handle_nospectre_v1(char *p)
847 +{
848 + no_nospec = true;
849 +
850 + return 0;
851 +}
852 +early_param("nospectre_v1", handle_nospectre_v1);
853 +
854 +#ifdef CONFIG_DEBUG_FS
855 +static int barrier_nospec_set(void *data, u64 val)
856 +{
857 + switch (val) {
858 + case 0:
859 + case 1:
860 + break;
861 + default:
862 + return -EINVAL;
863 + }
864 +
865 + if (!!val == !!barrier_nospec_enabled)
866 + return 0;
867 +
868 + enable_barrier_nospec(!!val);
869 +
870 + return 0;
871 +}
872 +
873 +static int barrier_nospec_get(void *data, u64 *val)
874 +{
875 + *val = barrier_nospec_enabled ? 1 : 0;
876 + return 0;
877 +}
878 +
879 +DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
880 + barrier_nospec_get, barrier_nospec_set, "%llu\n");
881 +
882 +static __init int barrier_nospec_debugfs_init(void)
883 +{
884 + debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
885 + &fops_barrier_nospec);
886 + return 0;
887 +}
888 +device_initcall(barrier_nospec_debugfs_init);
889 +#endif /* CONFIG_DEBUG_FS */
890 +
891 +#ifdef CONFIG_PPC_FSL_BOOK3E
892 +static int __init handle_nospectre_v2(char *p)
893 +{
894 + no_spectrev2 = true;
895 +
896 + return 0;
897 +}
898 +early_param("nospectre_v2", handle_nospectre_v2);
899 +void setup_spectre_v2(void)
900 +{
901 + if (no_spectrev2)
902 + do_btb_flush_fixups();
903 + else
904 + btb_flush_enabled = true;
905 +}
906 +#endif /* CONFIG_PPC_FSL_BOOK3E */
907 +
908 +#ifdef CONFIG_PPC_BOOK3S_64
909 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
910 {
911 bool thread_priv;
912 @@ -46,25 +156,39 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
913
914 return sprintf(buf, "Vulnerable\n");
915 }
916 +#endif
917
918 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
919 {
920 - if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
921 - return sprintf(buf, "Not affected\n");
922 + struct seq_buf s;
923
924 - return sprintf(buf, "Vulnerable\n");
925 + seq_buf_init(&s, buf, PAGE_SIZE - 1);
926 +
927 + if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
928 + if (barrier_nospec_enabled)
929 + seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
930 + else
931 + seq_buf_printf(&s, "Vulnerable");
932 +
933 + if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
934 + seq_buf_printf(&s, ", ori31 speculation barrier enabled");
935 +
936 + seq_buf_printf(&s, "\n");
937 + } else
938 + seq_buf_printf(&s, "Not affected\n");
939 +
940 + return s.len;
941 }
942
943 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
944 {
945 - bool bcs, ccd, ori;
946 struct seq_buf s;
947 + bool bcs, ccd;
948
949 seq_buf_init(&s, buf, PAGE_SIZE - 1);
950
951 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
952 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
953 - ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
954
955 if (bcs || ccd) {
956 seq_buf_printf(&s, "Mitigation: ");
957 @@ -77,17 +201,23 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
958
959 if (ccd)
960 seq_buf_printf(&s, "Indirect branch cache disabled");
961 - } else
962 + } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
963 + seq_buf_printf(&s, "Mitigation: Software count cache flush");
964 +
965 + if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
966 + seq_buf_printf(&s, " (hardware accelerated)");
967 + } else if (btb_flush_enabled) {
968 + seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
969 + } else {
970 seq_buf_printf(&s, "Vulnerable");
971 -
972 - if (ori)
973 - seq_buf_printf(&s, ", ori31 speculation barrier enabled");
974 + }
975
976 seq_buf_printf(&s, "\n");
977
978 return s.len;
979 }
980
981 +#ifdef CONFIG_PPC_BOOK3S_64
982 /*
983 * Store-forwarding barrier support.
984 */
985 @@ -235,3 +365,71 @@ static __init int stf_barrier_debugfs_init(void)
986 }
987 device_initcall(stf_barrier_debugfs_init);
988 #endif /* CONFIG_DEBUG_FS */
989 +
990 +static void toggle_count_cache_flush(bool enable)
991 +{
992 + if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
993 + patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
994 + count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
995 + pr_info("count-cache-flush: software flush disabled.\n");
996 + return;
997 + }
998 +
999 + patch_branch_site(&patch__call_flush_count_cache,
1000 + (u64)&flush_count_cache, BRANCH_SET_LINK);
1001 +
1002 + if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
1003 + count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
1004 + pr_info("count-cache-flush: full software flush sequence enabled.\n");
1005 + return;
1006 + }
1007 +
1008 + patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
1009 + count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
1010 + pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
1011 +}
1012 +
1013 +void setup_count_cache_flush(void)
1014 +{
1015 + toggle_count_cache_flush(true);
1016 +}
1017 +
1018 +#ifdef CONFIG_DEBUG_FS
1019 +static int count_cache_flush_set(void *data, u64 val)
1020 +{
1021 + bool enable;
1022 +
1023 + if (val == 1)
1024 + enable = true;
1025 + else if (val == 0)
1026 + enable = false;
1027 + else
1028 + return -EINVAL;
1029 +
1030 + toggle_count_cache_flush(enable);
1031 +
1032 + return 0;
1033 +}
1034 +
1035 +static int count_cache_flush_get(void *data, u64 *val)
1036 +{
1037 + if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
1038 + *val = 0;
1039 + else
1040 + *val = 1;
1041 +
1042 + return 0;
1043 +}
1044 +
1045 +DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
1046 + count_cache_flush_set, "%llu\n");
1047 +
1048 +static __init int count_cache_flush_debugfs_init(void)
1049 +{
1050 + debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
1051 + NULL, &fops_count_cache_flush);
1052 + return 0;
1053 +}
1054 +device_initcall(count_cache_flush_debugfs_init);
1055 +#endif /* CONFIG_DEBUG_FS */
1056 +#endif /* CONFIG_PPC_BOOK3S_64 */
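Both debugfs attributes added above accept 0 or 1 and reject anything else with -EINVAL. Assuming powerpc_debugfs_root sits at the usual debugfs location (an assumption — the mount path is not spelled out in this hunk), they surface as /sys/kernel/debug/powerpc/barrier_nospec and /sys/kernel/debug/powerpc/count_cache_flush. A hedged user-space sketch of toggling one:

#include <stdio.h>

/* Assumed debugfs path; not shown in this patch. */
#define KNOB "/sys/kernel/debug/powerpc/barrier_nospec"

int main(void)
{
        FILE *f = fopen(KNOB, "w");

        if (!f)
                return 1;
        fputs("1\n", f);        /* "1" enables, "0" disables */
        return fclose(f) ? 1 : 0;
}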
1057 diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
1058 index bf0f712ac0e0..5e7d70c5d065 100644
1059 --- a/arch/powerpc/kernel/setup-common.c
1060 +++ b/arch/powerpc/kernel/setup-common.c
1061 @@ -918,6 +918,9 @@ void __init setup_arch(char **cmdline_p)
1062 if (ppc_md.setup_arch)
1063 ppc_md.setup_arch();
1064
1065 + setup_barrier_nospec();
1066 + setup_spectre_v2();
1067 +
1068 paging_init();
1069
1070 /* Initialize the MMU context management stuff. */
1071 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
1072 index d929afab7b24..bdf2f7b995bb 100644
1073 --- a/arch/powerpc/kernel/signal_64.c
1074 +++ b/arch/powerpc/kernel/signal_64.c
1075 @@ -746,12 +746,25 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
1076 if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
1077 &uc_transact->uc_mcontext))
1078 goto badframe;
1079 - }
1080 - else
1081 - /* Fall through, for non-TM restore */
1082 + } else
1083 #endif
1084 - if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
1085 - goto badframe;
1086 + {
1087 + /*
1088 + * Fall through, for non-TM restore
1089 + *
1090 + * Unset MSR[TS] on the thread regs since MSR from user
1091 + * context does not have MSR active, and recheckpoint was
1092 + * not called since restore_tm_sigcontexts() was not called
1093 + * also.
1094 + *
1095 + * If not unsetting it, the code can RFID to userspace with
1096 + * MSR[TS] set, but without CPU in the proper state,
1097 + * causing a TM bad thing.
1098 + */
1099 + current->thread.regs->msr &= ~MSR_TS_MASK;
1100 + if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
1101 + goto badframe;
1102 + }
1103
1104 if (restore_altstack(&uc->uc_stack))
1105 goto badframe;
1106 diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
1107 index 988f38dced0f..82d8aae81c6a 100644
1108 --- a/arch/powerpc/kernel/swsusp_asm64.S
1109 +++ b/arch/powerpc/kernel/swsusp_asm64.S
1110 @@ -179,7 +179,7 @@ nothing_to_copy:
1111 sld r3, r3, r0
1112 li r0, 0
1113 1:
1114 - dcbf r0,r3
1115 + dcbf 0,r3
1116 addi r3,r3,0x20
1117 bdnz 1b
1118
1119 diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
1120 index c16fddbb6ab8..50d365060855 100644
1121 --- a/arch/powerpc/kernel/vmlinux.lds.S
1122 +++ b/arch/powerpc/kernel/vmlinux.lds.S
1123 @@ -153,8 +153,25 @@ SECTIONS
1124 *(__rfi_flush_fixup)
1125 __stop___rfi_flush_fixup = .;
1126 }
1127 -#endif
1128 +#endif /* CONFIG_PPC64 */
1129 +
1130 +#ifdef CONFIG_PPC_BARRIER_NOSPEC
1131 + . = ALIGN(8);
1132 + __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
1133 + __start___barrier_nospec_fixup = .;
1134 + *(__barrier_nospec_fixup)
1135 + __stop___barrier_nospec_fixup = .;
1136 + }
1137 +#endif /* CONFIG_PPC_BARRIER_NOSPEC */
1138
1139 +#ifdef CONFIG_PPC_FSL_BOOK3E
1140 + . = ALIGN(8);
1141 + __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
1142 + __start__btb_flush_fixup = .;
1143 + *(__btb_flush_fixup)
1144 + __stop__btb_flush_fixup = .;
1145 + }
1146 +#endif
1147 EXCEPTION_TABLE(0)
1148
1149 NOTES :kernel :notes
1150 diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
1151 index 81bd8a07aa51..612b7f6a887f 100644
1152 --- a/arch/powerpc/kvm/bookehv_interrupts.S
1153 +++ b/arch/powerpc/kvm/bookehv_interrupts.S
1154 @@ -75,6 +75,10 @@
1155 PPC_LL r1, VCPU_HOST_STACK(r4)
1156 PPC_LL r2, HOST_R2(r1)
1157
1158 +START_BTB_FLUSH_SECTION
1159 + BTB_FLUSH(r10)
1160 +END_BTB_FLUSH_SECTION
1161 +
1162 mfspr r10, SPRN_PID
1163 lwz r8, VCPU_HOST_PID(r4)
1164 PPC_LL r11, VCPU_SHARED(r4)
1165 diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
1166 index 990db69a1d0b..fa88f641ac03 100644
1167 --- a/arch/powerpc/kvm/e500_emulate.c
1168 +++ b/arch/powerpc/kvm/e500_emulate.c
1169 @@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
1170 vcpu->arch.pwrmgtcr0 = spr_val;
1171 break;
1172
1173 + case SPRN_BUCSR:
1174 + /*
1175 + * If we are here, it means that we have already flushed the
1176 + * branch predictor, so just return to guest.
1177 + */
1178 + break;
1179 +
1180 /* extra exceptions */
1181 #ifdef CONFIG_SPE_POSSIBLE
1182 case SPRN_IVOR32:
1183 diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
1184 index 753d591f1b52..14535ad4cdd1 100644
1185 --- a/arch/powerpc/lib/code-patching.c
1186 +++ b/arch/powerpc/lib/code-patching.c
1187 @@ -14,12 +14,20 @@
1188 #include <asm/page.h>
1189 #include <asm/code-patching.h>
1190 #include <asm/uaccess.h>
1191 +#include <asm/setup.h>
1192 +#include <asm/sections.h>
1193
1194
1195 int patch_instruction(unsigned int *addr, unsigned int instr)
1196 {
1197 int err;
1198
1199 + /* Make sure we aren't patching a freed init section */
1200 + if (init_mem_is_free && init_section_contains(addr, 4)) {
1201 + pr_debug("Skipping init section patching addr: 0x%px\n", addr);
1202 + return 0;
1203 + }
1204 +
1205 __put_user_size(instr, addr, 4, err);
1206 if (err)
1207 return err;
1208 @@ -32,6 +40,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
1209 return patch_instruction(addr, create_branch(addr, target, flags));
1210 }
1211
1212 +int patch_branch_site(s32 *site, unsigned long target, int flags)
1213 +{
1214 + unsigned int *addr;
1215 +
1216 + addr = (unsigned int *)((unsigned long)site + *site);
1217 + return patch_instruction(addr, create_branch(addr, target, flags));
1218 +}
1219 +
1220 +int patch_instruction_site(s32 *site, unsigned int instr)
1221 +{
1222 + unsigned int *addr;
1223 +
1224 + addr = (unsigned int *)((unsigned long)site + *site);
1225 + return patch_instruction(addr, instr);
1226 +}
1227 +
1228 unsigned int create_branch(const unsigned int *addr,
1229 unsigned long target, int flags)
1230 {
1231 diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
1232 index a84d333ecb09..ca5fc8fa7efc 100644
1233 --- a/arch/powerpc/lib/copypage_power7.S
1234 +++ b/arch/powerpc/lib/copypage_power7.S
1235 @@ -45,13 +45,13 @@ _GLOBAL(copypage_power7)
1236 .machine push
1237 .machine "power4"
1238 /* setup read stream 0 */
1239 - dcbt r0,r4,0b01000 /* addr from */
1240 - dcbt r0,r7,0b01010 /* length and depth from */
1241 + dcbt 0,r4,0b01000 /* addr from */
1242 + dcbt 0,r7,0b01010 /* length and depth from */
1243 /* setup write stream 1 */
1244 - dcbtst r0,r9,0b01000 /* addr to */
1245 - dcbtst r0,r10,0b01010 /* length and depth to */
1246 + dcbtst 0,r9,0b01000 /* addr to */
1247 + dcbtst 0,r10,0b01010 /* length and depth to */
1248 eieio
1249 - dcbt r0,r8,0b01010 /* all streams GO */
1250 + dcbt 0,r8,0b01010 /* all streams GO */
1251 .machine pop
1252
1253 #ifdef CONFIG_ALTIVEC
1254 @@ -83,7 +83,7 @@ _GLOBAL(copypage_power7)
1255 li r12,112
1256
1257 .align 5
1258 -1: lvx v7,r0,r4
1259 +1: lvx v7,0,r4
1260 lvx v6,r4,r6
1261 lvx v5,r4,r7
1262 lvx v4,r4,r8
1263 @@ -92,7 +92,7 @@ _GLOBAL(copypage_power7)
1264 lvx v1,r4,r11
1265 lvx v0,r4,r12
1266 addi r4,r4,128
1267 - stvx v7,r0,r3
1268 + stvx v7,0,r3
1269 stvx v6,r3,r6
1270 stvx v5,r3,r7
1271 stvx v4,r3,r8
1272 diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
1273 index da0c568d18c4..391694814691 100644
1274 --- a/arch/powerpc/lib/copyuser_power7.S
1275 +++ b/arch/powerpc/lib/copyuser_power7.S
1276 @@ -327,13 +327,13 @@ err1; stb r0,0(r3)
1277 .machine push
1278 .machine "power4"
1279 /* setup read stream 0 */
1280 - dcbt r0,r6,0b01000 /* addr from */
1281 - dcbt r0,r7,0b01010 /* length and depth from */
1282 + dcbt 0,r6,0b01000 /* addr from */
1283 + dcbt 0,r7,0b01010 /* length and depth from */
1284 /* setup write stream 1 */
1285 - dcbtst r0,r9,0b01000 /* addr to */
1286 - dcbtst r0,r10,0b01010 /* length and depth to */
1287 + dcbtst 0,r9,0b01000 /* addr to */
1288 + dcbtst 0,r10,0b01010 /* length and depth to */
1289 eieio
1290 - dcbt r0,r8,0b01010 /* all streams GO */
1291 + dcbt 0,r8,0b01010 /* all streams GO */
1292 .machine pop
1293
1294 beq cr1,.Lunwind_stack_nonvmx_copy
1295 @@ -388,26 +388,26 @@ err3; std r0,0(r3)
1296 li r11,48
1297
1298 bf cr7*4+3,5f
1299 -err3; lvx v1,r0,r4
1300 +err3; lvx v1,0,r4
1301 addi r4,r4,16
1302 -err3; stvx v1,r0,r3
1303 +err3; stvx v1,0,r3
1304 addi r3,r3,16
1305
1306 5: bf cr7*4+2,6f
1307 -err3; lvx v1,r0,r4
1308 +err3; lvx v1,0,r4
1309 err3; lvx v0,r4,r9
1310 addi r4,r4,32
1311 -err3; stvx v1,r0,r3
1312 +err3; stvx v1,0,r3
1313 err3; stvx v0,r3,r9
1314 addi r3,r3,32
1315
1316 6: bf cr7*4+1,7f
1317 -err3; lvx v3,r0,r4
1318 +err3; lvx v3,0,r4
1319 err3; lvx v2,r4,r9
1320 err3; lvx v1,r4,r10
1321 err3; lvx v0,r4,r11
1322 addi r4,r4,64
1323 -err3; stvx v3,r0,r3
1324 +err3; stvx v3,0,r3
1325 err3; stvx v2,r3,r9
1326 err3; stvx v1,r3,r10
1327 err3; stvx v0,r3,r11
1328 @@ -433,7 +433,7 @@ err3; stvx v0,r3,r11
1329 */
1330 .align 5
1331 8:
1332 -err4; lvx v7,r0,r4
1333 +err4; lvx v7,0,r4
1334 err4; lvx v6,r4,r9
1335 err4; lvx v5,r4,r10
1336 err4; lvx v4,r4,r11
1337 @@ -442,7 +442,7 @@ err4; lvx v2,r4,r14
1338 err4; lvx v1,r4,r15
1339 err4; lvx v0,r4,r16
1340 addi r4,r4,128
1341 -err4; stvx v7,r0,r3
1342 +err4; stvx v7,0,r3
1343 err4; stvx v6,r3,r9
1344 err4; stvx v5,r3,r10
1345 err4; stvx v4,r3,r11
1346 @@ -463,29 +463,29 @@ err4; stvx v0,r3,r16
1347 mtocrf 0x01,r6
1348
1349 bf cr7*4+1,9f
1350 -err3; lvx v3,r0,r4
1351 +err3; lvx v3,0,r4
1352 err3; lvx v2,r4,r9
1353 err3; lvx v1,r4,r10
1354 err3; lvx v0,r4,r11
1355 addi r4,r4,64
1356 -err3; stvx v3,r0,r3
1357 +err3; stvx v3,0,r3
1358 err3; stvx v2,r3,r9
1359 err3; stvx v1,r3,r10
1360 err3; stvx v0,r3,r11
1361 addi r3,r3,64
1362
1363 9: bf cr7*4+2,10f
1364 -err3; lvx v1,r0,r4
1365 +err3; lvx v1,0,r4
1366 err3; lvx v0,r4,r9
1367 addi r4,r4,32
1368 -err3; stvx v1,r0,r3
1369 +err3; stvx v1,0,r3
1370 err3; stvx v0,r3,r9
1371 addi r3,r3,32
1372
1373 10: bf cr7*4+3,11f
1374 -err3; lvx v1,r0,r4
1375 +err3; lvx v1,0,r4
1376 addi r4,r4,16
1377 -err3; stvx v1,r0,r3
1378 +err3; stvx v1,0,r3
1379 addi r3,r3,16
1380
1381 /* Up to 15B to go */
1382 @@ -565,25 +565,25 @@ err3; lvx v0,0,r4
1383 addi r4,r4,16
1384
1385 bf cr7*4+3,5f
1386 -err3; lvx v1,r0,r4
1387 +err3; lvx v1,0,r4
1388 VPERM(v8,v0,v1,v16)
1389 addi r4,r4,16
1390 -err3; stvx v8,r0,r3
1391 +err3; stvx v8,0,r3
1392 addi r3,r3,16
1393 vor v0,v1,v1
1394
1395 5: bf cr7*4+2,6f
1396 -err3; lvx v1,r0,r4
1397 +err3; lvx v1,0,r4
1398 VPERM(v8,v0,v1,v16)
1399 err3; lvx v0,r4,r9
1400 VPERM(v9,v1,v0,v16)
1401 addi r4,r4,32
1402 -err3; stvx v8,r0,r3
1403 +err3; stvx v8,0,r3
1404 err3; stvx v9,r3,r9
1405 addi r3,r3,32
1406
1407 6: bf cr7*4+1,7f
1408 -err3; lvx v3,r0,r4
1409 +err3; lvx v3,0,r4
1410 VPERM(v8,v0,v3,v16)
1411 err3; lvx v2,r4,r9
1412 VPERM(v9,v3,v2,v16)
1413 @@ -592,7 +592,7 @@ err3; lvx v1,r4,r10
1414 err3; lvx v0,r4,r11
1415 VPERM(v11,v1,v0,v16)
1416 addi r4,r4,64
1417 -err3; stvx v8,r0,r3
1418 +err3; stvx v8,0,r3
1419 err3; stvx v9,r3,r9
1420 err3; stvx v10,r3,r10
1421 err3; stvx v11,r3,r11
1422 @@ -618,7 +618,7 @@ err3; stvx v11,r3,r11
1423 */
1424 .align 5
1425 8:
1426 -err4; lvx v7,r0,r4
1427 +err4; lvx v7,0,r4
1428 VPERM(v8,v0,v7,v16)
1429 err4; lvx v6,r4,r9
1430 VPERM(v9,v7,v6,v16)
1431 @@ -635,7 +635,7 @@ err4; lvx v1,r4,r15
1432 err4; lvx v0,r4,r16
1433 VPERM(v15,v1,v0,v16)
1434 addi r4,r4,128
1435 -err4; stvx v8,r0,r3
1436 +err4; stvx v8,0,r3
1437 err4; stvx v9,r3,r9
1438 err4; stvx v10,r3,r10
1439 err4; stvx v11,r3,r11
1440 @@ -656,7 +656,7 @@ err4; stvx v15,r3,r16
1441 mtocrf 0x01,r6
1442
1443 bf cr7*4+1,9f
1444 -err3; lvx v3,r0,r4
1445 +err3; lvx v3,0,r4
1446 VPERM(v8,v0,v3,v16)
1447 err3; lvx v2,r4,r9
1448 VPERM(v9,v3,v2,v16)
1449 @@ -665,27 +665,27 @@ err3; lvx v1,r4,r10
1450 err3; lvx v0,r4,r11
1451 VPERM(v11,v1,v0,v16)
1452 addi r4,r4,64
1453 -err3; stvx v8,r0,r3
1454 +err3; stvx v8,0,r3
1455 err3; stvx v9,r3,r9
1456 err3; stvx v10,r3,r10
1457 err3; stvx v11,r3,r11
1458 addi r3,r3,64
1459
1460 9: bf cr7*4+2,10f
1461 -err3; lvx v1,r0,r4
1462 +err3; lvx v1,0,r4
1463 VPERM(v8,v0,v1,v16)
1464 err3; lvx v0,r4,r9
1465 VPERM(v9,v1,v0,v16)
1466 addi r4,r4,32
1467 -err3; stvx v8,r0,r3
1468 +err3; stvx v8,0,r3
1469 err3; stvx v9,r3,r9
1470 addi r3,r3,32
1471
1472 10: bf cr7*4+3,11f
1473 -err3; lvx v1,r0,r4
1474 +err3; lvx v1,0,r4
1475 VPERM(v8,v0,v1,v16)
1476 addi r4,r4,16
1477 -err3; stvx v8,r0,r3
1478 +err3; stvx v8,0,r3
1479 addi r3,r3,16
1480
1481 /* Up to 15B to go */
1482 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
1483 index cf1398e3c2e0..e6ed0ec94bc8 100644
1484 --- a/arch/powerpc/lib/feature-fixups.c
1485 +++ b/arch/powerpc/lib/feature-fixups.c
1486 @@ -277,8 +277,101 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
1487 (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
1488 : "unknown");
1489 }
1490 +
1491 +void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
1492 +{
1493 + unsigned int instr, *dest;
1494 + long *start, *end;
1495 + int i;
1496 +
1497 + start = fixup_start;
1498 + end = fixup_end;
1499 +
1500 + instr = 0x60000000; /* nop */
1501 +
1502 + if (enable) {
1503 + pr_info("barrier-nospec: using ORI speculation barrier\n");
1504 + instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
1505 + }
1506 +
1507 + for (i = 0; start < end; start++, i++) {
1508 + dest = (void *)start + *start;
1509 +
1510 + pr_devel("patching dest %lx\n", (unsigned long)dest);
1511 + patch_instruction(dest, instr);
1512 + }
1513 +
1514 + printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
1515 +}
1516 +
1517 #endif /* CONFIG_PPC_BOOK3S_64 */
1518
1519 +#ifdef CONFIG_PPC_BARRIER_NOSPEC
1520 +void do_barrier_nospec_fixups(bool enable)
1521 +{
1522 + void *start, *end;
1523 +
1524 + start = PTRRELOC(&__start___barrier_nospec_fixup),
1525 + end = PTRRELOC(&__stop___barrier_nospec_fixup);
1526 +
1527 + do_barrier_nospec_fixups_range(enable, start, end);
1528 +}
1529 +#endif /* CONFIG_PPC_BARRIER_NOSPEC */
1530 +
1531 +#ifdef CONFIG_PPC_FSL_BOOK3E
1532 +void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
1533 +{
1534 + unsigned int instr[2], *dest;
1535 + long *start, *end;
1536 + int i;
1537 +
1538 + start = fixup_start;
1539 + end = fixup_end;
1540 +
1541 + instr[0] = PPC_INST_NOP;
1542 + instr[1] = PPC_INST_NOP;
1543 +
1544 + if (enable) {
1545 + pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
1546 + instr[0] = PPC_INST_ISYNC;
1547 + instr[1] = PPC_INST_SYNC;
1548 + }
1549 +
1550 + for (i = 0; start < end; start++, i++) {
1551 + dest = (void *)start + *start;
1552 +
1553 + pr_devel("patching dest %lx\n", (unsigned long)dest);
1554 + patch_instruction(dest, instr[0]);
1555 + patch_instruction(dest + 1, instr[1]);
1556 + }
1557 +
1558 + printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
1559 +}
1560 +
1561 +static void patch_btb_flush_section(long *curr)
1562 +{
1563 + unsigned int *start, *end;
1564 +
1565 + start = (void *)curr + *curr;
1566 + end = (void *)curr + *(curr + 1);
1567 + for (; start < end; start++) {
1568 + pr_devel("patching dest %lx\n", (unsigned long)start);
1569 + patch_instruction(start, PPC_INST_NOP);
1570 + }
1571 +}
1572 +
1573 +void do_btb_flush_fixups(void)
1574 +{
1575 + long *start, *end;
1576 +
1577 + start = PTRRELOC(&__start__btb_flush_fixup);
1578 + end = PTRRELOC(&__stop__btb_flush_fixup);
1579 +
1580 + for (; start < end; start += 2)
1581 + patch_btb_flush_section(start);
1582 +}
1583 +#endif /* CONFIG_PPC_FSL_BOOK3E */
1584 +
1585 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
1586 {
1587 long *start, *end;
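The two magic constants in do_barrier_nospec_fixups_range() above decode as ori instructions (primary opcode 24): 0x60000000 is ori 0,0,0, the canonical PowerPC nop, and 0x63ff0000 is ori 31,31,0, the otherwise-no-op form that updated firmware/hardware treats as a speculation barrier, as the inline comments note. A quick encoding check (helper name illustrative):

#include <assert.h>
#include <stdint.h>

static uint32_t ppc_ori(uint32_t rs, uint32_t ra, uint32_t ui)
{
        return (24u << 26) | (rs << 21) | (ra << 16) | ui;
}

int main(void)
{
        assert(ppc_ori(0, 0, 0)   == 0x60000000);  /* nop */
        assert(ppc_ori(31, 31, 0) == 0x63ff0000);  /* barrier form */
        return 0;
}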
1588 diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
1589 index 786234fd4e91..193909abd18b 100644
1590 --- a/arch/powerpc/lib/memcpy_power7.S
1591 +++ b/arch/powerpc/lib/memcpy_power7.S
1592 @@ -261,12 +261,12 @@ _GLOBAL(memcpy_power7)
1593
1594 .machine push
1595 .machine "power4"
1596 - dcbt r0,r6,0b01000
1597 - dcbt r0,r7,0b01010
1598 - dcbtst r0,r9,0b01000
1599 - dcbtst r0,r10,0b01010
1600 + dcbt 0,r6,0b01000
1601 + dcbt 0,r7,0b01010
1602 + dcbtst 0,r9,0b01000
1603 + dcbtst 0,r10,0b01010
1604 eieio
1605 - dcbt r0,r8,0b01010 /* GO */
1606 + dcbt 0,r8,0b01010 /* GO */
1607 .machine pop
1608
1609 beq cr1,.Lunwind_stack_nonvmx_copy
1610 @@ -321,26 +321,26 @@ _GLOBAL(memcpy_power7)
1611 li r11,48
1612
1613 bf cr7*4+3,5f
1614 - lvx v1,r0,r4
1615 + lvx v1,0,r4
1616 addi r4,r4,16
1617 - stvx v1,r0,r3
1618 + stvx v1,0,r3
1619 addi r3,r3,16
1620
1621 5: bf cr7*4+2,6f
1622 - lvx v1,r0,r4
1623 + lvx v1,0,r4
1624 lvx v0,r4,r9
1625 addi r4,r4,32
1626 - stvx v1,r0,r3
1627 + stvx v1,0,r3
1628 stvx v0,r3,r9
1629 addi r3,r3,32
1630
1631 6: bf cr7*4+1,7f
1632 - lvx v3,r0,r4
1633 + lvx v3,0,r4
1634 lvx v2,r4,r9
1635 lvx v1,r4,r10
1636 lvx v0,r4,r11
1637 addi r4,r4,64
1638 - stvx v3,r0,r3
1639 + stvx v3,0,r3
1640 stvx v2,r3,r9
1641 stvx v1,r3,r10
1642 stvx v0,r3,r11
1643 @@ -366,7 +366,7 @@ _GLOBAL(memcpy_power7)
1644 */
1645 .align 5
1646 8:
1647 - lvx v7,r0,r4
1648 + lvx v7,0,r4
1649 lvx v6,r4,r9
1650 lvx v5,r4,r10
1651 lvx v4,r4,r11
1652 @@ -375,7 +375,7 @@ _GLOBAL(memcpy_power7)
1653 lvx v1,r4,r15
1654 lvx v0,r4,r16
1655 addi r4,r4,128
1656 - stvx v7,r0,r3
1657 + stvx v7,0,r3
1658 stvx v6,r3,r9
1659 stvx v5,r3,r10
1660 stvx v4,r3,r11
1661 @@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
1662 mtocrf 0x01,r6
1663
1664 bf cr7*4+1,9f
1665 - lvx v3,r0,r4
1666 + lvx v3,0,r4
1667 lvx v2,r4,r9
1668 lvx v1,r4,r10
1669 lvx v0,r4,r11
1670 addi r4,r4,64
1671 - stvx v3,r0,r3
1672 + stvx v3,0,r3
1673 stvx v2,r3,r9
1674 stvx v1,r3,r10
1675 stvx v0,r3,r11
1676 addi r3,r3,64
1677
1678 9: bf cr7*4+2,10f
1679 - lvx v1,r0,r4
1680 + lvx v1,0,r4
1681 lvx v0,r4,r9
1682 addi r4,r4,32
1683 - stvx v1,r0,r3
1684 + stvx v1,0,r3
1685 stvx v0,r3,r9
1686 addi r3,r3,32
1687
1688 10: bf cr7*4+3,11f
1689 - lvx v1,r0,r4
1690 + lvx v1,0,r4
1691 addi r4,r4,16
1692 - stvx v1,r0,r3
1693 + stvx v1,0,r3
1694 addi r3,r3,16
1695
1696 /* Up to 15B to go */
1697 @@ -499,25 +499,25 @@ _GLOBAL(memcpy_power7)
1698 addi r4,r4,16
1699
1700 bf cr7*4+3,5f
1701 - lvx v1,r0,r4
1702 + lvx v1,0,r4
1703 VPERM(v8,v0,v1,v16)
1704 addi r4,r4,16
1705 - stvx v8,r0,r3
1706 + stvx v8,0,r3
1707 addi r3,r3,16
1708 vor v0,v1,v1
1709
1710 5: bf cr7*4+2,6f
1711 - lvx v1,r0,r4
1712 + lvx v1,0,r4
1713 VPERM(v8,v0,v1,v16)
1714 lvx v0,r4,r9
1715 VPERM(v9,v1,v0,v16)
1716 addi r4,r4,32
1717 - stvx v8,r0,r3
1718 + stvx v8,0,r3
1719 stvx v9,r3,r9
1720 addi r3,r3,32
1721
1722 6: bf cr7*4+1,7f
1723 - lvx v3,r0,r4
1724 + lvx v3,0,r4
1725 VPERM(v8,v0,v3,v16)
1726 lvx v2,r4,r9
1727 VPERM(v9,v3,v2,v16)
1728 @@ -526,7 +526,7 @@ _GLOBAL(memcpy_power7)
1729 lvx v0,r4,r11
1730 VPERM(v11,v1,v0,v16)
1731 addi r4,r4,64
1732 - stvx v8,r0,r3
1733 + stvx v8,0,r3
1734 stvx v9,r3,r9
1735 stvx v10,r3,r10
1736 stvx v11,r3,r11
1737 @@ -552,7 +552,7 @@ _GLOBAL(memcpy_power7)
1738 */
1739 .align 5
1740 8:
1741 - lvx v7,r0,r4
1742 + lvx v7,0,r4
1743 VPERM(v8,v0,v7,v16)
1744 lvx v6,r4,r9
1745 VPERM(v9,v7,v6,v16)
1746 @@ -569,7 +569,7 @@ _GLOBAL(memcpy_power7)
1747 lvx v0,r4,r16
1748 VPERM(v15,v1,v0,v16)
1749 addi r4,r4,128
1750 - stvx v8,r0,r3
1751 + stvx v8,0,r3
1752 stvx v9,r3,r9
1753 stvx v10,r3,r10
1754 stvx v11,r3,r11
1755 @@ -590,7 +590,7 @@ _GLOBAL(memcpy_power7)
1756 mtocrf 0x01,r6
1757
1758 bf cr7*4+1,9f
1759 - lvx v3,r0,r4
1760 + lvx v3,0,r4
1761 VPERM(v8,v0,v3,v16)
1762 lvx v2,r4,r9
1763 VPERM(v9,v3,v2,v16)
1764 @@ -599,27 +599,27 @@ _GLOBAL(memcpy_power7)
1765 lvx v0,r4,r11
1766 VPERM(v11,v1,v0,v16)
1767 addi r4,r4,64
1768 - stvx v8,r0,r3
1769 + stvx v8,0,r3
1770 stvx v9,r3,r9
1771 stvx v10,r3,r10
1772 stvx v11,r3,r11
1773 addi r3,r3,64
1774
1775 9: bf cr7*4+2,10f
1776 - lvx v1,r0,r4
1777 + lvx v1,0,r4
1778 VPERM(v8,v0,v1,v16)
1779 lvx v0,r4,r9
1780 VPERM(v9,v1,v0,v16)
1781 addi r4,r4,32
1782 - stvx v8,r0,r3
1783 + stvx v8,0,r3
1784 stvx v9,r3,r9
1785 addi r3,r3,32
1786
1787 10: bf cr7*4+3,11f
1788 - lvx v1,r0,r4
1789 + lvx v1,0,r4
1790 VPERM(v8,v0,v1,v16)
1791 addi r4,r4,16
1792 - stvx v8,r0,r3
1793 + stvx v8,0,r3
1794 addi r3,r3,16
1795
1796 /* Up to 15B to go */
1797 diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
1798 index 57ace356c949..11e6372537fd 100644
1799 --- a/arch/powerpc/lib/string_64.S
1800 +++ b/arch/powerpc/lib/string_64.S
1801 @@ -192,7 +192,7 @@ err1; std r0,8(r3)
1802 mtctr r6
1803 mr r8,r3
1804 14:
1805 -err1; dcbz r0,r3
1806 +err1; dcbz 0,r3
1807 add r3,r3,r9
1808 bdnz 14b
1809
1810 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
1811 index 5f844337de21..1e93dbc88e80 100644
1812 --- a/arch/powerpc/mm/mem.c
1813 +++ b/arch/powerpc/mm/mem.c
1814 @@ -62,6 +62,7 @@
1815 #endif
1816
1817 unsigned long long memory_limit;
1818 +bool init_mem_is_free;
1819
1820 #ifdef CONFIG_HIGHMEM
1821 pte_t *kmap_pte;
1822 @@ -396,6 +397,7 @@ void __init mem_init(void)
1823 void free_initmem(void)
1824 {
1825 ppc_md.progress = ppc_printk_progress;
1826 + init_mem_is_free = true;
1827 free_initmem_default(POISON_FREE_INITMEM);
1828 }
1829
1830 diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
1831 index eb82d787d99a..b7e9c09dfe19 100644
1832 --- a/arch/powerpc/mm/tlb_low_64e.S
1833 +++ b/arch/powerpc/mm/tlb_low_64e.S
1834 @@ -69,6 +69,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
1835 std r15,EX_TLB_R15(r12)
1836 std r10,EX_TLB_CR(r12)
1837 #ifdef CONFIG_PPC_FSL_BOOK3E
1838 +START_BTB_FLUSH_SECTION
1839 + mfspr r11, SPRN_SRR1
1840 + andi. r10,r11,MSR_PR
1841 + beq 1f
1842 + BTB_FLUSH(r10)
1843 +1:
1844 +END_BTB_FLUSH_SECTION
1845 std r7,EX_TLB_R7(r12)
1846 #endif
1847 TLB_MISS_PROLOG_STATS
1848 diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
1849 index 17203abf38e8..365e2b620201 100644
1850 --- a/arch/powerpc/platforms/powernv/setup.c
1851 +++ b/arch/powerpc/platforms/powernv/setup.c
1852 @@ -77,6 +77,12 @@ static void init_fw_feat_flags(struct device_node *np)
1853 if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
1854 security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
1855
1856 + if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
1857 + security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
1858 +
1859 + if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
1860 + security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
1861 +
1862 /*
1863 * The features below are enabled by default, so we instead look to see
1864 * if firmware has *disabled* them, and clear them if so.
1865 @@ -123,6 +129,7 @@ static void pnv_setup_rfi_flush(void)
1866 security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
1867
1868 setup_rfi_flush(type, enable);
1869 + setup_count_cache_flush();
1870 }
1871
1872 static void __init pnv_setup_arch(void)
1873 diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
1874 index 91ade7755823..adb09ab87f7c 100644
1875 --- a/arch/powerpc/platforms/pseries/setup.c
1876 +++ b/arch/powerpc/platforms/pseries/setup.c
1877 @@ -475,6 +475,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
1878 if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
1879 security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
1880
1881 + if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
1882 + security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
1883 +
1884 + if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
1885 + security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
1886 +
1887 /*
1888 * The features below are enabled by default, so we instead look to see
1889 * if firmware has *disabled* them, and clear them if so.
1890 @@ -525,6 +531,7 @@ void pseries_setup_rfi_flush(void)
1891 security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
1892
1893 setup_rfi_flush(types, enable);
1894 + setup_count_cache_flush();
1895 }
1896
1897 static void __init pSeries_setup_arch(void)
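
Both setup paths above funnel firmware information into the same bit mask that setup_count_cache_flush() later consults. A rough sketch of that plumbing, simplified from the security-features helpers and not a verbatim copy:

	/* Assumed shape; the real helpers live in asm/security_features.h. */
	static unsigned long long powerpc_security_features;

	static inline void security_ftr_set(unsigned long long feature)
	{
		powerpc_security_features |= feature;
	}

	static inline int security_ftr_enabled(unsigned long long feature)
	{
		return (powerpc_security_features & feature) != 0;
	}
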
1898 diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
1899 index d5409660f5de..756dc9432d15 100644
1900 --- a/arch/x86/entry/vdso/Makefile
1901 +++ b/arch/x86/entry/vdso/Makefile
1902 @@ -47,10 +47,8 @@ targets += $(vdso_img_sodbg)
1903
1904 export CPPFLAGS_vdso.lds += -P -C
1905
1906 -VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
1907 - -Wl,--no-undefined \
1908 - -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
1909 - $(DISABLE_LTO)
1910 +VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
1911 + -z max-page-size=4096
1912
1913 $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
1914 $(call if_changed,vdso)
1915 @@ -96,10 +94,8 @@ CFLAGS_REMOVE_vvar.o = -pg
1916 #
1917
1918 CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
1919 -VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
1920 - -Wl,-soname=linux-vdso.so.1 \
1921 - -Wl,-z,max-page-size=4096 \
1922 - -Wl,-z,common-page-size=4096
1923 +VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
1924 + -z max-page-size=4096
1925
1926 # 64-bit objects to re-brand as x32
1927 vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
1928 @@ -127,7 +123,7 @@ $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
1929 $(call if_changed,vdso)
1930
1931 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
1932 -VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
1933 +VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
1934
1935 # This makes sure the $(obj) subdirectory exists even though vdso32/
1936 # is not a kbuild sub-make subdirectory.
1937 @@ -165,13 +161,13 @@ $(obj)/vdso32.so.dbg: FORCE \
1938 # The DSO images are built using a special linker script.
1939 #
1940 quiet_cmd_vdso = VDSO $@
1941 - cmd_vdso = $(CC) -nostdlib -o $@ \
1942 + cmd_vdso = $(LD) -nostdlib -o $@ \
1943 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
1944 - -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
1945 + -T $(filter %.lds,$^) $(filter %.o,$^) && \
1946 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
1947
1948 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
1949 - $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
1950 +VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
1951 + $(call ld-option, --build-id) -Bsymbolic
1952 GCOV_PROFILE := n
1953
1954 #
1955 diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
1956 index 8e9dbe7b73a1..5cc2ce4ab8a3 100644
1957 --- a/arch/x86/include/asm/suspend_32.h
1958 +++ b/arch/x86/include/asm/suspend_32.h
1959 @@ -11,7 +11,13 @@
1960
1961 /* image of the saved processor state */
1962 struct saved_context {
1963 - u16 es, fs, gs, ss;
1964 + /*
1965 + * On x86_32, all segment registers, with the possible exception of
1966 + * gs, are saved at kernel entry in pt_regs.
1967 + */
1968 +#ifdef CONFIG_X86_32_LAZY_GS
1969 + u16 gs;
1970 +#endif
1971 unsigned long cr0, cr2, cr3, cr4;
1972 u64 misc_enable;
1973 bool misc_enable_saved;
1974 diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
1975 index 2bd96b4df140..701751918921 100644
1976 --- a/arch/x86/include/asm/suspend_64.h
1977 +++ b/arch/x86/include/asm/suspend_64.h
1978 @@ -19,8 +19,20 @@
1979 */
1980 struct saved_context {
1981 struct pt_regs regs;
1982 - u16 ds, es, fs, gs, ss;
1983 - unsigned long gs_base, gs_kernel_base, fs_base;
1984 +
1985 + /*
1986 + * User CS and SS are saved in current_pt_regs(). The rest of the
1987 + * segment selectors need to be saved and restored here.
1988 + */
1989 + u16 ds, es, fs, gs;
1990 +
1991 + /*
1992 + * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
1993 + * so we save them separately. We save the kernelmode GSBASE to
1994 + * restore percpu access after resume.
1995 + */
1996 + unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
1997 +
1998 unsigned long cr0, cr2, cr3, cr4, cr8;
1999 u64 misc_enable;
2000 bool misc_enable_saved;
2001 @@ -29,8 +41,7 @@ struct saved_context {
2002 u16 gdt_pad; /* Unused */
2003 struct desc_ptr gdt_desc;
2004 u16 idt_pad;
2005 - u16 idt_limit;
2006 - unsigned long idt_base;
2007 + struct desc_ptr idt;
2008 u16 ldt;
2009 u16 tss;
2010 unsigned long tr;
2011 diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
2012 index ccdc23d89b60..9f694537a103 100644
2013 --- a/arch/x86/include/asm/xen/hypercall.h
2014 +++ b/arch/x86/include/asm/xen/hypercall.h
2015 @@ -216,6 +216,9 @@ privcmd_call(unsigned call,
2016 __HYPERCALL_DECLS;
2017 __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
2018
2019 + if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
2020 + return -EINVAL;
2021 +
2022 stac();
2023 asm volatile(CALL_NOSPEC
2024 : __HYPERCALL_5PARAM
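
The new guard bounds the hypercall number before it is used to index the hypercall page. Assuming 4 KiB pages and the 32-byte per-hypercall stubs used on x86, valid call numbers are 0..127; a standalone illustration of the arithmetic:

	#include <assert.h>

	/* Illustration only: mirrors the shape of the hypercall page,
	 * one 32-byte stub per hypercall. */
	struct hypercall_entry { char _entry[32]; };

	int main(void)
	{
		unsigned long page_size = 4096;	/* PAGE_SIZE on x86 */

		assert(page_size / sizeof(struct hypercall_entry) == 128);
		return 0;
	}
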
2025 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
2026 index 53cace2ec0e2..054e27671df9 100644
2027 --- a/arch/x86/power/cpu.c
2028 +++ b/arch/x86/power/cpu.c
2029 @@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt)
2030 /*
2031 * descriptor tables
2032 */
2033 -#ifdef CONFIG_X86_32
2034 store_idt(&ctxt->idt);
2035 -#else
2036 -/* CONFIG_X86_64 */
2037 - store_idt((struct desc_ptr *)&ctxt->idt_limit);
2038 -#endif
2039 +
2040 /*
2041 * We save it here, but restore it only in the hibernate case.
2042 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
2043 @@ -103,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
2044 /*
2045 * segment registers
2046 */
2047 -#ifdef CONFIG_X86_32
2048 - savesegment(es, ctxt->es);
2049 - savesegment(fs, ctxt->fs);
2050 +#ifdef CONFIG_X86_32_LAZY_GS
2051 savesegment(gs, ctxt->gs);
2052 - savesegment(ss, ctxt->ss);
2053 -#else
2054 -/* CONFIG_X86_64 */
2055 - asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
2056 - asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
2057 - asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
2058 - asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
2059 - asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
2060 +#endif
2061 +#ifdef CONFIG_X86_64
2062 + savesegment(gs, ctxt->gs);
2063 + savesegment(fs, ctxt->fs);
2064 + savesegment(ds, ctxt->ds);
2065 + savesegment(es, ctxt->es);
2066
2067 rdmsrl(MSR_FS_BASE, ctxt->fs_base);
2068 - rdmsrl(MSR_GS_BASE, ctxt->gs_base);
2069 - rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
2070 + rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
2071 + rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
2072 mtrr_save_fixed_ranges(NULL);
2073
2074 rdmsrl(MSR_EFER, ctxt->efer);
2075 @@ -178,6 +170,9 @@ static void fix_processor_context(void)
2076 write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
2077
2078 syscall_init(); /* This sets MSR_*STAR and related */
2079 +#else
2080 + if (boot_cpu_has(X86_FEATURE_SEP))
2081 + enable_sep_cpu();
2082 #endif
2083 load_TR_desc(); /* This does ltr */
2084 load_mm_ldt(current->active_mm); /* This does lldt */
2085 @@ -186,9 +181,12 @@ static void fix_processor_context(void)
2086 }
2087
2088 /**
2089 - * __restore_processor_state - restore the contents of CPU registers saved
2090 - * by __save_processor_state()
2091 - * @ctxt - structure to load the registers contents from
2092 + * __restore_processor_state - restore the contents of CPU registers saved
2093 + * by __save_processor_state()
2094 + * @ctxt - structure to load the registers contents from
2095 + *
2096 + * The asm code that gets us here will have restored a usable GDT, although
2097 + * it will be pointing to the wrong alias.
2098 */
2099 static void notrace __restore_processor_state(struct saved_context *ctxt)
2100 {
2101 @@ -211,46 +209,52 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
2102 write_cr2(ctxt->cr2);
2103 write_cr0(ctxt->cr0);
2104
2105 + /* Restore the IDT. */
2106 + load_idt(&ctxt->idt);
2107 +
2108 /*
2109 - * now restore the descriptor tables to their proper values
2110 - * ltr is done i fix_processor_context().
2111 + * Just in case the asm code got us here with the SS, DS, or ES
2112 + * out of sync with the GDT, update them.
2113 */
2114 -#ifdef CONFIG_X86_32
2115 - load_idt(&ctxt->idt);
2116 + loadsegment(ss, __KERNEL_DS);
2117 + loadsegment(ds, __USER_DS);
2118 + loadsegment(es, __USER_DS);
2119 +
2120 + /*
2121 + * Restore percpu access. Percpu access can happen in exception
2122 + * handlers or in complicated helpers like load_gs_index().
2123 + */
2124 +#ifdef CONFIG_X86_64
2125 + wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
2126 #else
2127 -/* CONFIG_X86_64 */
2128 - load_idt((const struct desc_ptr *)&ctxt->idt_limit);
2129 + loadsegment(fs, __KERNEL_PERCPU);
2130 + loadsegment(gs, __KERNEL_STACK_CANARY);
2131 #endif
2132
2133 + /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
2134 + fix_processor_context();
2135 +
2136 /*
2137 - * segment registers
2138 + * Now that we have descriptor tables fully restored and working
2139 + * exception handling, restore the usermode segments.
2140 */
2141 -#ifdef CONFIG_X86_32
2142 +#ifdef CONFIG_X86_64
2143 + loadsegment(ds, ctxt->es);
2144 loadsegment(es, ctxt->es);
2145 loadsegment(fs, ctxt->fs);
2146 - loadsegment(gs, ctxt->gs);
2147 - loadsegment(ss, ctxt->ss);
2148 + load_gs_index(ctxt->gs);
2149
2150 /*
2151 - * sysenter MSRs
2152 + * Restore FSBASE and GSBASE after restoring the selectors, since
2153 + * restoring the selectors clobbers the bases. Keep in mind
2154 + * that MSR_KERNEL_GS_BASE is horribly misnamed.
2155 */
2156 - if (boot_cpu_has(X86_FEATURE_SEP))
2157 - enable_sep_cpu();
2158 -#else
2159 -/* CONFIG_X86_64 */
2160 - asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
2161 - asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
2162 - asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
2163 - load_gs_index(ctxt->gs);
2164 - asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
2165 -
2166 wrmsrl(MSR_FS_BASE, ctxt->fs_base);
2167 - wrmsrl(MSR_GS_BASE, ctxt->gs_base);
2168 - wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
2169 + wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
2170 +#elif defined(CONFIG_X86_32_LAZY_GS)
2171 + loadsegment(gs, ctxt->gs);
2172 #endif
2173
2174 - fix_processor_context();
2175 -
2176 do_fpu_end();
2177 x86_platform.restore_sched_clock_state();
2178 mtrr_bp_restore();
2179 diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
2180 index 7538d802b65a..483593068139 100644
2181 --- a/arch/xtensa/kernel/stacktrace.c
2182 +++ b/arch/xtensa/kernel/stacktrace.c
2183 @@ -272,10 +272,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
2184 return 1;
2185 }
2186
2187 +/*
2188 + * level == 0 is for the return address from the caller of this function,
2189 + * not from this function itself.
2190 + */
2191 unsigned long return_address(unsigned level)
2192 {
2193 struct return_addr_data r = {
2194 - .skip = level + 1,
2195 + .skip = level,
2196 };
2197 walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
2198 return r.addr;
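
With the off-by-one removed, level 0 names the return address of the function that calls return_address(), matching the familiar GCC builtin. A userspace analogue, as a sketch:

	#include <stdio.h>

	static void who_called_me(void)
	{
		/* Prints an address inside the caller (main below), the
		 * same meaning return_address(0) now has in the kernel. */
		printf("returning to %p\n", __builtin_return_address(0));
	}

	int main(void)
	{
		who_called_me();
		return 0;
	}
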
2199 diff --git a/block/bio.c b/block/bio.c
2200 index 68972e3d3f5c..4c18a68913de 100644
2201 --- a/block/bio.c
2202 +++ b/block/bio.c
2203 @@ -1214,8 +1214,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
2204 }
2205 }
2206
2207 - if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
2208 + if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
2209 + if (!map_data)
2210 + __free_page(page);
2211 break;
2212 + }
2213
2214 len -= bytes;
2215 offset = 0;
2216 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
2217 index 8453a49471d7..f4ae000eb285 100644
2218 --- a/drivers/char/Kconfig
2219 +++ b/drivers/char/Kconfig
2220 @@ -377,7 +377,7 @@ config XILINX_HWICAP
2221
2222 config R3964
2223 tristate "Siemens R3964 line discipline"
2224 - depends on TTY
2225 + depends on TTY && BROKEN
2226 ---help---
2227 This driver allows synchronous communication with devices using the
2228 Siemens R3964 packet protocol. Unless you are dealing with special
2229 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2230 index 737f0f6f4075..45ea2718c65d 100644
2231 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2232 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2233 @@ -959,6 +959,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
2234 tpa_info = &rxr->rx_tpa[agg_id];
2235
2236 if (unlikely(cons != rxr->rx_next_cons)) {
2237 + netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
2238 + cons, rxr->rx_next_cons);
2239 bnxt_sched_reset(bp, rxr);
2240 return;
2241 }
2242 @@ -1377,14 +1379,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
2243 }
2244
2245 cons = rxcmp->rx_cmp_opaque;
2246 - rx_buf = &rxr->rx_buf_ring[cons];
2247 - data = rx_buf->data;
2248 if (unlikely(cons != rxr->rx_next_cons)) {
2249 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
2250
2251 + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
2252 + cons, rxr->rx_next_cons);
2253 bnxt_sched_reset(bp, rxr);
2254 return rc1;
2255 }
2256 + rx_buf = &rxr->rx_buf_ring[cons];
2257 + data = rx_buf->data;
2258 prefetch(data);
2259
2260 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
2261 @@ -1400,11 +1404,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
2262
2263 rx_buf->data = NULL;
2264 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
2265 + u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
2266 +
2267 bnxt_reuse_rx_data(rxr, cons, data);
2268 if (agg_bufs)
2269 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
2270
2271 rc = -EIO;
2272 + if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
2273 + netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
2274 + bnxt_sched_reset(bp, rxr);
2275 + }
2276 goto next_rx;
2277 }
2278
2279 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
2280 index 029e856f72a0..dc809c2ea413 100644
2281 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
2282 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
2283 @@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
2284 if (err)
2285 return err;
2286
2287 + mutex_lock(&mdev->mlx5e_res.td.list_lock);
2288 list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
2289 + mutex_unlock(&mdev->mlx5e_res.td.list_lock);
2290
2291 return 0;
2292 }
2293 @@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
2294 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
2295 struct mlx5e_tir *tir)
2296 {
2297 + mutex_lock(&mdev->mlx5e_res.td.list_lock);
2298 mlx5_core_destroy_tir(mdev, tir->tirn);
2299 list_del(&tir->list);
2300 + mutex_unlock(&mdev->mlx5e_res.td.list_lock);
2301 }
2302
2303 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
2304 @@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
2305 }
2306
2307 INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
2308 + mutex_init(&mdev->mlx5e_res.td.list_lock);
2309
2310 return 0;
2311
2312 @@ -151,6 +156,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
2313
2314 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
2315
2316 + mutex_lock(&mdev->mlx5e_res.td.list_lock);
2317 list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
2318 err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
2319 if (err)
2320 @@ -159,6 +165,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
2321
2322 out:
2323 kvfree(in);
2324 + mutex_unlock(&mdev->mlx5e_res.td.list_lock);
2325
2326 return err;
2327 }
2328 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2329 index 16e5c8cd104d..d51ad140f46d 100644
2330 --- a/drivers/net/usb/qmi_wwan.c
2331 +++ b/drivers/net/usb/qmi_wwan.c
2332 @@ -890,6 +890,7 @@ static const struct usb_device_id products[] = {
2333 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
2334 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
2335 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
2336 + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
2337 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
2338 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
2339 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
2340 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2341 index dedb12083d86..6663b76934ad 100644
2342 --- a/drivers/pci/quirks.c
2343 +++ b/drivers/pci/quirks.c
2344 @@ -3866,6 +3866,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
2345 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
2346 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
2347 quirk_dma_func1_alias);
2348 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
2349 + quirk_dma_func1_alias);
2350 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
2351 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
2352 quirk_dma_func1_alias);
2353 diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
2354 index 95103054c0e4..fdd2860cb9bd 100644
2355 --- a/drivers/tty/Kconfig
2356 +++ b/drivers/tty/Kconfig
2357 @@ -455,4 +455,27 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
2358 help
2359 FDC channel number to use for KGDB.
2360
2361 +config LDISC_AUTOLOAD
2362 + bool "Automatically load TTY Line Disciplines"
2363 + default y
2364 + help
2365 + Historically the kernel has always automatically loaded any
2366 + line discipline that is in a kernel module when a user asks
2367 + for it to be loaded with the TIOCSETD ioctl, or through other
2368 + means. This is not always the best thing to do on systems
2369 + where you know you will not be using some of the more
2370 + "ancient" line disciplines, so prevent the kernel from doing
2371 + this unless the request is coming from a process with the
2372 + CAP_SYS_MODULE capability.
2373 +
2374 + Say 'Y' here if you trust your userspace users to do the right
2375 + thing, or if you have only provided the line disciplines that
2376 + you know you will be using, or if you wish to continue to use
2377 + the traditional method of on-demand loading of these modules
2378 + by any user.
2379 +
2380 + This functionality can be changed at runtime with the
2381 + dev.tty.ldisc_autoload sysctl; this configuration option
2382 + only sets its default value.
2383 +
2384 endif # TTY
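
As the help text notes, the default chosen here can be flipped at runtime through the dev.tty.ldisc_autoload sysctl. A minimal userspace sketch of turning autoload off (assumes procfs is mounted at /proc and the caller is privileged):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/dev/tty/ldisc_autoload", "w");

		if (!f)
			return 1;
		fputs("0\n", f);  /* ldisc autoload now needs CAP_SYS_MODULE */
		return fclose(f) ? 1 : 0;
	}
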
2385 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2386 index 19fe1e8fc124..15e0116e1232 100644
2387 --- a/drivers/tty/tty_io.c
2388 +++ b/drivers/tty/tty_io.c
2389 @@ -520,6 +520,8 @@ void proc_clear_tty(struct task_struct *p)
2390 tty_kref_put(tty);
2391 }
2392
2393 +extern void tty_sysctl_init(void);
2394 +
2395 /**
2396 * proc_set_tty - set the controlling terminal
2397 *
2398 @@ -3705,6 +3707,7 @@ void console_sysfs_notify(void)
2399 */
2400 int __init tty_init(void)
2401 {
2402 + tty_sysctl_init();
2403 cdev_init(&tty_cdev, &tty_fops);
2404 if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
2405 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
2406 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
2407 index 4ab518d43758..3eb3f2a03bbb 100644
2408 --- a/drivers/tty/tty_ldisc.c
2409 +++ b/drivers/tty/tty_ldisc.c
2410 @@ -155,6 +155,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
2411 * takes tty_ldiscs_lock to guard against ldisc races
2412 */
2413
2414 +#if defined(CONFIG_LDISC_AUTOLOAD)
2415 + #define INITIAL_AUTOLOAD_STATE 1
2416 +#else
2417 + #define INITIAL_AUTOLOAD_STATE 0
2418 +#endif
2419 +static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
2420 +
2421 static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
2422 {
2423 struct tty_ldisc *ld;
2424 @@ -169,6 +176,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
2425 */
2426 ldops = get_ldops(disc);
2427 if (IS_ERR(ldops)) {
2428 + if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
2429 + return ERR_PTR(-EPERM);
2430 request_module("tty-ldisc-%d", disc);
2431 ldops = get_ldops(disc);
2432 if (IS_ERR(ldops))
2433 @@ -774,3 +783,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
2434 tty_ldisc_put(tty->ldisc);
2435 tty->ldisc = NULL;
2436 }
2437 +
2438 +static int zero;
2439 +static int one = 1;
2440 +static struct ctl_table tty_table[] = {
2441 + {
2442 + .procname = "ldisc_autoload",
2443 + .data = &tty_ldisc_autoload,
2444 + .maxlen = sizeof(tty_ldisc_autoload),
2445 + .mode = 0644,
2446 + .proc_handler = proc_dointvec,
2447 + .extra1 = &zero,
2448 + .extra2 = &one,
2449 + },
2450 + { }
2451 +};
2452 +
2453 +static struct ctl_table tty_dir_table[] = {
2454 + {
2455 + .procname = "tty",
2456 + .mode = 0555,
2457 + .child = tty_table,
2458 + },
2459 + { }
2460 +};
2461 +
2462 +static struct ctl_table tty_root_table[] = {
2463 + {
2464 + .procname = "dev",
2465 + .mode = 0555,
2466 + .child = tty_dir_table,
2467 + },
2468 + { }
2469 +};
2470 +
2471 +void tty_sysctl_init(void)
2472 +{
2473 + register_sysctl_table(tty_root_table);
2474 +}
2475 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
2476 index 8977f40ea441..2f09294c5946 100644
2477 --- a/drivers/virtio/virtio_ring.c
2478 +++ b/drivers/virtio/virtio_ring.c
2479 @@ -1040,6 +1040,8 @@ struct virtqueue *vring_create_virtqueue(
2480 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
2481 if (queue)
2482 break;
2483 + if (!may_reduce_num)
2484 + return NULL;
2485 }
2486
2487 if (!num)
2488 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2489 index 242584a0d3b5..a67143c579aa 100644
2490 --- a/fs/btrfs/ioctl.c
2491 +++ b/fs/btrfs/ioctl.c
2492 @@ -385,6 +385,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
2493 if (!capable(CAP_SYS_ADMIN))
2494 return -EPERM;
2495
2496 + /*
2497 + * If the fs is mounted with nologreplay, which requires it to be
2498 + * mounted in RO mode as well, we can not allow discard on free space
2499 + * inside block groups, because log trees refer to extents that are not
2500 + * pinned in a block group's free space cache (pinning the extents is
2501 + * precisely the first phase of replaying a log tree).
2502 + */
2503 + if (btrfs_test_opt(fs_info, NOLOGREPLAY))
2504 + return -EROFS;
2505 +
2506 rcu_read_lock();
2507 list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
2508 dev_list) {
2509 diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
2510 index fb790b8449c1..333e42cf08de 100644
2511 --- a/include/linux/bitrev.h
2512 +++ b/include/linux/bitrev.h
2513 @@ -31,32 +31,32 @@ static inline u32 __bitrev32(u32 x)
2514
2515 #define __constant_bitrev32(x) \
2516 ({ \
2517 - u32 __x = x; \
2518 - __x = (__x >> 16) | (__x << 16); \
2519 - __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
2520 - __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
2521 - __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
2522 - __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
2523 - __x; \
2524 + u32 ___x = x; \
2525 + ___x = (___x >> 16) | (___x << 16); \
2526 + ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
2527 + ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
2528 + ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
2529 + ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
2530 + ___x; \
2531 })
2532
2533 #define __constant_bitrev16(x) \
2534 ({ \
2535 - u16 __x = x; \
2536 - __x = (__x >> 8) | (__x << 8); \
2537 - __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
2538 - __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
2539 - __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
2540 - __x; \
2541 + u16 ___x = x; \
2542 + ___x = (___x >> 8) | (___x << 8); \
2543 + ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
2544 + ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
2545 + ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
2546 + ___x; \
2547 })
2548
2549 #define __constant_bitrev8(x) \
2550 ({ \
2551 - u8 __x = x; \
2552 - __x = (__x >> 4) | (__x << 4); \
2553 - __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
2554 - __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
2555 - __x; \
2556 + u8 ___x = x; \
2557 + ___x = (___x >> 4) | (___x << 4); \
2558 + ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
2559 + ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
2560 + ___x; \
2561 })
2562
2563 #define bitrev32(x) \
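
The extra underscore on the temporaries matters when the macro argument itself refers to a variable spelled __x, as can happen when these macros are invoked from other code using the same reserved-style names. With the old spelling the expansion degenerates into self-initialization; a compile-time sketch of the failure mode:

	/* Old-style temporary, reduced to one step for illustration. */
	#define OLD_BITREV8(x) ({				\
		unsigned char __x = (x);			\
		(unsigned char)((__x >> 4) | (__x << 4));	\
	})

	unsigned char demo(unsigned char __x)	/* caller's variable named __x */
	{
		/* Expands to "unsigned char __x = (__x);" -- the inner __x
		 * shadows the parameter and is initialized from itself. */
		return OLD_BITREV8(__x);
	}
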
2564 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
2565 index 859fd209603a..509e99076c57 100644
2566 --- a/include/linux/mlx5/driver.h
2567 +++ b/include/linux/mlx5/driver.h
2568 @@ -578,6 +578,8 @@ enum mlx5_pci_status {
2569 };
2570
2571 struct mlx5_td {
2572 + /* protects changes to the tirs list during a tirs refresh */
2573 + struct mutex list_lock;
2574 struct list_head tirs_list;
2575 u32 tdn;
2576 };
2577 diff --git a/include/linux/string.h b/include/linux/string.h
2578 index 60042e5e88ff..42eed573ebb6 100644
2579 --- a/include/linux/string.h
2580 +++ b/include/linux/string.h
2581 @@ -111,6 +111,9 @@ extern void * memscan(void *,int,__kernel_size_t);
2582 #ifndef __HAVE_ARCH_MEMCMP
2583 extern int memcmp(const void *,const void *,__kernel_size_t);
2584 #endif
2585 +#ifndef __HAVE_ARCH_BCMP
2586 +extern int bcmp(const void *,const void *,__kernel_size_t);
2587 +#endif
2588 #ifndef __HAVE_ARCH_MEMCHR
2589 extern void * memchr(const void *,int,__kernel_size_t);
2590 #endif
2591 diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
2592 index e8d36938f09a..b38c1871b735 100644
2593 --- a/include/linux/virtio_ring.h
2594 +++ b/include/linux/virtio_ring.h
2595 @@ -62,7 +62,7 @@ struct virtqueue;
2596 /*
2597 * Creates a virtqueue and allocates the descriptor ring. If
2598 * may_reduce_num is set, then this may allocate a smaller ring than
2599 - * expected. The caller should query virtqueue_get_ring_size to learn
2600 + * expected. The caller should query virtqueue_get_vring_size to learn
2601 * the actual size of the ring.
2602 */
2603 struct virtqueue *vring_create_virtqueue(unsigned int index,
2604 diff --git a/include/net/ip.h b/include/net/ip.h
2605 index f06cd30bb44c..a3c1b9dfc9a1 100644
2606 --- a/include/net/ip.h
2607 +++ b/include/net/ip.h
2608 @@ -580,7 +580,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
2609 unsigned char __user *data, int optlen);
2610 void ip_options_undo(struct ip_options *opt);
2611 void ip_forward_options(struct sk_buff *skb);
2612 -int ip_options_rcv_srr(struct sk_buff *skb);
2613 +int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
2614
2615 /*
2616 * Functions provided by ip_sockglue.c
2617 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
2618 index c05db6ff2515..0cdafe3935a6 100644
2619 --- a/include/net/net_namespace.h
2620 +++ b/include/net/net_namespace.h
2621 @@ -53,6 +53,7 @@ struct net {
2622 */
2623 spinlock_t rules_mod_lock;
2624
2625 + u32 hash_mix;
2626 atomic64_t cookie_gen;
2627
2628 struct list_head list; /* list of network namespaces */
2629 diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
2630 index 69a6715d9f3f..a347b2f9e748 100644
2631 --- a/include/net/netns/hash.h
2632 +++ b/include/net/netns/hash.h
2633 @@ -1,21 +1,10 @@
2634 #ifndef __NET_NS_HASH_H__
2635 #define __NET_NS_HASH_H__
2636
2637 -#include <asm/cache.h>
2638 -
2639 -struct net;
2640 +#include <net/net_namespace.h>
2641
2642 static inline u32 net_hash_mix(const struct net *net)
2643 {
2644 -#ifdef CONFIG_NET_NS
2645 - /*
2646 - * shift this right to eliminate bits, that are
2647 - * always zeroed
2648 - */
2649 -
2650 - return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
2651 -#else
2652 - return 0;
2653 -#endif
2654 + return net->hash_mix;
2655 }
2656 #endif
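
net_hash_mix() feeds per-namespace state into the seeds of various socket hash functions; deriving it from the net pointer both exposed bits of a kernel address and provided little entropy, hence the switch to random bytes. A simplified sketch of how such a mix value is typically consumed, modelled on the inet hash functions rather than copied from them:

	#include <linux/jhash.h>
	#include <net/netns/hash.h>

	/* Hypothetical helper: perturbing the seed per namespace keeps
	 * hash chains from being shared -- or remotely probed -- across
	 * network namespaces. */
	static inline u32 demo_ehashfn(const struct net *net, u32 laddr,
				       u32 lport, u32 secret)
	{
		return jhash_2words(laddr, lport, secret + net_hash_mix(net));
	}
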
2657 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
2658 index 9e745cc0726d..9f13667ccb9c 100644
2659 --- a/kernel/irq/chip.c
2660 +++ b/kernel/irq/chip.c
2661 @@ -1142,6 +1142,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
2662 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
2663 {
2664 data = data->parent_data;
2665 +
2666 + if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
2667 + return 0;
2668 +
2669 if (data->chip->irq_set_wake)
2670 return data->chip->irq_set_wake(data, on);
2671
2672 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2673 index 0c91d72f3e8f..1c630d94f86b 100644
2674 --- a/kernel/sched/fair.c
2675 +++ b/kernel/sched/fair.c
2676 @@ -6634,10 +6634,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
2677 if (cfs_rq->last_h_load_update == now)
2678 return;
2679
2680 - cfs_rq->h_load_next = NULL;
2681 + WRITE_ONCE(cfs_rq->h_load_next, NULL);
2682 for_each_sched_entity(se) {
2683 cfs_rq = cfs_rq_of(se);
2684 - cfs_rq->h_load_next = se;
2685 + WRITE_ONCE(cfs_rq->h_load_next, se);
2686 if (cfs_rq->last_h_load_update == now)
2687 break;
2688 }
2689 @@ -6647,7 +6647,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
2690 cfs_rq->last_h_load_update = now;
2691 }
2692
2693 - while ((se = cfs_rq->h_load_next) != NULL) {
2694 + while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
2695 load = cfs_rq->h_load;
2696 load = div64_ul(load * se->avg.load_avg,
2697 cfs_rq_load_avg(cfs_rq) + 1);
2698 diff --git a/lib/string.c b/lib/string.c
2699 index ed83562a53ae..1cd9757291b1 100644
2700 --- a/lib/string.c
2701 +++ b/lib/string.c
2702 @@ -772,6 +772,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
2703 EXPORT_SYMBOL(memcmp);
2704 #endif
2705
2706 +#ifndef __HAVE_ARCH_BCMP
2707 +/**
2708 + * bcmp - returns 0 if and only if the buffers have identical contents.
2709 + * @a: pointer to first buffer.
2710 + * @b: pointer to second buffer.
2711 + * @len: size of buffers.
2712 + *
2713 + * The sign or magnitude of a non-zero return value has no particular
2714 + * meaning, and architectures may implement their own more efficient bcmp(). So
2715 + * while this particular implementation is a simple (tail) call to memcmp, do
2716 + * not rely on anything but whether the return value is zero or non-zero.
2717 + */
2718 +#undef bcmp
2719 +int bcmp(const void *a, const void *b, size_t len)
2720 +{
2721 + return memcmp(a, b, len);
2722 +}
2723 +EXPORT_SYMBOL(bcmp);
2724 +#endif
2725 +
2726 #ifndef __HAVE_ARCH_MEMSCAN
2727 /**
2728 * memscan - Find a character in an area of memory.
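
Given the contract spelled out in the kernel-doc above, callers should treat bcmp() as a boolean. A hypothetical caller that uses it correctly:

	#include <linux/string.h>
	#include <linux/types.h>

	/* Only zero vs. non-zero is meaningful; unlike memcmp(), the sign
	 * of a non-zero bcmp() result carries no information. */
	static bool headers_match(const void *a, const void *b, size_t len)
	{
		return bcmp(a, b, len) == 0;
	}
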
2729 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
2730 index a8a9938aeceb..20ae57fbe009 100644
2731 --- a/net/core/ethtool.c
2732 +++ b/net/core/ethtool.c
2733 @@ -1801,17 +1801,22 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
2734
2735 gstrings.len = ret;
2736
2737 - data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
2738 - if (!data)
2739 - return -ENOMEM;
2740 + if (gstrings.len) {
2741 + data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
2742 + if (!data)
2743 + return -ENOMEM;
2744
2745 - __ethtool_get_strings(dev, gstrings.string_set, data);
2746 + __ethtool_get_strings(dev, gstrings.string_set, data);
2747 + } else {
2748 + data = NULL;
2749 + }
2750
2751 ret = -EFAULT;
2752 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
2753 goto out;
2754 useraddr += sizeof(gstrings);
2755 - if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
2756 + if (gstrings.len &&
2757 + copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
2758 goto out;
2759 ret = 0;
2760
2761 @@ -1899,17 +1904,21 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
2762 return -EFAULT;
2763
2764 stats.n_stats = n_stats;
2765 - data = kmalloc(n_stats * sizeof(u64), GFP_USER);
2766 - if (!data)
2767 - return -ENOMEM;
2768 + if (n_stats) {
2769 + data = kmalloc(n_stats * sizeof(u64), GFP_USER);
2770 + if (!data)
2771 + return -ENOMEM;
2772
2773 - ops->get_ethtool_stats(dev, &stats, data);
2774 + ops->get_ethtool_stats(dev, &stats, data);
2775 + } else {
2776 + data = NULL;
2777 + }
2778
2779 ret = -EFAULT;
2780 if (copy_to_user(useraddr, &stats, sizeof(stats)))
2781 goto out;
2782 useraddr += sizeof(stats);
2783 - if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
2784 + if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
2785 goto out;
2786 ret = 0;
2787
2788 @@ -1938,19 +1947,23 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
2789 return -EFAULT;
2790
2791 stats.n_stats = n_stats;
2792 - data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
2793 - if (!data)
2794 - return -ENOMEM;
2795 + if (n_stats) {
2796 + data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
2797 + if (!data)
2798 + return -ENOMEM;
2799
2800 - mutex_lock(&phydev->lock);
2801 - phydev->drv->get_stats(phydev, &stats, data);
2802 - mutex_unlock(&phydev->lock);
2803 + mutex_lock(&phydev->lock);
2804 + phydev->drv->get_stats(phydev, &stats, data);
2805 + mutex_unlock(&phydev->lock);
2806 + } else {
2807 + data = NULL;
2808 + }
2809
2810 ret = -EFAULT;
2811 if (copy_to_user(useraddr, &stats, sizeof(stats)))
2812 goto out;
2813 useraddr += sizeof(stats);
2814 - if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
2815 + if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
2816 goto out;
2817 ret = 0;
2818
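
All three ethtool paths share the same trap: a zero-count request previously still allocated a buffer and invoked the driver callback. Zero-sized kernel allocations do not fail; they return the magic ZERO_SIZE_PTR, so a NULL check passes and nothing protects whoever writes through the pointer next. The fix skips both the allocation and the callback. A sketch of the surprise:

	#include <linux/slab.h>
	#include <linux/bug.h>

	static void zero_size_demo(void)
	{
		/* A zero-element allocation "succeeds": it yields the
		 * magic ZERO_SIZE_PTR rather than NULL. */
		u8 *data = kcalloc(0, 32, GFP_USER);

		WARN_ON(data == NULL);	/* never fires: data == ZERO_SIZE_PTR */
		kfree(data);		/* kfree() accepts ZERO_SIZE_PTR */
	}
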
2819 diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
2820 index 04fd04ccaa04..4509dec7bd1c 100644
2821 --- a/net/core/net_namespace.c
2822 +++ b/net/core/net_namespace.c
2823 @@ -282,6 +282,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
2824
2825 atomic_set(&net->count, 1);
2826 atomic_set(&net->passive, 1);
2827 + get_random_bytes(&net->hash_mix, sizeof(u32));
2828 net->dev_base_seq = 1;
2829 net->user_ns = user_ns;
2830 idr_init(&net->netns_ids);
2831 diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
2832 index bcadca26523b..ce0ce1401f28 100644
2833 --- a/net/ipv4/ip_input.c
2834 +++ b/net/ipv4/ip_input.c
2835 @@ -259,11 +259,10 @@ int ip_local_deliver(struct sk_buff *skb)
2836 ip_local_deliver_finish);
2837 }
2838
2839 -static inline bool ip_rcv_options(struct sk_buff *skb)
2840 +static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
2841 {
2842 struct ip_options *opt;
2843 const struct iphdr *iph;
2844 - struct net_device *dev = skb->dev;
2845
2846 /* It looks as overkill, because not all
2847 IP options require packet mangling.
2848 @@ -299,7 +298,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
2849 }
2850 }
2851
2852 - if (ip_options_rcv_srr(skb))
2853 + if (ip_options_rcv_srr(skb, dev))
2854 goto drop;
2855 }
2856
2857 @@ -361,7 +360,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
2858 }
2859 #endif
2860
2861 - if (iph->ihl > 5 && ip_rcv_options(skb))
2862 + if (iph->ihl > 5 && ip_rcv_options(skb, dev))
2863 goto drop;
2864
2865 rt = skb_rtable(skb);
2866 diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
2867 index 4cd3b5ad9cee..570cdb547234 100644
2868 --- a/net/ipv4/ip_options.c
2869 +++ b/net/ipv4/ip_options.c
2870 @@ -614,7 +614,7 @@ void ip_forward_options(struct sk_buff *skb)
2871 }
2872 }
2873
2874 -int ip_options_rcv_srr(struct sk_buff *skb)
2875 +int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
2876 {
2877 struct ip_options *opt = &(IPCB(skb)->opt);
2878 int srrspace, srrptr;
2879 @@ -649,7 +649,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
2880
2881 orefdst = skb->_skb_refdst;
2882 skb_dst_set(skb, NULL);
2883 - err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
2884 + err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
2885 rt2 = skb_rtable(skb);
2886 if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
2887 skb_dst_drop(skb);
2888 diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
2889 index a08cedf9d286..910ef01759e7 100644
2890 --- a/net/ipv4/tcp_dctcp.c
2891 +++ b/net/ipv4/tcp_dctcp.c
2892 @@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
2893 module_param(dctcp_alpha_on_init, uint, 0644);
2894 MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
2895
2896 -static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
2897 -module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
2898 -MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
2899 - "parameter for clamping alpha on loss");
2900 -
2901 static struct tcp_congestion_ops dctcp_reno;
2902
2903 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
2904 @@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
2905 }
2906 }
2907
2908 -static void dctcp_state(struct sock *sk, u8 new_state)
2909 +static void dctcp_react_to_loss(struct sock *sk)
2910 {
2911 - if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
2912 - struct dctcp *ca = inet_csk_ca(sk);
2913 + struct dctcp *ca = inet_csk_ca(sk);
2914 + struct tcp_sock *tp = tcp_sk(sk);
2915
2916 - /* If this extension is enabled, we clamp dctcp_alpha to
2917 - * max on packet loss; the motivation is that dctcp_alpha
2918 - * is an indicator to the extend of congestion and packet
2919 - * loss is an indicator of extreme congestion; setting
2920 - * this in practice turned out to be beneficial, and
2921 - * effectively assumes total congestion which reduces the
2922 - * window by half.
2923 - */
2924 - ca->dctcp_alpha = DCTCP_MAX_ALPHA;
2925 - }
2926 + ca->loss_cwnd = tp->snd_cwnd;
2927 + tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
2928 +}
2929 +
2930 +static void dctcp_state(struct sock *sk, u8 new_state)
2931 +{
2932 + if (new_state == TCP_CA_Recovery &&
2933 + new_state != inet_csk(sk)->icsk_ca_state)
2934 + dctcp_react_to_loss(sk);
2935 + /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
2936 + * one loss-adjustment per RTT.
2937 + */
2938 }
2939
2940 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
2941 @@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
2942 case CA_EVENT_ECN_NO_CE:
2943 dctcp_ce_state_1_to_0(sk);
2944 break;
2945 + case CA_EVENT_LOSS:
2946 + dctcp_react_to_loss(sk);
2947 + break;
2948 default:
2949 /* Don't care for the rest. */
2950 break;
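
Both entry points above funnel into dctcp_react_to_loss(), which emulates Reno's multiplicative decrease: ssthresh becomes half the current window, floored at two segments. The arithmetic in isolation:

	/* Sketch of the loss reaction: cwnd 10 -> ssthresh 5, cwnd 3 -> 2. */
	static unsigned int loss_ssthresh(unsigned int snd_cwnd)
	{
		unsigned int half = snd_cwnd >> 1;

		return half > 2 ? half : 2;	/* max(cwnd/2, 2) */
	}
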
2951 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2952 index b723987761be..11407dd6bc7c 100644
2953 --- a/net/ipv6/ip6_output.c
2954 +++ b/net/ipv6/ip6_output.c
2955 @@ -592,7 +592,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2956 inet6_sk(skb->sk) : NULL;
2957 struct ipv6hdr *tmp_hdr;
2958 struct frag_hdr *fh;
2959 - unsigned int mtu, hlen, left, len;
2960 + unsigned int mtu, hlen, left, len, nexthdr_offset;
2961 int hroom, troom;
2962 __be32 frag_id;
2963 int ptr, offset = 0, err = 0;
2964 @@ -603,6 +603,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2965 goto fail;
2966 hlen = err;
2967 nexthdr = *prevhdr;
2968 + nexthdr_offset = prevhdr - skb_network_header(skb);
2969
2970 mtu = ip6_skb_dst_mtu(skb);
2971
2972 @@ -637,6 +638,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2973 (err = skb_checksum_help(skb)))
2974 goto fail;
2975
2976 + prevhdr = skb_network_header(skb) + nexthdr_offset;
2977 hroom = LL_RESERVED_SPACE(rt->dst.dev);
2978 if (skb_has_frag_list(skb)) {
2979 int first_len = skb_pagelen(skb);
2980 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2981 index f89516d04150..42f363661d25 100644
2982 --- a/net/ipv6/ip6_tunnel.c
2983 +++ b/net/ipv6/ip6_tunnel.c
2984 @@ -634,7 +634,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2985 IPPROTO_IPIP,
2986 RT_TOS(eiph->tos), 0);
2987 if (IS_ERR(rt) ||
2988 - rt->dst.dev->type != ARPHRD_TUNNEL) {
2989 + rt->dst.dev->type != ARPHRD_TUNNEL6) {
2990 if (!IS_ERR(rt))
2991 ip_rt_put(rt);
2992 goto out;
2993 @@ -644,7 +644,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2994 ip_rt_put(rt);
2995 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
2996 skb2->dev) ||
2997 - skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
2998 + skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
2999 goto out;
3000 }
3001
3002 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3003 index c9c6a5e829ab..be74eee0e8ff 100644
3004 --- a/net/ipv6/sit.c
3005 +++ b/net/ipv6/sit.c
3006 @@ -661,6 +661,10 @@ static int ipip6_rcv(struct sk_buff *skb)
3007 !net_eq(tunnel->net, dev_net(tunnel->dev))))
3008 goto out;
3009
3010 + /* skb can be uncloned in iptunnel_pull_header, so
3011 + * old iph is no longer valid
3012 + */
3013 + iph = (const struct iphdr *)skb_mac_header(skb);
3014 err = IP_ECN_decapsulate(iph, skb);
3015 if (unlikely(err)) {
3016 if (log_ecn_error)
3017 diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
3018 index 553d0ad4a2fa..2f3cd09ee0df 100644
3019 --- a/net/kcm/kcmsock.c
3020 +++ b/net/kcm/kcmsock.c
3021 @@ -2058,14 +2058,14 @@ static int __init kcm_init(void)
3022 if (err)
3023 goto fail;
3024
3025 - err = sock_register(&kcm_family_ops);
3026 - if (err)
3027 - goto sock_register_fail;
3028 -
3029 err = register_pernet_device(&kcm_net_ops);
3030 if (err)
3031 goto net_ops_fail;
3032
3033 + err = sock_register(&kcm_family_ops);
3034 + if (err)
3035 + goto sock_register_fail;
3036 +
3037 err = kcm_proc_init();
3038 if (err)
3039 goto proc_init_fail;
3040 @@ -2073,12 +2073,12 @@ static int __init kcm_init(void)
3041 return 0;
3042
3043 proc_init_fail:
3044 - unregister_pernet_device(&kcm_net_ops);
3045 -
3046 -net_ops_fail:
3047 sock_unregister(PF_KCM);
3048
3049 sock_register_fail:
3050 + unregister_pernet_device(&kcm_net_ops);
3051 +
3052 +net_ops_fail:
3053 proto_unregister(&kcm_proto);
3054
3055 fail:
3056 @@ -2094,8 +2094,8 @@ fail:
3057 static void __exit kcm_exit(void)
3058 {
3059 kcm_proc_exit();
3060 - unregister_pernet_device(&kcm_net_ops);
3061 sock_unregister(PF_KCM);
3062 + unregister_pernet_device(&kcm_net_ops);
3063 proto_unregister(&kcm_proto);
3064 destroy_workqueue(kcm_wq);
3065
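
The reordering above restores the invariant that cleanup runs in exactly the reverse order of setup, on both the error path and module exit. The general shape, with hypothetical setup_a/setup_b/teardown_a standing in for the real registrations:

	static int setup_a(void);
	static int setup_b(void);
	static void teardown_a(void);

	static int demo_init(void)
	{
		int err = setup_a();

		if (err)
			return err;
		err = setup_b();
		if (err)
			teardown_a();	/* unwind in reverse order of setup */
		return err;
	}
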
3066 diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
3067 index 3bd4d5d0c346..50ea76180afa 100644
3068 --- a/net/openvswitch/flow_netlink.c
3069 +++ b/net/openvswitch/flow_netlink.c
3070 @@ -1853,14 +1853,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
3071
3072 struct sw_flow_actions *acts;
3073 int new_acts_size;
3074 - int req_size = NLA_ALIGN(attr_len);
3075 + size_t req_size = NLA_ALIGN(attr_len);
3076 int next_offset = offsetof(struct sw_flow_actions, actions) +
3077 (*sfa)->actions_len;
3078
3079 if (req_size <= (ksize(*sfa) - next_offset))
3080 goto out;
3081
3082 - new_acts_size = ksize(*sfa) * 2;
3083 + new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
3084
3085 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
3086 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
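
Two things change in the hunk above: req_size becomes size_t so a huge attribute length cannot go negative in the comparison, and the growth rule stops assuming that doubling always covers a single large request. The latter rule in isolation, with hypothetical names:

	/* Grow geometrically, but never return less than the caller needs. */
	static size_t grow_to_fit(size_t cur_size, size_t needed)
	{
		size_t doubled = cur_size * 2;

		return doubled > needed ? doubled : needed; /* max(2*cur, need) */
	}
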
3087 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
3088 index d36effbf7614..2daba5316caa 100644
3089 --- a/net/rds/tcp.c
3090 +++ b/net/rds/tcp.c
3091 @@ -527,7 +527,7 @@ static void rds_tcp_kill_sock(struct net *net)
3092 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
3093 struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
3094
3095 - if (net != c_net || !tc->t_sock)
3096 + if (net != c_net)
3097 continue;
3098 if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
3099 list_move_tail(&tc->t_tcp_node, &tmp_list);
3100 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
3101 index 8ea8217db960..d6af93a24aa0 100644
3102 --- a/net/sctp/protocol.c
3103 +++ b/net/sctp/protocol.c
3104 @@ -600,6 +600,7 @@ out:
3105 static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
3106 {
3107 /* No address mapping for V4 sockets */
3108 + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
3109 return sizeof(struct sockaddr_in);
3110 }
3111
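
The one-line memset closes a classic infoleak: sockaddr_in is smaller than the union handed back to userspace, and the sin_zero padding would otherwise carry whatever the kernel stack last held. The general pattern, as a userspace-style sketch with a hypothetical helper:

	#include <string.h>
	#include <stdint.h>
	#include <netinet/in.h>

	/* Always clear padding before a struct crosses a trust boundary,
	 * even when every named field is assigned (addr/port assumed to
	 * be in network byte order). */
	static void fill_addr(struct sockaddr_in *out, uint32_t addr,
			      uint16_t port)
	{
		out->sin_family = AF_INET;
		out->sin_port = port;
		out->sin_addr.s_addr = addr;
		memset(out->sin_zero, 0, sizeof(out->sin_zero));
	}
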
3112 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
3113 index 965473d4129c..09491b27092e 100644
3114 --- a/sound/core/seq/seq_clientmgr.c
3115 +++ b/sound/core/seq/seq_clientmgr.c
3116 @@ -1249,7 +1249,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
3117
3118 /* fill the info fields */
3119 if (client_info->name[0])
3120 - strlcpy(client->name, client_info->name, sizeof(client->name));
3121 + strscpy(client->name, client_info->name, sizeof(client->name));
3122
3123 client->filter = client_info->filter;
3124 client->event_lost = client_info->event_lost;
3125 @@ -1527,7 +1527,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
3126 /* set queue name */
3127 if (!info->name[0])
3128 snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
3129 - strlcpy(q->name, info->name, sizeof(q->name));
3130 + strscpy(q->name, info->name, sizeof(q->name));
3131 snd_use_lock_free(&q->use_lock);
3132
3133 return 0;
3134 @@ -1589,7 +1589,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
3135 queuefree(q);
3136 return -EPERM;
3137 }
3138 - strlcpy(q->name, info->name, sizeof(q->name));
3139 + strscpy(q->name, info->name, sizeof(q->name));
3140 queuefree(q);
3141
3142 return 0;
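
The strlcpy() -> strscpy() conversions are more than spelling: strscpy() never reads more of the source than the destination can hold (strlcpy() must walk the entire source to compute its return value) and reports truncation as -E2BIG instead of a length callers tend to ignore. A hypothetical caller:

	#include <linux/string.h>
	#include <linux/errno.h>
	#include <linux/printk.h>

	static void set_name(char dst[16], const char *untrusted_src)
	{
		ssize_t n = strscpy(dst, untrusted_src, 16);

		if (n == -E2BIG)
			pr_debug("name truncated\n");
	}
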
3143 diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
3144 index 23ab0d169c11..fa64cc2b1729 100644
3145 --- a/sound/soc/fsl/fsl_esai.c
3146 +++ b/sound/soc/fsl/fsl_esai.c
3147 @@ -59,6 +59,8 @@ struct fsl_esai {
3148 u32 fifo_depth;
3149 u32 slot_width;
3150 u32 slots;
3151 + u32 tx_mask;
3152 + u32 rx_mask;
3153 u32 hck_rate[2];
3154 u32 sck_rate[2];
3155 bool hck_dir[2];
3156 @@ -359,21 +361,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
3157 regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
3158 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
3159
3160 - regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
3161 - ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
3162 - regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
3163 - ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
3164 -
3165 regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
3166 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
3167
3168 - regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
3169 - ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
3170 - regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
3171 - ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
3172 -
3173 esai_priv->slot_width = slot_width;
3174 esai_priv->slots = slots;
3175 + esai_priv->tx_mask = tx_mask;
3176 + esai_priv->rx_mask = rx_mask;
3177
3178 return 0;
3179 }
3180 @@ -594,6 +588,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
3181 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
3182 u8 i, channels = substream->runtime->channels;
3183 u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
3184 + u32 mask;
3185
3186 switch (cmd) {
3187 case SNDRV_PCM_TRIGGER_START:
3188 @@ -606,15 +601,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
3189 for (i = 0; tx && i < channels; i++)
3190 regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
3191
3192 + /*
3193 + * If TE/RE is set at the end of the enablement flow, channels
3194 + * can be swapped in the multi-data-line case.
3195 + * To work around this issue, switch the bit enablement
3196 + * sequence to the sequence below
3197 + * 1) clear the xSMB & xSMA: which is done in probe and
3198 + * stop state.
3199 + * 2) set TE/RE
3200 + * 3) set xSMB
3201 + * 4) set xSMA: xSMA is the last one in this flow, which
3202 + * will trigger esai to start.
3203 + */
3204 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
3205 tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
3206 tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
3207 + mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
3208 +
3209 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
3210 + ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
3211 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
3212 + ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
3213 +
3214 break;
3215 case SNDRV_PCM_TRIGGER_SUSPEND:
3216 case SNDRV_PCM_TRIGGER_STOP:
3217 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
3218 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
3219 tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
3220 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
3221 + ESAI_xSMA_xS_MASK, 0);
3222 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
3223 + ESAI_xSMB_xS_MASK, 0);
3224
3225 /* Disable and reset FIFO */
3226 regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
3227 @@ -904,6 +922,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
3228 return ret;
3229 }
3230
3231 + esai_priv->tx_mask = 0xFFFFFFFF;
3232 + esai_priv->rx_mask = 0xFFFFFFFF;
3233 +
3234 + /* Clear the TSMA, TSMB, RSMA, RSMB */
3235 + regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
3236 + regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
3237 + regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
3238 + regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
3239 +
3240 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
3241 &fsl_esai_dai, 1);
3242 if (ret) {