Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0176-4.14.77-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 156631 bytes
-added up to patches-4.14.79
1 diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
2 index 27966ae741e0..141d8c1f714f 100644
3 --- a/Documentation/devicetree/bindings/net/macb.txt
4 +++ b/Documentation/devicetree/bindings/net/macb.txt
5 @@ -10,6 +10,7 @@ Required properties:
6 Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
7 the Cadence GEM, or the generic form: "cdns,gem".
8 Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
9 + Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
10 Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
11 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
12 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
13 diff --git a/Makefile b/Makefile
14 index 332dd011b3b9..16d1a18496fb 100644
15 --- a/Makefile
16 +++ b/Makefile
17 @@ -1,7 +1,7 @@
18 # SPDX-License-Identifier: GPL-2.0
19 VERSION = 4
20 PATCHLEVEL = 14
21 -SUBLEVEL = 76
22 +SUBLEVEL = 77
23 EXTRAVERSION =
24 NAME = Petit Gorille
25
26 diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
27 index 7cb235ef0fb6..6e9e1c2f9def 100644
28 --- a/arch/arm/boot/dts/sama5d3_emac.dtsi
29 +++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
30 @@ -41,7 +41,7 @@
31 };
32
33 macb1: ethernet@f802c000 {
34 - compatible = "cdns,at91sam9260-macb", "cdns,macb";
35 + compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
36 reg = <0xf802c000 0x100>;
37 interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
38 pinctrl-names = "default";
39 diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
40 index 9342904cccca..b17ee03d280b 100644
41 --- a/arch/arm/include/asm/assembler.h
42 +++ b/arch/arm/include/asm/assembler.h
43 @@ -447,11 +447,23 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
44 .size \name , . - \name
45 .endm
46
47 + .macro csdb
48 +#ifdef CONFIG_THUMB2_KERNEL
49 + .inst.w 0xf3af8014
50 +#else
51 + .inst 0xe320f014
52 +#endif
53 + .endm
54 +
55 .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
56 #ifndef CONFIG_CPU_USE_DOMAINS
57 adds \tmp, \addr, #\size - 1
58 sbcccs \tmp, \tmp, \limit
59 bcs \bad
60 +#ifdef CONFIG_CPU_SPECTRE
61 + movcs \addr, #0
62 + csdb
63 +#endif
64 #endif
65 .endm
66
67 diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
68 index 40f5c410fd8c..69772e742a0a 100644
69 --- a/arch/arm/include/asm/barrier.h
70 +++ b/arch/arm/include/asm/barrier.h
71 @@ -17,6 +17,12 @@
72 #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
73 #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
74 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
75 +#ifdef CONFIG_THUMB2_KERNEL
76 +#define CSDB ".inst.w 0xf3af8014"
77 +#else
78 +#define CSDB ".inst 0xe320f014"
79 +#endif
80 +#define csdb() __asm__ __volatile__(CSDB : : : "memory")
81 #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
82 #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
83 : : "r" (0) : "memory")
84 @@ -37,6 +43,13 @@
85 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
86 #endif
87
88 +#ifndef CSDB
89 +#define CSDB
90 +#endif
91 +#ifndef csdb
92 +#define csdb()
93 +#endif
94 +
95 #ifdef CONFIG_ARM_HEAVY_MB
96 extern void (*soc_mb)(void);
97 extern void arm_heavy_mb(void);
98 @@ -63,6 +76,25 @@ extern void arm_heavy_mb(void);
99 #define __smp_rmb() __smp_mb()
100 #define __smp_wmb() dmb(ishst)
101
102 +#ifdef CONFIG_CPU_SPECTRE
103 +static inline unsigned long array_index_mask_nospec(unsigned long idx,
104 + unsigned long sz)
105 +{
106 + unsigned long mask;
107 +
108 + asm volatile(
109 + "cmp %1, %2\n"
110 + " sbc %0, %1, %1\n"
111 + CSDB
112 + : "=r" (mask)
113 + : "r" (idx), "Ir" (sz)
114 + : "cc");
115 +
116 + return mask;
117 +}
118 +#define array_index_mask_nospec array_index_mask_nospec
119 +#endif
120 +
121 #include <asm-generic/barrier.h>
122
123 #endif /* !__ASSEMBLY__ */
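
The array_index_mask_nospec() helper added to barrier.h above returns an all-ones mask when idx < sz and zero otherwise, so callers can AND an attacker-influenced index with the mask before it is used under speculation. As a point of comparison, here is a minimal userspace sketch of the same branchless mask in plain C, modelled on the kernel's generic fallback; index_mask() and the toy lookup table are illustrative names, and only the assembly version can follow up with the CSDB speculation barrier.

#include <stdio.h>

/*
 * ~0UL when idx < sz, 0UL otherwise, computed without a branch.  Relies
 * on arithmetic right shift of a negative long, as the kernel does.
 */
static unsigned long index_mask(unsigned long idx, unsigned long sz)
{
	return ~(long)(idx | (sz - 1UL - idx)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long table[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	unsigned long idx = 5;			/* try 9 to see the clamp */
	unsigned long safe = idx & index_mask(idx, 8);

	printf("mask=%#lx value=%lu\n", index_mask(idx, 8), table[safe]);
	return 0;
}
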
124 diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
125 index a97f1ea708d1..73a99c72a930 100644
126 --- a/arch/arm/include/asm/bugs.h
127 +++ b/arch/arm/include/asm/bugs.h
128 @@ -10,12 +10,14 @@
129 #ifndef __ASM_BUGS_H
130 #define __ASM_BUGS_H
131
132 -#ifdef CONFIG_MMU
133 extern void check_writebuffer_bugs(void);
134
135 -#define check_bugs() check_writebuffer_bugs()
136 +#ifdef CONFIG_MMU
137 +extern void check_bugs(void);
138 +extern void check_other_bugs(void);
139 #else
140 #define check_bugs() do { } while (0)
141 +#define check_other_bugs() do { } while (0)
142 #endif
143
144 #endif
145 diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
146 index 4c9fa72b59f5..07e27f212dc7 100644
147 --- a/arch/arm/include/asm/cp15.h
148 +++ b/arch/arm/include/asm/cp15.h
149 @@ -65,6 +65,9 @@
150 #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
151 #define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
152
153 +#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
154 +#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
155 +
156 extern unsigned long cr_alignment; /* defined in entry-armv.S */
157
158 static inline unsigned long get_cr(void)
159 diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
160 index 441933311bbf..3379c2c684c2 100644
161 --- a/arch/arm/include/asm/cputype.h
162 +++ b/arch/arm/include/asm/cputype.h
163 @@ -77,8 +77,16 @@
164 #define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
165 #define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
166 #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
167 +#define ARM_CPU_PART_CORTEX_A53 0x4100d030
168 +#define ARM_CPU_PART_CORTEX_A57 0x4100d070
169 +#define ARM_CPU_PART_CORTEX_A72 0x4100d080
170 +#define ARM_CPU_PART_CORTEX_A73 0x4100d090
171 +#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
172 #define ARM_CPU_PART_MASK 0xff00fff0
173
174 +/* Broadcom cores */
175 +#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
176 +
177 /* DEC implemented cores */
178 #define ARM_CPU_PART_SA1100 0x4400a110
179
180 diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
181 index 14d68a4d826f..b598e666da4c 100644
182 --- a/arch/arm/include/asm/kvm_asm.h
183 +++ b/arch/arm/include/asm/kvm_asm.h
184 @@ -61,8 +61,6 @@ struct kvm_vcpu;
185 extern char __kvm_hyp_init[];
186 extern char __kvm_hyp_init_end[];
187
188 -extern char __kvm_hyp_vector[];
189 -
190 extern void __kvm_flush_vm_context(void);
191 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
192 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
193 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
194 index 65572e14306c..b60232639984 100644
195 --- a/arch/arm/include/asm/kvm_host.h
196 +++ b/arch/arm/include/asm/kvm_host.h
197 @@ -21,6 +21,7 @@
198
199 #include <linux/types.h>
200 #include <linux/kvm_types.h>
201 +#include <asm/cputype.h>
202 #include <asm/kvm.h>
203 #include <asm/kvm_asm.h>
204 #include <asm/kvm_mmio.h>
205 @@ -298,8 +299,17 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
206
207 static inline bool kvm_arm_harden_branch_predictor(void)
208 {
209 - /* No way to detect it yet, pretend it is not there. */
210 - return false;
211 + switch(read_cpuid_part()) {
212 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
213 + case ARM_CPU_PART_BRAHMA_B15:
214 + case ARM_CPU_PART_CORTEX_A12:
215 + case ARM_CPU_PART_CORTEX_A15:
216 + case ARM_CPU_PART_CORTEX_A17:
217 + return true;
218 +#endif
219 + default:
220 + return false;
221 + }
222 }
223
224 #define KVM_SSBD_UNKNOWN -1
225 diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
226 index 8a098e65f5f8..ca62f95f3b4c 100644
227 --- a/arch/arm/include/asm/kvm_mmu.h
228 +++ b/arch/arm/include/asm/kvm_mmu.h
229 @@ -246,7 +246,28 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
230
231 static inline void *kvm_get_hyp_vector(void)
232 {
233 - return kvm_ksym_ref(__kvm_hyp_vector);
234 + switch(read_cpuid_part()) {
235 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
236 + case ARM_CPU_PART_CORTEX_A12:
237 + case ARM_CPU_PART_CORTEX_A17:
238 + {
239 + extern char __kvm_hyp_vector_bp_inv[];
240 + return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
241 + }
242 +
243 + case ARM_CPU_PART_BRAHMA_B15:
244 + case ARM_CPU_PART_CORTEX_A15:
245 + {
246 + extern char __kvm_hyp_vector_ic_inv[];
247 + return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
248 + }
249 +#endif
250 + default:
251 + {
252 + extern char __kvm_hyp_vector[];
253 + return kvm_ksym_ref(__kvm_hyp_vector);
254 + }
255 + }
256 }
257
258 static inline int kvm_map_vectors(void)
259 diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
260 index f2e1af45bd6f..e25f4392e1b2 100644
261 --- a/arch/arm/include/asm/proc-fns.h
262 +++ b/arch/arm/include/asm/proc-fns.h
263 @@ -36,6 +36,10 @@ extern struct processor {
264 * Set up any processor specifics
265 */
266 void (*_proc_init)(void);
267 + /*
268 + * Check for processor bugs
269 + */
270 + void (*check_bugs)(void);
271 /*
272 * Disable any processor specifics
273 */
274 diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
275 index 78f6db114faf..8e76db83c498 100644
276 --- a/arch/arm/include/asm/system_misc.h
277 +++ b/arch/arm/include/asm/system_misc.h
278 @@ -8,6 +8,7 @@
279 #include <linux/linkage.h>
280 #include <linux/irqflags.h>
281 #include <linux/reboot.h>
282 +#include <linux/percpu.h>
283
284 extern void cpu_init(void);
285
286 @@ -15,6 +16,20 @@ void soft_restart(unsigned long);
287 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
288 extern void (*arm_pm_idle)(void);
289
290 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
291 +typedef void (*harden_branch_predictor_fn_t)(void);
292 +DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
293 +static inline void harden_branch_predictor(void)
294 +{
295 + harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
296 + smp_processor_id());
297 + if (fn)
298 + fn();
299 +}
300 +#else
301 +#define harden_branch_predictor() do { } while (0)
302 +#endif
303 +
304 #define UDBG_UNDEFINED (1 << 0)
305 #define UDBG_SYSCALL (1 << 1)
306 #define UDBG_BADABORT (1 << 2)
307 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
308 index 776757d1604a..57d2ad9c75ca 100644
309 --- a/arch/arm/include/asm/thread_info.h
310 +++ b/arch/arm/include/asm/thread_info.h
311 @@ -126,8 +126,8 @@ struct user_vfp_exc;
312
313 extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
314 struct user_vfp_exc __user *);
315 -extern int vfp_restore_user_hwstate(struct user_vfp __user *,
316 - struct user_vfp_exc __user *);
317 +extern int vfp_restore_user_hwstate(struct user_vfp *,
318 + struct user_vfp_exc *);
319 #endif
320
321 /*
322 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
323 index 0bf2347495f1..4140be431087 100644
324 --- a/arch/arm/include/asm/uaccess.h
325 +++ b/arch/arm/include/asm/uaccess.h
326 @@ -84,6 +84,13 @@ static inline void set_fs(mm_segment_t fs)
327 : "cc"); \
328 flag; })
329
330 +/*
331 + * This is a type: either unsigned long, if the argument fits into
332 + * that type, or otherwise unsigned long long.
333 + */
334 +#define __inttype(x) \
335 + __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
336 +
337 /*
338 * Single-value transfer routines. They automatically use the right
339 * size if we just have the right pointer type. Note that the functions
340 @@ -153,7 +160,7 @@ extern int __get_user_64t_4(void *);
341 ({ \
342 unsigned long __limit = current_thread_info()->addr_limit - 1; \
343 register const typeof(*(p)) __user *__p asm("r0") = (p);\
344 - register typeof(x) __r2 asm("r2"); \
345 + register __inttype(x) __r2 asm("r2"); \
346 register unsigned long __l asm("r1") = __limit; \
347 register int __e asm("r0"); \
348 unsigned int __ua_flags = uaccess_save_and_enable(); \
349 @@ -243,6 +250,16 @@ static inline void set_fs(mm_segment_t fs)
350 #define user_addr_max() \
351 (uaccess_kernel() ? ~0UL : get_fs())
352
353 +#ifdef CONFIG_CPU_SPECTRE
354 +/*
355 + * When mitigating Spectre variant 1, it is not worth fixing the non-
356 + * verifying accessors, because we need to add verification of the
357 + * address space there. Force these to use the standard get_user()
358 + * version instead.
359 + */
360 +#define __get_user(x, ptr) get_user(x, ptr)
361 +#else
362 +
363 /*
364 * The "__xxx" versions of the user access functions do not verify the
365 * address space - it must have been done previously with a separate
366 @@ -259,12 +276,6 @@ static inline void set_fs(mm_segment_t fs)
367 __gu_err; \
368 })
369
370 -#define __get_user_error(x, ptr, err) \
371 -({ \
372 - __get_user_err((x), (ptr), err); \
373 - (void) 0; \
374 -})
375 -
376 #define __get_user_err(x, ptr, err) \
377 do { \
378 unsigned long __gu_addr = (unsigned long)(ptr); \
379 @@ -324,6 +335,7 @@ do { \
380
381 #define __get_user_asm_word(x, addr, err) \
382 __get_user_asm(x, addr, err, ldr)
383 +#endif
384
385
386 #define __put_user_switch(x, ptr, __err, __fn) \
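
The __inttype() macro introduced above picks a temporary wide enough for the value being fetched: unsigned long when the target fits in one register, unsigned long long for 64-bit targets on 32-bit ARM, so the r2 register (or r2/r3 pair) handed to the get_user assembly helpers has the right size. A small compilable sketch of the same trick, using a hypothetical my_inttype() name so it does not shadow the kernel macro:

#include <stdio.h>

/* Same construction as __inttype(): __builtin_choose_expr() selects
 * 0ULL or 0UL at compile time, and __typeof__ yields that type. */
#define my_inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
	int small = 0;
	long long wide = 0;

	/* On 32-bit ARM this prints 4 then 8; on an LP64 host both are 8. */
	printf("temp for int:       %zu bytes\n", sizeof(my_inttype(small)));
	printf("temp for long long: %zu bytes\n", sizeof(my_inttype(wide)));
	return 0;
}
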
387 diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
388 index 499f978fb1fd..50de918252b7 100644
389 --- a/arch/arm/kernel/Makefile
390 +++ b/arch/arm/kernel/Makefile
391 @@ -31,6 +31,7 @@ else
392 obj-y += entry-armv.o
393 endif
394
395 +obj-$(CONFIG_MMU) += bugs.o
396 obj-$(CONFIG_CPU_IDLE) += cpuidle.o
397 obj-$(CONFIG_ISA_DMA_API) += dma.o
398 obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
399 diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
400 new file mode 100644
401 index 000000000000..7be511310191
402 --- /dev/null
403 +++ b/arch/arm/kernel/bugs.c
404 @@ -0,0 +1,18 @@
405 +// SPDX-Identifier: GPL-2.0
406 +#include <linux/init.h>
407 +#include <asm/bugs.h>
408 +#include <asm/proc-fns.h>
409 +
410 +void check_other_bugs(void)
411 +{
412 +#ifdef MULTI_CPU
413 + if (processor.check_bugs)
414 + processor.check_bugs();
415 +#endif
416 +}
417 +
418 +void __init check_bugs(void)
419 +{
420 + check_writebuffer_bugs();
421 + check_other_bugs();
422 +}
423 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
424 index 99c908226065..54c10503d71f 100644
425 --- a/arch/arm/kernel/entry-common.S
426 +++ b/arch/arm/kernel/entry-common.S
427 @@ -241,9 +241,7 @@ local_restart:
428 tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
429 bne __sys_trace
430
431 - cmp scno, #NR_syscalls @ check upper syscall limit
432 - badr lr, ret_fast_syscall @ return address
433 - ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
434 + invoke_syscall tbl, scno, r10, ret_fast_syscall
435
436 add r1, sp, #S_OFF
437 2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
438 @@ -277,14 +275,8 @@ __sys_trace:
439 mov r1, scno
440 add r0, sp, #S_OFF
441 bl syscall_trace_enter
442 -
443 - badr lr, __sys_trace_return @ return address
444 - mov scno, r0 @ syscall number (possibly new)
445 - add r1, sp, #S_R0 + S_OFF @ pointer to regs
446 - cmp scno, #NR_syscalls @ check upper syscall limit
447 - ldmccia r1, {r0 - r6} @ have to reload r0 - r6
448 - stmccia sp, {r4, r5} @ and update the stack args
449 - ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
450 + mov scno, r0
451 + invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
452 cmp scno, #-1 @ skip the syscall?
453 bne 2b
454 add sp, sp, #S_OFF @ restore stack
455 @@ -362,6 +354,10 @@ sys_syscall:
456 bic scno, r0, #__NR_OABI_SYSCALL_BASE
457 cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
458 cmpne scno, #NR_syscalls @ check range
459 +#ifdef CONFIG_CPU_SPECTRE
460 + movhs scno, #0
461 + csdb
462 +#endif
463 stmloia sp, {r5, r6} @ shuffle args
464 movlo r0, r1
465 movlo r1, r2
466 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
467 index 0f07579af472..773424843d6e 100644
468 --- a/arch/arm/kernel/entry-header.S
469 +++ b/arch/arm/kernel/entry-header.S
470 @@ -378,6 +378,31 @@
471 #endif
472 .endm
473
474 + .macro invoke_syscall, table, nr, tmp, ret, reload=0
475 +#ifdef CONFIG_CPU_SPECTRE
476 + mov \tmp, \nr
477 + cmp \tmp, #NR_syscalls @ check upper syscall limit
478 + movcs \tmp, #0
479 + csdb
480 + badr lr, \ret @ return address
481 + .if \reload
482 + add r1, sp, #S_R0 + S_OFF @ pointer to regs
483 + ldmccia r1, {r0 - r6} @ reload r0-r6
484 + stmccia sp, {r4, r5} @ update stack arguments
485 + .endif
486 + ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
487 +#else
488 + cmp \nr, #NR_syscalls @ check upper syscall limit
489 + badr lr, \ret @ return address
490 + .if \reload
491 + add r1, sp, #S_R0 + S_OFF @ pointer to regs
492 + ldmccia r1, {r0 - r6} @ reload r0-r6
493 + stmccia sp, {r4, r5} @ update stack arguments
494 + .endif
495 + ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
496 +#endif
497 + .endm
498 +
499 /*
500 * These are the registers used in the syscall handler, and allow us to
501 * have in theory up to 7 arguments to a function - r0 to r6.
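
Taken together with the entry-common.S hunks, the invoke_syscall macro above centralises the Spectre-v1 handling of the syscall number: an out-of-range value is forced to zero before it can index the syscall table under speculation, and csdb bounds the speculative window. Below is a rough C rendering of that control flow, for illustration only; dispatch(), NR_CALLS and the stub handlers are invented names, and plain C has no csdb equivalent.

#include <stdio.h>

#define NR_CALLS 3
typedef long (*call_fn)(void);

static long sys_a(void) { return 1; }
static long sys_b(void) { return 2; }
static long sys_c(void) { return 3; }

static long dispatch(unsigned long nr)
{
	static const call_fn table[NR_CALLS] = { sys_a, sys_b, sys_c };
	unsigned long idx = nr;

	if (idx >= NR_CALLS)	/* cmp \tmp, #NR_syscalls */
		idx = 0;	/* movcs \tmp, #0         */
	/* csdb sits here in the macro to constrain speculation */
	if (nr >= NR_CALLS)	/* architecturally out of range: the real */
		return -1;	/* code skips the call and ends up on the */
				/* ENOSYS path instead                    */
	return table[idx]();	/* ldrcc pc, [\table, \tmp, lsl #2] */
}

int main(void)
{
	printf("%ld %ld\n", dispatch(1), dispatch(42));
	return 0;
}
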
502 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
503 index b67ae12503f3..cdfe52b15a0a 100644
504 --- a/arch/arm/kernel/signal.c
505 +++ b/arch/arm/kernel/signal.c
506 @@ -149,22 +149,18 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
507
508 static int restore_vfp_context(char __user **auxp)
509 {
510 - struct vfp_sigframe __user *frame =
511 - (struct vfp_sigframe __user *)*auxp;
512 - unsigned long magic;
513 - unsigned long size;
514 - int err = 0;
515 -
516 - __get_user_error(magic, &frame->magic, err);
517 - __get_user_error(size, &frame->size, err);
518 + struct vfp_sigframe frame;
519 + int err;
520
521 + err = __copy_from_user(&frame, *auxp, sizeof(frame));
522 if (err)
523 - return -EFAULT;
524 - if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
525 + return err;
526 +
527 + if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
528 return -EINVAL;
529
530 - *auxp += size;
531 - return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
532 + *auxp += sizeof(frame);
533 + return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
534 }
535
536 #endif
537 @@ -184,6 +180,7 @@ struct rt_sigframe {
538
539 static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
540 {
541 + struct sigcontext context;
542 char __user *aux;
543 sigset_t set;
544 int err;
545 @@ -192,23 +189,26 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
546 if (err == 0)
547 set_current_blocked(&set);
548
549 - __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
550 - __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
551 - __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
552 - __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
553 - __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
554 - __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
555 - __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
556 - __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
557 - __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
558 - __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
559 - __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
560 - __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
561 - __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
562 - __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
563 - __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
564 - __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
565 - __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
566 + err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
567 + if (err == 0) {
568 + regs->ARM_r0 = context.arm_r0;
569 + regs->ARM_r1 = context.arm_r1;
570 + regs->ARM_r2 = context.arm_r2;
571 + regs->ARM_r3 = context.arm_r3;
572 + regs->ARM_r4 = context.arm_r4;
573 + regs->ARM_r5 = context.arm_r5;
574 + regs->ARM_r6 = context.arm_r6;
575 + regs->ARM_r7 = context.arm_r7;
576 + regs->ARM_r8 = context.arm_r8;
577 + regs->ARM_r9 = context.arm_r9;
578 + regs->ARM_r10 = context.arm_r10;
579 + regs->ARM_fp = context.arm_fp;
580 + regs->ARM_ip = context.arm_ip;
581 + regs->ARM_sp = context.arm_sp;
582 + regs->ARM_lr = context.arm_lr;
583 + regs->ARM_pc = context.arm_pc;
584 + regs->ARM_cpsr = context.arm_cpsr;
585 + }
586
587 err |= !valid_user_regs(regs);
588
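
The restore_sigframe() and restore_vfp_context() rework above replaces long runs of per-field __get_user_error() calls, a macro this patch removes from uaccess.h, with a single bulk __copy_from_user() into a kernel-stack copy followed by plain assignments, so every user access goes through one verified copy. A self-contained userspace sketch of that shape follows; the struct, copy_in() and the error value are illustrative stand-ins for the kernel types and helpers.

#include <stdio.h>
#include <string.h>

struct sigctx { unsigned long r0, r1, pc, cpsr; };

/* Stand-in for __copy_from_user(): returns 0 on success. */
static int copy_in(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static int restore(struct sigctx *regs, const struct sigctx *user_frame)
{
	struct sigctx context;

	if (copy_in(&context, user_frame, sizeof(context)))
		return -1;		/* -EFAULT in the kernel */

	regs->r0   = context.r0;	/* fields come from the local copy; */
	regs->r1   = context.r1;	/* user memory is not touched again */
	regs->pc   = context.pc;
	regs->cpsr = context.cpsr;
	return 0;
}

int main(void)
{
	struct sigctx user = { 1, 2, 0x8000, 0x10 }, regs = { 0, 0, 0, 0 };

	if (!restore(&regs, &user))
		printf("pc=%#lx cpsr=%#lx\n", regs.pc, regs.cpsr);
	return 0;
}
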
589 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
590 index c9a0a5299827..e61af0600133 100644
591 --- a/arch/arm/kernel/smp.c
592 +++ b/arch/arm/kernel/smp.c
593 @@ -31,6 +31,7 @@
594 #include <linux/irq_work.h>
595
596 #include <linux/atomic.h>
597 +#include <asm/bugs.h>
598 #include <asm/smp.h>
599 #include <asm/cacheflush.h>
600 #include <asm/cpu.h>
601 @@ -402,6 +403,9 @@ asmlinkage void secondary_start_kernel(void)
602 * before we continue - which happens after __cpu_up returns.
603 */
604 set_cpu_online(cpu, true);
605 +
606 + check_other_bugs();
607 +
608 complete(&cpu_running);
609
610 local_irq_enable();
611 diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
612 index a40ebb7c0896..d08099269e35 100644
613 --- a/arch/arm/kernel/suspend.c
614 +++ b/arch/arm/kernel/suspend.c
615 @@ -3,6 +3,7 @@
616 #include <linux/slab.h>
617 #include <linux/mm_types.h>
618
619 +#include <asm/bugs.h>
620 #include <asm/cacheflush.h>
621 #include <asm/idmap.h>
622 #include <asm/pgalloc.h>
623 @@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
624 cpu_switch_mm(mm->pgd, mm);
625 local_flush_bp_all();
626 local_flush_tlb_all();
627 + check_other_bugs();
628 }
629
630 return ret;
631 diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
632 index b9786f491873..4abe4909417f 100644
633 --- a/arch/arm/kernel/sys_oabi-compat.c
634 +++ b/arch/arm/kernel/sys_oabi-compat.c
635 @@ -329,9 +329,11 @@ asmlinkage long sys_oabi_semtimedop(int semid,
636 return -ENOMEM;
637 err = 0;
638 for (i = 0; i < nsops; i++) {
639 - __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
640 - __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
641 - __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
642 + struct oabi_sembuf osb;
643 + err |= __copy_from_user(&osb, tsops, sizeof(osb));
644 + sops[i].sem_num = osb.sem_num;
645 + sops[i].sem_op = osb.sem_op;
646 + sops[i].sem_flg = osb.sem_flg;
647 tsops++;
648 }
649 if (timeout) {
650 diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
651 index 95a2faefc070..aa3f9a9837ac 100644
652 --- a/arch/arm/kvm/hyp/hyp-entry.S
653 +++ b/arch/arm/kvm/hyp/hyp-entry.S
654 @@ -16,6 +16,7 @@
655 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
656 */
657
658 +#include <linux/arm-smccc.h>
659 #include <linux/linkage.h>
660 #include <asm/kvm_arm.h>
661 #include <asm/kvm_asm.h>
662 @@ -71,6 +72,90 @@ __kvm_hyp_vector:
663 W(b) hyp_irq
664 W(b) hyp_fiq
665
666 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
667 + .align 5
668 +__kvm_hyp_vector_ic_inv:
669 + .global __kvm_hyp_vector_ic_inv
670 +
671 + /*
672 + * We encode the exception entry in the bottom 3 bits of
673 + * SP, and we have to guarantee to be 8 bytes aligned.
674 + */
675 + W(add) sp, sp, #1 /* Reset 7 */
676 + W(add) sp, sp, #1 /* Undef 6 */
677 + W(add) sp, sp, #1 /* Syscall 5 */
678 + W(add) sp, sp, #1 /* Prefetch abort 4 */
679 + W(add) sp, sp, #1 /* Data abort 3 */
680 + W(add) sp, sp, #1 /* HVC 2 */
681 + W(add) sp, sp, #1 /* IRQ 1 */
682 + W(nop) /* FIQ 0 */
683 +
684 + mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
685 + isb
686 +
687 + b decode_vectors
688 +
689 + .align 5
690 +__kvm_hyp_vector_bp_inv:
691 + .global __kvm_hyp_vector_bp_inv
692 +
693 + /*
694 + * We encode the exception entry in the bottom 3 bits of
695 + * SP, and we have to guarantee to be 8 bytes aligned.
696 + */
697 + W(add) sp, sp, #1 /* Reset 7 */
698 + W(add) sp, sp, #1 /* Undef 6 */
699 + W(add) sp, sp, #1 /* Syscall 5 */
700 + W(add) sp, sp, #1 /* Prefetch abort 4 */
701 + W(add) sp, sp, #1 /* Data abort 3 */
702 + W(add) sp, sp, #1 /* HVC 2 */
703 + W(add) sp, sp, #1 /* IRQ 1 */
704 + W(nop) /* FIQ 0 */
705 +
706 + mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
707 + isb
708 +
709 +decode_vectors:
710 +
711 +#ifdef CONFIG_THUMB2_KERNEL
712 + /*
713 + * Yet another silly hack: Use VPIDR as a temp register.
714 + * Thumb2 is really a pain, as SP cannot be used with most
715 + * of the bitwise instructions. The vect_br macro ensures
716 + * things gets cleaned-up.
717 + */
718 + mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
719 + mov r0, sp
720 + and r0, r0, #7
721 + sub sp, sp, r0
722 + push {r1, r2}
723 + mov r1, r0
724 + mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
725 + mrc p15, 0, r2, c0, c0, 0 /* MIDR */
726 + mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
727 +#endif
728 +
729 +.macro vect_br val, targ
730 +ARM( eor sp, sp, #\val )
731 +ARM( tst sp, #7 )
732 +ARM( eorne sp, sp, #\val )
733 +
734 +THUMB( cmp r1, #\val )
735 +THUMB( popeq {r1, r2} )
736 +
737 + beq \targ
738 +.endm
739 +
740 + vect_br 0, hyp_fiq
741 + vect_br 1, hyp_irq
742 + vect_br 2, hyp_hvc
743 + vect_br 3, hyp_dabt
744 + vect_br 4, hyp_pabt
745 + vect_br 5, hyp_svc
746 + vect_br 6, hyp_undef
747 + vect_br 7, hyp_reset
748 +#endif
749 +
750 .macro invalid_vector label, cause
751 .align
752 \label: mov r0, #\cause
753 @@ -118,7 +203,7 @@ hyp_hvc:
754 lsr r2, r2, #16
755 and r2, r2, #0xff
756 cmp r2, #0
757 - bne guest_trap @ Guest called HVC
758 + bne guest_hvc_trap @ Guest called HVC
759
760 /*
761 * Getting here means host called HVC, we shift parameters and branch
762 @@ -149,7 +234,14 @@ hyp_hvc:
763 bx ip
764
765 1:
766 - push {lr}
767 + /*
768 + * Pushing r2 here is just a way of keeping the stack aligned to
769 + * 8 bytes on any path that can trigger a HYP exception. Here,
770 + * we may well be about to jump into the guest, and the guest
771 + * exit would otherwise be badly decoded by our fancy
772 + * "decode-exception-without-a-branch" code...
773 + */
774 + push {r2, lr}
775
776 mov lr, r0
777 mov r0, r1
778 @@ -159,7 +251,21 @@ hyp_hvc:
779 THUMB( orr lr, #1)
780 blx lr @ Call the HYP function
781
782 - pop {lr}
783 + pop {r2, lr}
784 + eret
785 +
786 +guest_hvc_trap:
787 + movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
788 + movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
789 + ldr r0, [sp] @ Guest's r0
790 + teq r0, r2
791 + bne guest_trap
792 + add sp, sp, #12
793 + @ Returns:
794 + @ r0 = 0
795 + @ r1 = HSR value (perfectly predictable)
796 + @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
797 + mov r0, #0
798 eret
799
800 guest_trap:
801 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
802 index 7a4b06049001..a826df3d3814 100644
803 --- a/arch/arm/lib/copy_from_user.S
804 +++ b/arch/arm/lib/copy_from_user.S
805 @@ -90,6 +90,15 @@
806 .text
807
808 ENTRY(arm_copy_from_user)
809 +#ifdef CONFIG_CPU_SPECTRE
810 + get_thread_info r3
811 + ldr r3, [r3, #TI_ADDR_LIMIT]
812 + adds ip, r1, r2 @ ip=addr+size
813 + sub r3, r3, #1 @ addr_limit - 1
814 + cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
815 + movcs r1, #0 @ addr = NULL
816 + csdb
817 +#endif
818
819 #include "copy_template.S"
820
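
The guard added to arm_copy_from_user() above computes addr + size, compares it against the thread's addr_limit and, when the range would run past the limit (or the addition overflows), replaces the source pointer with NULL before the copy template runs, with csdb again bounding speculation; a copy from NULL then fails safely instead of reading kernel memory. A hedged C sketch of the same clamp, where clamp_user_src() and the constants are invented for illustration and the real check operates on r1/r2/ip in assembly:

#include <stdio.h>
#include <stddef.h>

/* Mirrors the assembly comments: if (addr + size > addr_limit - 1)
 * then addr = NULL, so a possibly speculative copy cannot read past
 * the user/kernel boundary. */
static const void *clamp_user_src(const void *addr, size_t size,
				  unsigned long addr_limit)
{
	unsigned long end = (unsigned long)addr + size;	/* adds ip, r1, r2 */

	if (end < (unsigned long)addr ||		/* carry set: overflow */
	    end > addr_limit - 1)			/* cmpcc ip, r3        */
		return NULL;				/* movcs r1, #0        */
	return addr;
}

int main(void)
{
	unsigned long limit = 0xbf000000UL;		/* made-up addr_limit */

	printf("%p\n", (void *)clamp_user_src((void *)0x1000, 64, limit));
	printf("%p\n", (void *)clamp_user_src((void *)(limit - 16), 64, limit));
	return 0;
}
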
821 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
822 index fd9077a74fce..50e0b45a22db 100644
823 --- a/arch/arm/mm/Kconfig
824 +++ b/arch/arm/mm/Kconfig
825 @@ -415,6 +415,7 @@ config CPU_V7
826 select CPU_CP15_MPU if !MMU
827 select CPU_HAS_ASID if MMU
828 select CPU_PABRT_V7
829 + select CPU_SPECTRE if MMU
830 select CPU_THUMB_CAPABLE
831 select CPU_TLB_V7 if MMU
832
833 @@ -826,6 +827,28 @@ config CPU_BPREDICT_DISABLE
834 help
835 Say Y here to disable branch prediction. If unsure, say N.
836
837 +config CPU_SPECTRE
838 + bool
839 +
840 +config HARDEN_BRANCH_PREDICTOR
841 + bool "Harden the branch predictor against aliasing attacks" if EXPERT
842 + depends on CPU_SPECTRE
843 + default y
844 + help
845 + Speculation attacks against some high-performance processors rely
846 + on being able to manipulate the branch predictor for a victim
847 + context by executing aliasing branches in the attacker context.
848 + Such attacks can be partially mitigated against by clearing
849 + internal branch predictor state and limiting the prediction
850 + logic in some situations.
851 +
852 + This config option will take CPU-specific actions to harden
853 + the branch predictor against aliasing attacks and may rely on
854 + specific instruction sequences or control bits being set by
855 + the system firmware.
856 +
857 + If unsure, say Y.
858 +
859 config TLS_REG_EMUL
860 bool
861 select NEED_KUSER_HELPERS
862 diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
863 index f353ee569f6b..93a622a18cba 100644
864 --- a/arch/arm/mm/Makefile
865 +++ b/arch/arm/mm/Makefile
866 @@ -95,7 +95,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
867 obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
868 obj-$(CONFIG_CPU_V6) += proc-v6.o
869 obj-$(CONFIG_CPU_V6K) += proc-v6.o
870 -obj-$(CONFIG_CPU_V7) += proc-v7.o
871 +obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
872 obj-$(CONFIG_CPU_V7M) += proc-v7m.o
873
874 AFLAGS_proc-v6.o :=-Wa,-march=armv6
875 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
876 index 42f585379e19..49b1b8048635 100644
877 --- a/arch/arm/mm/fault.c
878 +++ b/arch/arm/mm/fault.c
879 @@ -164,6 +164,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
880 {
881 struct siginfo si;
882
883 + if (addr > TASK_SIZE)
884 + harden_branch_predictor();
885 +
886 #ifdef CONFIG_DEBUG_USER
887 if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
888 ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
889 diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
890 index f10e31d0730a..81d0efb055c6 100644
891 --- a/arch/arm/mm/proc-macros.S
892 +++ b/arch/arm/mm/proc-macros.S
893 @@ -273,13 +273,14 @@
894 mcr p15, 0, ip, c7, c10, 4 @ data write barrier
895 .endm
896
897 -.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
898 +.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
899 .type \name\()_processor_functions, #object
900 .align 2
901 ENTRY(\name\()_processor_functions)
902 .word \dabort
903 .word \pabort
904 .word cpu_\name\()_proc_init
905 + .word \bugs
906 .word cpu_\name\()_proc_fin
907 .word cpu_\name\()_reset
908 .word cpu_\name\()_do_idle
909 diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
910 index c6141a5435c3..f8d45ad2a515 100644
911 --- a/arch/arm/mm/proc-v7-2level.S
912 +++ b/arch/arm/mm/proc-v7-2level.S
913 @@ -41,11 +41,6 @@
914 * even on Cortex-A8 revisions not affected by 430973.
915 * If IBE is not set, the flush BTAC/BTB won't do anything.
916 */
917 -ENTRY(cpu_ca8_switch_mm)
918 -#ifdef CONFIG_MMU
919 - mov r2, #0
920 - mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
921 -#endif
922 ENTRY(cpu_v7_switch_mm)
923 #ifdef CONFIG_MMU
924 mmid r1, r1 @ get mm->context.id
925 @@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
926 #endif
927 bx lr
928 ENDPROC(cpu_v7_switch_mm)
929 -ENDPROC(cpu_ca8_switch_mm)
930
931 /*
932 * cpu_v7_set_pte_ext(ptep, pte)
933 diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
934 new file mode 100644
935 index 000000000000..5544b82a2e7a
936 --- /dev/null
937 +++ b/arch/arm/mm/proc-v7-bugs.c
938 @@ -0,0 +1,174 @@
939 +// SPDX-License-Identifier: GPL-2.0
940 +#include <linux/arm-smccc.h>
941 +#include <linux/kernel.h>
942 +#include <linux/psci.h>
943 +#include <linux/smp.h>
944 +
945 +#include <asm/cp15.h>
946 +#include <asm/cputype.h>
947 +#include <asm/proc-fns.h>
948 +#include <asm/system_misc.h>
949 +
950 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
951 +DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
952 +
953 +extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
954 +extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
955 +extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
956 +extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
957 +
958 +static void harden_branch_predictor_bpiall(void)
959 +{
960 + write_sysreg(0, BPIALL);
961 +}
962 +
963 +static void harden_branch_predictor_iciallu(void)
964 +{
965 + write_sysreg(0, ICIALLU);
966 +}
967 +
968 +static void __maybe_unused call_smc_arch_workaround_1(void)
969 +{
970 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
971 +}
972 +
973 +static void __maybe_unused call_hvc_arch_workaround_1(void)
974 +{
975 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
976 +}
977 +
978 +static void cpu_v7_spectre_init(void)
979 +{
980 + const char *spectre_v2_method = NULL;
981 + int cpu = smp_processor_id();
982 +
983 + if (per_cpu(harden_branch_predictor_fn, cpu))
984 + return;
985 +
986 + switch (read_cpuid_part()) {
987 + case ARM_CPU_PART_CORTEX_A8:
988 + case ARM_CPU_PART_CORTEX_A9:
989 + case ARM_CPU_PART_CORTEX_A12:
990 + case ARM_CPU_PART_CORTEX_A17:
991 + case ARM_CPU_PART_CORTEX_A73:
992 + case ARM_CPU_PART_CORTEX_A75:
993 + if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
994 + goto bl_error;
995 + per_cpu(harden_branch_predictor_fn, cpu) =
996 + harden_branch_predictor_bpiall;
997 + spectre_v2_method = "BPIALL";
998 + break;
999 +
1000 + case ARM_CPU_PART_CORTEX_A15:
1001 + case ARM_CPU_PART_BRAHMA_B15:
1002 + if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
1003 + goto bl_error;
1004 + per_cpu(harden_branch_predictor_fn, cpu) =
1005 + harden_branch_predictor_iciallu;
1006 + spectre_v2_method = "ICIALLU";
1007 + break;
1008 +
1009 +#ifdef CONFIG_ARM_PSCI
1010 + default:
1011 + /* Other ARM CPUs require no workaround */
1012 + if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
1013 + break;
1014 + /* fallthrough */
1015 + /* Cortex A57/A72 require firmware workaround */
1016 + case ARM_CPU_PART_CORTEX_A57:
1017 + case ARM_CPU_PART_CORTEX_A72: {
1018 + struct arm_smccc_res res;
1019 +
1020 + if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
1021 + break;
1022 +
1023 + switch (psci_ops.conduit) {
1024 + case PSCI_CONDUIT_HVC:
1025 + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1026 + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1027 + if ((int)res.a0 != 0)
1028 + break;
1029 + if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
1030 + goto bl_error;
1031 + per_cpu(harden_branch_predictor_fn, cpu) =
1032 + call_hvc_arch_workaround_1;
1033 + processor.switch_mm = cpu_v7_hvc_switch_mm;
1034 + spectre_v2_method = "hypervisor";
1035 + break;
1036 +
1037 + case PSCI_CONDUIT_SMC:
1038 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1039 + ARM_SMCCC_ARCH_WORKAROUND_1, &res);
1040 + if ((int)res.a0 != 0)
1041 + break;
1042 + if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
1043 + goto bl_error;
1044 + per_cpu(harden_branch_predictor_fn, cpu) =
1045 + call_smc_arch_workaround_1;
1046 + processor.switch_mm = cpu_v7_smc_switch_mm;
1047 + spectre_v2_method = "firmware";
1048 + break;
1049 +
1050 + default:
1051 + break;
1052 + }
1053 + }
1054 +#endif
1055 + }
1056 +
1057 + if (spectre_v2_method)
1058 + pr_info("CPU%u: Spectre v2: using %s workaround\n",
1059 + smp_processor_id(), spectre_v2_method);
1060 + return;
1061 +
1062 +bl_error:
1063 + pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
1064 + cpu);
1065 +}
1066 +#else
1067 +static void cpu_v7_spectre_init(void)
1068 +{
1069 +}
1070 +#endif
1071 +
1072 +static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
1073 + u32 mask, const char *msg)
1074 +{
1075 + u32 aux_cr;
1076 +
1077 + asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
1078 +
1079 + if ((aux_cr & mask) != mask) {
1080 + if (!*warned)
1081 + pr_err("CPU%u: %s", smp_processor_id(), msg);
1082 + *warned = true;
1083 + return false;
1084 + }
1085 + return true;
1086 +}
1087 +
1088 +static DEFINE_PER_CPU(bool, spectre_warned);
1089 +
1090 +static bool check_spectre_auxcr(bool *warned, u32 bit)
1091 +{
1092 + return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
1093 + cpu_v7_check_auxcr_set(warned, bit,
1094 + "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
1095 +}
1096 +
1097 +void cpu_v7_ca8_ibe(void)
1098 +{
1099 + if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
1100 + cpu_v7_spectre_init();
1101 +}
1102 +
1103 +void cpu_v7_ca15_ibe(void)
1104 +{
1105 + if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
1106 + cpu_v7_spectre_init();
1107 +}
1108 +
1109 +void cpu_v7_bugs_init(void)
1110 +{
1111 + cpu_v7_spectre_init();
1112 +}
1113 diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
1114 index 01d64c0b2563..12468d9378d8 100644
1115 --- a/arch/arm/mm/proc-v7.S
1116 +++ b/arch/arm/mm/proc-v7.S
1117 @@ -9,6 +9,7 @@
1118 *
1119 * This is the "shell" of the ARMv7 processor support.
1120 */
1121 +#include <linux/arm-smccc.h>
1122 #include <linux/init.h>
1123 #include <linux/linkage.h>
1124 #include <asm/assembler.h>
1125 @@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area)
1126 ret lr
1127 ENDPROC(cpu_v7_dcache_clean_area)
1128
1129 +#ifdef CONFIG_ARM_PSCI
1130 + .arch_extension sec
1131 +ENTRY(cpu_v7_smc_switch_mm)
1132 + stmfd sp!, {r0 - r3}
1133 + movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
1134 + movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
1135 + smc #0
1136 + ldmfd sp!, {r0 - r3}
1137 + b cpu_v7_switch_mm
1138 +ENDPROC(cpu_v7_smc_switch_mm)
1139 + .arch_extension virt
1140 +ENTRY(cpu_v7_hvc_switch_mm)
1141 + stmfd sp!, {r0 - r3}
1142 + movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
1143 + movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
1144 + hvc #0
1145 + ldmfd sp!, {r0 - r3}
1146 + b cpu_v7_switch_mm
1147 +ENDPROC(cpu_v7_smc_switch_mm)
1148 +#endif
1149 +ENTRY(cpu_v7_iciallu_switch_mm)
1150 + mov r3, #0
1151 + mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
1152 + b cpu_v7_switch_mm
1153 +ENDPROC(cpu_v7_iciallu_switch_mm)
1154 +ENTRY(cpu_v7_bpiall_switch_mm)
1155 + mov r3, #0
1156 + mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
1157 + b cpu_v7_switch_mm
1158 +ENDPROC(cpu_v7_bpiall_switch_mm)
1159 +
1160 string cpu_v7_name, "ARMv7 Processor"
1161 .align
1162
1163 @@ -158,31 +190,6 @@ ENTRY(cpu_v7_do_resume)
1164 ENDPROC(cpu_v7_do_resume)
1165 #endif
1166
1167 -/*
1168 - * Cortex-A8
1169 - */
1170 - globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
1171 - globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
1172 - globl_equ cpu_ca8_reset, cpu_v7_reset
1173 - globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
1174 - globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
1175 - globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
1176 - globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
1177 -#ifdef CONFIG_ARM_CPU_SUSPEND
1178 - globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
1179 - globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
1180 -#endif
1181 -
1182 -/*
1183 - * Cortex-A9 processor functions
1184 - */
1185 - globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
1186 - globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
1187 - globl_equ cpu_ca9mp_reset, cpu_v7_reset
1188 - globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
1189 - globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
1190 - globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
1191 - globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
1192 .globl cpu_ca9mp_suspend_size
1193 .equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
1194 #ifdef CONFIG_ARM_CPU_SUSPEND
1195 @@ -546,12 +553,79 @@ __v7_setup_stack:
1196
1197 __INITDATA
1198
1199 + .weak cpu_v7_bugs_init
1200 +
1201 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
1202 - define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
1203 + define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
1204 +
1205 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1206 + @ generic v7 bpiall on context switch
1207 + globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
1208 + globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
1209 + globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
1210 + globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
1211 + globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
1212 + globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
1213 + globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
1214 +#ifdef CONFIG_ARM_CPU_SUSPEND
1215 + globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
1216 + globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
1217 +#endif
1218 + define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
1219 +
1220 +#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
1221 +#else
1222 +#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
1223 +#endif
1224 +
1225 #ifndef CONFIG_ARM_LPAE
1226 - define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
1227 - define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
1228 + @ Cortex-A8 - always needs bpiall switch_mm implementation
1229 + globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
1230 + globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
1231 + globl_equ cpu_ca8_reset, cpu_v7_reset
1232 + globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
1233 + globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
1234 + globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
1235 + globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
1236 + globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
1237 +#ifdef CONFIG_ARM_CPU_SUSPEND
1238 + globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
1239 + globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
1240 +#endif
1241 + define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
1242 +
1243 + @ Cortex-A9 - needs more registers preserved across suspend/resume
1244 + @ and bpiall switch_mm for hardening
1245 + globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
1246 + globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
1247 + globl_equ cpu_ca9mp_reset, cpu_v7_reset
1248 + globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
1249 + globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
1250 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1251 + globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
1252 +#else
1253 + globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
1254 +#endif
1255 + globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
1256 + define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
1257 #endif
1258 +
1259 + @ Cortex-A15 - needs iciallu switch_mm for hardening
1260 + globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
1261 + globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
1262 + globl_equ cpu_ca15_reset, cpu_v7_reset
1263 + globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
1264 + globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
1265 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
1266 + globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
1267 +#else
1268 + globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
1269 +#endif
1270 + globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
1271 + globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
1272 + globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
1273 + globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
1274 + define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
1275 #ifdef CONFIG_CPU_PJ4B
1276 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
1277 #endif
1278 @@ -658,7 +732,7 @@ __v7_ca7mp_proc_info:
1279 __v7_ca12mp_proc_info:
1280 .long 0x410fc0d0
1281 .long 0xff0ffff0
1282 - __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
1283 + __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
1284 .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
1285
1286 /*
1287 @@ -668,7 +742,7 @@ __v7_ca12mp_proc_info:
1288 __v7_ca15mp_proc_info:
1289 .long 0x410fc0f0
1290 .long 0xff0ffff0
1291 - __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
1292 + __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
1293 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
1294
1295 /*
1296 @@ -678,7 +752,7 @@ __v7_ca15mp_proc_info:
1297 __v7_b15mp_proc_info:
1298 .long 0x420f00f0
1299 .long 0xff0ffff0
1300 - __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
1301 + __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions
1302 .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
1303
1304 /*
1305 @@ -688,9 +762,25 @@ __v7_b15mp_proc_info:
1306 __v7_ca17mp_proc_info:
1307 .long 0x410fc0e0
1308 .long 0xff0ffff0
1309 - __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
1310 + __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
1311 .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
1312
1313 + /* ARM Ltd. Cortex A73 processor */
1314 + .type __v7_ca73_proc_info, #object
1315 +__v7_ca73_proc_info:
1316 + .long 0x410fd090
1317 + .long 0xff0ffff0
1318 + __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
1319 + .size __v7_ca73_proc_info, . - __v7_ca73_proc_info
1320 +
1321 + /* ARM Ltd. Cortex A75 processor */
1322 + .type __v7_ca75_proc_info, #object
1323 +__v7_ca75_proc_info:
1324 + .long 0x410fd0a0
1325 + .long 0xff0ffff0
1326 + __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
1327 + .size __v7_ca75_proc_info, . - __v7_ca75_proc_info
1328 +
1329 /*
1330 * Qualcomm Inc. Krait processors.
1331 */
1332 diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
1333 index aa7496be311d..6abcd4af8274 100644
1334 --- a/arch/arm/vfp/vfpmodule.c
1335 +++ b/arch/arm/vfp/vfpmodule.c
1336 @@ -597,13 +597,11 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
1337 }
1338
1339 /* Sanitise and restore the current VFP state from the provided structures. */
1340 -int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
1341 - struct user_vfp_exc __user *ufp_exc)
1342 +int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
1343 {
1344 struct thread_info *thread = current_thread_info();
1345 struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
1346 unsigned long fpexc;
1347 - int err = 0;
1348
1349 /* Disable VFP to avoid corrupting the new thread state. */
1350 vfp_flush_hwstate(thread);
1351 @@ -612,17 +610,16 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
1352 * Copy the floating point registers. There can be unused
1353 * registers see asm/hwcap.h for details.
1354 */
1355 - err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
1356 - sizeof(hwstate->fpregs));
1357 + memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
1358 /*
1359 * Copy the status and control register.
1360 */
1361 - __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
1362 + hwstate->fpscr = ufp->fpscr;
1363
1364 /*
1365 * Sanitise and restore the exception registers.
1366 */
1367 - __get_user_error(fpexc, &ufp_exc->fpexc, err);
1368 + fpexc = ufp_exc->fpexc;
1369
1370 /* Ensure the VFP is enabled. */
1371 fpexc |= FPEXC_EN;
1372 @@ -631,10 +628,10 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
1373 fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
1374 hwstate->fpexc = fpexc;
1375
1376 - __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
1377 - __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
1378 + hwstate->fpinst = ufp_exc->fpinst;
1379 + hwstate->fpinst2 = ufp_exc->fpinst2;
1380
1381 - return err ? -EFAULT : 0;
1382 + return 0;
1383 }
1384
1385 /*
1386 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
1387 index 1984e739f155..86249a24592d 100644
1388 --- a/arch/arm64/kernel/perf_event.c
1389 +++ b/arch/arm64/kernel/perf_event.c
1390 @@ -824,6 +824,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
1391 return 0;
1392 }
1393
1394 +static int armv8pmu_filter_match(struct perf_event *event)
1395 +{
1396 + unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
1397 + return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
1398 +}
1399 +
1400 static void armv8pmu_reset(void *info)
1401 {
1402 struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1403 @@ -970,6 +976,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
1404 cpu_pmu->reset = armv8pmu_reset,
1405 cpu_pmu->max_period = (1LLU << 32) - 1,
1406 cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
1407 + cpu_pmu->filter_match = armv8pmu_filter_match;
1408
1409 return 0;
1410 }
1411 diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
1412 index eb1f6030ab85..8bbbab611a3f 100644
1413 --- a/arch/mips/include/asm/processor.h
1414 +++ b/arch/mips/include/asm/processor.h
1415 @@ -13,6 +13,7 @@
1416
1417 #include <linux/atomic.h>
1418 #include <linux/cpumask.h>
1419 +#include <linux/sizes.h>
1420 #include <linux/threads.h>
1421
1422 #include <asm/cachectl.h>
1423 @@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
1424
1425 #endif
1426
1427 -/*
1428 - * One page above the stack is used for branch delay slot "emulation".
1429 - * See dsemul.c for details.
1430 - */
1431 -#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
1432 +#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
1433 +
1434 +extern unsigned long mips_stack_top(void);
1435 +#define STACK_TOP mips_stack_top()
1436
1437 /*
1438 * This decides where the kernel will search for a free chunk of vm
1439 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1440 index e8d772a2597d..e8b166e9146a 100644
1441 --- a/arch/mips/kernel/process.c
1442 +++ b/arch/mips/kernel/process.c
1443 @@ -31,6 +31,7 @@
1444 #include <linux/prctl.h>
1445 #include <linux/nmi.h>
1446
1447 +#include <asm/abi.h>
1448 #include <asm/asm.h>
1449 #include <asm/bootinfo.h>
1450 #include <asm/cpu.h>
1451 @@ -38,6 +39,7 @@
1452 #include <asm/dsp.h>
1453 #include <asm/fpu.h>
1454 #include <asm/irq.h>
1455 +#include <asm/mips-cps.h>
1456 #include <asm/msa.h>
1457 #include <asm/pgtable.h>
1458 #include <asm/mipsregs.h>
1459 @@ -644,6 +646,29 @@ out:
1460 return pc;
1461 }
1462
1463 +unsigned long mips_stack_top(void)
1464 +{
1465 + unsigned long top = TASK_SIZE & PAGE_MASK;
1466 +
1467 + /* One page for branch delay slot "emulation" */
1468 + top -= PAGE_SIZE;
1469 +
1470 + /* Space for the VDSO, data page & GIC user page */
1471 + top -= PAGE_ALIGN(current->thread.abi->vdso->size);
1472 + top -= PAGE_SIZE;
1473 + top -= mips_gic_present() ? PAGE_SIZE : 0;
1474 +
1475 + /* Space for cache colour alignment */
1476 + if (cpu_has_dc_aliases)
1477 + top -= shm_align_mask + 1;
1478 +
1479 + /* Space to randomize the VDSO base */
1480 + if (current->flags & PF_RANDOMIZE)
1481 + top -= VDSO_RANDOMIZE_SIZE;
1482 +
1483 + return top;
1484 +}
1485 +
1486 /*
1487 * Don't forget that the stack pointer must be aligned on a 8 bytes
1488 * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1489 diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
1490 index 8f845f6e5f42..48a9c6b90e07 100644
1491 --- a/arch/mips/kernel/vdso.c
1492 +++ b/arch/mips/kernel/vdso.c
1493 @@ -15,6 +15,7 @@
1494 #include <linux/ioport.h>
1495 #include <linux/kernel.h>
1496 #include <linux/mm.h>
1497 +#include <linux/random.h>
1498 #include <linux/sched.h>
1499 #include <linux/slab.h>
1500 #include <linux/timekeeper_internal.h>
1501 @@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
1502 }
1503 }
1504
1505 +static unsigned long vdso_base(void)
1506 +{
1507 + unsigned long base;
1508 +
1509 + /* Skip the delay slot emulation page */
1510 + base = STACK_TOP + PAGE_SIZE;
1511 +
1512 + if (current->flags & PF_RANDOMIZE) {
1513 + base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
1514 + base = PAGE_ALIGN(base);
1515 + }
1516 +
1517 + return base;
1518 +}
1519 +
1520 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
1521 {
1522 struct mips_vdso_image *image = current->thread.abi->vdso;
1523 @@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
1524 if (cpu_has_dc_aliases)
1525 size += shm_align_mask + 1;
1526
1527 - base = get_unmapped_area(NULL, 0, size, 0, 0);
1528 + base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
1529 if (IS_ERR_VALUE(base)) {
1530 ret = base;
1531 goto out;
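
The MIPS hunks above stop treating STACK_TOP as a fixed constant: mips_stack_top() now reserves room below the task size for the delay-slot emulation page, the VDSO pages and, when PF_RANDOMIZE is set, a VDSO_RANDOMIZE_SIZE window, and vdso_base() picks a page-aligned random offset inside that window as the hint for get_unmapped_area(). A small userspace sketch of the placement arithmetic follows; rand() stands in for get_random_int() and the constants are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	    4096UL
#define PAGE_ALIGN(x)	    (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VDSO_RANDOMIZE_SIZE (256UL << 20)	/* SZ_256M, 64-bit ABI case */

/* Mirrors vdso_base() above: skip the emulation page, then add a
 * random, page-aligned offset inside the randomisation window. */
static unsigned long pick_vdso_base(unsigned long stack_top, int randomize)
{
	unsigned long base = stack_top + PAGE_SIZE;

	if (randomize) {
		base += (unsigned long)rand() & (VDSO_RANDOMIZE_SIZE - 1);
		base = PAGE_ALIGN(base);
	}
	return base;
}

int main(void)
{
	unsigned long stack_top = 0x7f000000UL;		/* made-up STACK_TOP */

	printf("%#lx\n", pick_vdso_base(stack_top, 0));
	printf("%#lx\n", pick_vdso_base(stack_top, 1));
	return 0;
}
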
1532 diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
1533 index 9a677cd5997f..4dd13b503dbb 100644
1534 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
1535 +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
1536 @@ -102,7 +102,7 @@
1537 */
1538 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
1539 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
1540 - _PAGE_SOFT_DIRTY)
1541 + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
1542 /*
1543 * user access blocked by key
1544 */
1545 @@ -120,7 +120,7 @@
1546 */
1547 #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
1548 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
1549 - _PAGE_SOFT_DIRTY)
1550 + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
1551 /*
1552 * Mask of bits returned by pte_pgprot()
1553 */
1554 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
1555 index 246f15b4e64c..85f8279c885a 100644
1556 --- a/arch/x86/include/asm/pgtable_types.h
1557 +++ b/arch/x86/include/asm/pgtable_types.h
1558 @@ -124,7 +124,7 @@
1559 */
1560 #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
1561 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
1562 - _PAGE_SOFT_DIRTY)
1563 + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
1564 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
1565
1566 /*
1567 diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
1568 index f3a960488eae..dcf4dc9bf327 100644
1569 --- a/arch/x86/include/uapi/asm/kvm.h
1570 +++ b/arch/x86/include/uapi/asm/kvm.h
1571 @@ -360,5 +360,6 @@ struct kvm_sync_regs {
1572
1573 #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
1574 #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
1575 +#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
1576
1577 #endif /* _ASM_X86_KVM_H */
1578 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
1579 index 6d0fbff71d7a..13dfb55b84db 100644
1580 --- a/arch/x86/kvm/lapic.c
1581 +++ b/arch/x86/kvm/lapic.c
1582 @@ -1282,9 +1282,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
1583
1584 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1585 {
1586 - return kvm_apic_hw_enabled(apic) &&
1587 - addr >= apic->base_address &&
1588 - addr < apic->base_address + LAPIC_MMIO_LENGTH;
1589 + return addr >= apic->base_address &&
1590 + addr < apic->base_address + LAPIC_MMIO_LENGTH;
1591 }
1592
1593 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1594 @@ -1296,6 +1295,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1595 if (!apic_mmio_in_range(apic, address))
1596 return -EOPNOTSUPP;
1597
1598 + if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1599 + if (!kvm_check_has_quirk(vcpu->kvm,
1600 + KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1601 + return -EOPNOTSUPP;
1602 +
1603 + memset(data, 0xff, len);
1604 + return 0;
1605 + }
1606 +
1607 kvm_lapic_reg_read(apic, offset, len, data);
1608
1609 return 0;
1610 @@ -1806,6 +1814,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1611 if (!apic_mmio_in_range(apic, address))
1612 return -EOPNOTSUPP;
1613
1614 + if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1615 + if (!kvm_check_has_quirk(vcpu->kvm,
1616 + KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1617 + return -EOPNOTSUPP;
1618 +
1619 + return 0;
1620 + }
1621 +
1622 /*
1623 * APIC register must be aligned on 128-bits boundary.
1624 * 32/64/128 bits registers must be accessed thru 32 bits.
1625 diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
1626 index c823914b3a80..30bbe19b4b85 100644
1627 --- a/drivers/bluetooth/hci_ldisc.c
1628 +++ b/drivers/bluetooth/hci_ldisc.c
1629 @@ -539,6 +539,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
1630 }
1631 clear_bit(HCI_UART_PROTO_SET, &hu->flags);
1632
1633 + percpu_free_rwsem(&hu->proto_lock);
1634 +
1635 kfree(hu);
1636 }
1637
1638 diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
1639 index 08ef69945ffb..d977193842df 100644
1640 --- a/drivers/clk/x86/clk-pmc-atom.c
1641 +++ b/drivers/clk/x86/clk-pmc-atom.c
1642 @@ -55,6 +55,7 @@ struct clk_plt_data {
1643 u8 nparents;
1644 struct clk_plt *clks[PMC_CLK_NUM];
1645 struct clk_lookup *mclk_lookup;
1646 + struct clk_lookup *ether_clk_lookup;
1647 };
1648
1649 /* Return an index in parent table */
1650 @@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
1651 pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
1652 spin_lock_init(&pclk->lock);
1653
1654 - /*
1655 - * If the clock was already enabled by the firmware mark it as critical
1656 - * to avoid it being gated by the clock framework if no driver owns it.
1657 - */
1658 - if (plt_clk_is_enabled(&pclk->hw))
1659 - init.flags |= CLK_IS_CRITICAL;
1660 -
1661 ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
1662 if (ret) {
1663 pclk = ERR_PTR(ret);
1664 @@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
1665 goto err_unreg_clk_plt;
1666 }
1667
1668 + data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
1669 + "ether_clk", NULL);
1670 + if (!data->ether_clk_lookup) {
1671 + err = -ENOMEM;
1672 + goto err_drop_mclk;
1673 + }
1674 +
1675 plt_clk_free_parent_names_loop(parent_names, data->nparents);
1676
1677 platform_set_drvdata(pdev, data);
1678 return 0;
1679
1680 +err_drop_mclk:
1681 + clkdev_drop(data->mclk_lookup);
1682 err_unreg_clk_plt:
1683 plt_clk_unregister_loop(data, i);
1684 plt_clk_unregister_parents(data);
1685 @@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
1686
1687 data = platform_get_drvdata(pdev);
1688
1689 + clkdev_drop(data->ether_clk_lookup);
1690 clkdev_drop(data->mclk_lookup);
1691 plt_clk_unregister_loop(data, PMC_CLK_NUM);
1692 plt_clk_unregister_parents(data);
1693 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
1694 index bdabaa3399db..e2c0ff03f386 100644
1695 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
1696 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
1697 @@ -576,7 +576,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
1698
1699 while (true) {
1700 temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
1701 - if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
1702 + if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
1703 break;
1704 if (timeout <= 0)
1705 return -ETIME;
1706 diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
1707 index 7aa7b9cb6203..efefcfa24a4c 100644
1708 --- a/drivers/i2c/busses/i2c-scmi.c
1709 +++ b/drivers/i2c/busses/i2c-scmi.c
1710 @@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
1711 mt_params[3].type = ACPI_TYPE_INTEGER;
1712 mt_params[3].integer.value = len;
1713 mt_params[4].type = ACPI_TYPE_BUFFER;
1714 + mt_params[4].buffer.length = len;
1715 mt_params[4].buffer.pointer = data->block + 1;
1716 }
1717 break;
1718 diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
1719 index e2ea57d5376e..b5f541112fca 100644
1720 --- a/drivers/md/dm-cache-target.c
1721 +++ b/drivers/md/dm-cache-target.c
1722 @@ -3571,14 +3571,13 @@ static int __init dm_cache_init(void)
1723 int r;
1724
1725 migration_cache = KMEM_CACHE(dm_cache_migration, 0);
1726 - if (!migration_cache) {
1727 - dm_unregister_target(&cache_target);
1728 + if (!migration_cache)
1729 return -ENOMEM;
1730 - }
1731
1732 r = dm_register_target(&cache_target);
1733 if (r) {
1734 DMERR("cache target registration failed: %d", r);
1735 + kmem_cache_destroy(migration_cache);
1736 return r;
1737 }
1738
1739 diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
1740 index b82cb1ab1eaa..0c1ef63c3461 100644
1741 --- a/drivers/md/dm-flakey.c
1742 +++ b/drivers/md/dm-flakey.c
1743 @@ -463,7 +463,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
1744 static struct target_type flakey_target = {
1745 .name = "flakey",
1746 .version = {1, 5, 0},
1747 +#ifdef CONFIG_BLK_DEV_ZONED
1748 .features = DM_TARGET_ZONED_HM,
1749 +#endif
1750 .module = THIS_MODULE,
1751 .ctr = flakey_ctr,
1752 .dtr = flakey_dtr,
1753 diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
1754 index d5f8eff7c11d..a53de71bc30c 100644
1755 --- a/drivers/md/dm-linear.c
1756 +++ b/drivers/md/dm-linear.c
1757 @@ -101,6 +101,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
1758 return DM_MAPIO_REMAPPED;
1759 }
1760
1761 +#ifdef CONFIG_BLK_DEV_ZONED
1762 static int linear_end_io(struct dm_target *ti, struct bio *bio,
1763 blk_status_t *error)
1764 {
1765 @@ -111,6 +112,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio,
1766
1767 return DM_ENDIO_DONE;
1768 }
1769 +#endif
1770
1771 static void linear_status(struct dm_target *ti, status_type_t type,
1772 unsigned status_flags, char *result, unsigned maxlen)
1773 @@ -187,12 +189,16 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
1774 static struct target_type linear_target = {
1775 .name = "linear",
1776 .version = {1, 4, 0},
1777 +#ifdef CONFIG_BLK_DEV_ZONED
1778 + .end_io = linear_end_io,
1779 .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
1780 +#else
1781 + .features = DM_TARGET_PASSES_INTEGRITY,
1782 +#endif
1783 .module = THIS_MODULE,
1784 .ctr = linear_ctr,
1785 .dtr = linear_dtr,
1786 .map = linear_map,
1787 - .end_io = linear_end_io,
1788 .status = linear_status,
1789 .prepare_ioctl = linear_prepare_ioctl,
1790 .iterate_devices = linear_iterate_devices,
1791 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1792 index 24ec6e039448..a56008b2e7c2 100644
1793 --- a/drivers/md/dm.c
1794 +++ b/drivers/md/dm.c
1795 @@ -1034,12 +1034,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1796 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
1797
1798 /*
1799 - * The zone descriptors obtained with a zone report indicate
1800 - * zone positions within the target device. The zone descriptors
1801 - * must be remapped to match their position within the dm device.
1802 - * A target may call dm_remap_zone_report after completion of a
1803 - * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
1804 - * from the target device mapping to the dm device.
1805 + * The zone descriptors obtained with a zone report indicate zone positions
1806 + * within the target backing device, regardless of whether that device is a partition
1807 + * and regardless of the target mapping start sector on the device or partition.
1808 + * The zone descriptors' start sector and write pointer position must be adjusted
1809 + * to match their relative position within the dm device.
1810 + * A target may call dm_remap_zone_report() after completion of a
1811 + * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
1812 + * backing device.
1813 */
1814 void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
1815 {
1816 @@ -1050,6 +1052,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
1817 struct blk_zone *zone;
1818 unsigned int nr_rep = 0;
1819 unsigned int ofst;
1820 + sector_t part_offset;
1821 struct bio_vec bvec;
1822 struct bvec_iter iter;
1823 void *addr;
1824 @@ -1057,6 +1060,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
1825 if (bio->bi_status)
1826 return;
1827
1828 + /*
1829 + * bio sector was incremented by the request size on completion. Taking
1830 + * into account the original request sector, the target start offset on
1831 + * the backing device and the target mapping offset (ti->begin), the
1832 + * start sector of the backing device can be determined. The partition offset is
1833 + * always 0 if the target uses a whole device.
1834 + */
1835 + part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
1836 +
1837 /*
1838 * Remap the start sector of the reported zones. For sequential zones,
1839 * also remap the write pointer position.
1840 @@ -1074,6 +1086,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
1841 /* Set zones start sector */
1842 while (hdr->nr_zones && ofst < bvec.bv_len) {
1843 zone = addr + ofst;
1844 + zone->start -= part_offset;
1845 if (zone->start >= start + ti->len) {
1846 hdr->nr_zones = 0;
1847 break;
1848 @@ -1085,7 +1098,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
1849 else if (zone->cond == BLK_ZONE_COND_EMPTY)
1850 zone->wp = zone->start;
1851 else
1852 - zone->wp = zone->wp + ti->begin - start;
1853 + zone->wp = zone->wp + ti->begin - start - part_offset;
1854 }
1855 ofst += sizeof(struct blk_zone);
1856 hdr->nr_zones--;
1857 diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
1858 index 7aab376ecb84..3785c638d530 100644
1859 --- a/drivers/mfd/omap-usb-host.c
1860 +++ b/drivers/mfd/omap-usb-host.c
1861 @@ -548,8 +548,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
1862 }
1863
1864 static const struct of_device_id usbhs_child_match_table[] = {
1865 - { .compatible = "ti,omap-ehci", },
1866 - { .compatible = "ti,omap-ohci", },
1867 + { .compatible = "ti,ehci-omap", },
1868 + { .compatible = "ti,ohci-omap3", },
1869 { }
1870 };
1871
1872 @@ -875,6 +875,7 @@ static struct platform_driver usbhs_omap_driver = {
1873 .pm = &usbhsomap_dev_pm_ops,
1874 .of_match_table = usbhs_omap_dt_ids,
1875 },
1876 + .probe = usbhs_omap_probe,
1877 .remove = usbhs_omap_remove,
1878 };
1879
1880 @@ -884,9 +885,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
1881 MODULE_LICENSE("GPL v2");
1882 MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
1883
1884 -static int __init omap_usbhs_drvinit(void)
1885 +static int omap_usbhs_drvinit(void)
1886 {
1887 - return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
1888 + return platform_driver_register(&usbhs_omap_driver);
1889 }
1890
1891 /*
1892 @@ -898,7 +899,7 @@ static int __init omap_usbhs_drvinit(void)
1893 */
1894 fs_initcall_sync(omap_usbhs_drvinit);
1895
1896 -static void __exit omap_usbhs_drvexit(void)
1897 +static void omap_usbhs_drvexit(void)
1898 {
1899 platform_driver_unregister(&usbhs_omap_driver);
1900 }
1901 diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
1902 index 4281fdc0a13c..ce6dd49fbb98 100644
1903 --- a/drivers/mmc/core/block.c
1904 +++ b/drivers/mmc/core/block.c
1905 @@ -1613,6 +1613,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
1906 brq->data.blocks = card->host->max_blk_count;
1907
1908 if (brq->data.blocks > 1) {
1909 + /*
1910 + * Some SD cards in SPI mode return a CRC error or even lock up
1911 + * completely when trying to read the last block using a
1912 + * multiblock read command.
1913 + */
1914 + if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
1915 + (blk_rq_pos(req) + blk_rq_sectors(req) ==
1916 + get_capacity(md->disk)))
1917 + brq->data.blocks--;
1918 +
1919 /*
1920 * After a read error, we redo the request one sector
1921 * at a time in order to accurately determine which
1922 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1923 index 15aedb64a02b..cf64a365362b 100644
1924 --- a/drivers/net/bonding/bond_main.c
1925 +++ b/drivers/net/bonding/bond_main.c
1926 @@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
1927 static void bond_slave_arr_handler(struct work_struct *work);
1928 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
1929 int mod);
1930 +static void bond_netdev_notify_work(struct work_struct *work);
1931
1932 /*---------------------------- General routines -----------------------------*/
1933
1934 @@ -1176,9 +1177,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1935 }
1936 }
1937
1938 - /* don't change skb->dev for link-local packets */
1939 - if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
1940 + /* Link-local multicast packets should be passed to the
1941 + * stack on the link they arrive as well as pass them to the
1942 + * bond-master device. These packets are mostly usable when the
1943 + * stack receives them on the link on which they arrive
1944 + * (e.g. LLDP), but they must also be available on the master. Some of
1945 + * the use cases include (but are not limited to): LLDP agents
1946 + * that must be able to operate both on enslaved interfaces as
1947 + * well as on bonds themselves; linux bridges that must be able
1948 + * to process/pass BPDUs from attached bonds when any kind of
1949 + * STP version is enabled on the network.
1950 + */
1951 + if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
1952 + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1953 +
1954 + if (nskb) {
1955 + nskb->dev = bond->dev;
1956 + nskb->queue_mapping = 0;
1957 + netif_rx(nskb);
1958 + }
1959 return RX_HANDLER_PASS;
1960 + }
1961 if (bond_should_deliver_exact_match(skb, slave, bond))
1962 return RX_HANDLER_EXACT;
1963
1964 @@ -1254,6 +1273,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
1965 return NULL;
1966 }
1967 }
1968 + INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
1969 +
1970 return slave;
1971 }
1972
1973 @@ -1261,6 +1282,7 @@ static void bond_free_slave(struct slave *slave)
1974 {
1975 struct bonding *bond = bond_get_bond_by_slave(slave);
1976
1977 + cancel_delayed_work_sync(&slave->notify_work);
1978 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1979 kfree(SLAVE_AD_INFO(slave));
1980
1981 @@ -1282,39 +1304,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
1982 info->link_failure_count = slave->link_failure_count;
1983 }
1984
1985 -static void bond_netdev_notify(struct net_device *dev,
1986 - struct netdev_bonding_info *info)
1987 -{
1988 - rtnl_lock();
1989 - netdev_bonding_info_change(dev, info);
1990 - rtnl_unlock();
1991 -}
1992 -
1993 static void bond_netdev_notify_work(struct work_struct *_work)
1994 {
1995 - struct netdev_notify_work *w =
1996 - container_of(_work, struct netdev_notify_work, work.work);
1997 + struct slave *slave = container_of(_work, struct slave,
1998 + notify_work.work);
1999 +
2000 + if (rtnl_trylock()) {
2001 + struct netdev_bonding_info binfo;
2002
2003 - bond_netdev_notify(w->dev, &w->bonding_info);
2004 - dev_put(w->dev);
2005 - kfree(w);
2006 + bond_fill_ifslave(slave, &binfo.slave);
2007 + bond_fill_ifbond(slave->bond, &binfo.master);
2008 + netdev_bonding_info_change(slave->dev, &binfo);
2009 + rtnl_unlock();
2010 + } else {
2011 + queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
2012 + }
2013 }
2014
2015 void bond_queue_slave_event(struct slave *slave)
2016 {
2017 - struct bonding *bond = slave->bond;
2018 - struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
2019 -
2020 - if (!nnw)
2021 - return;
2022 -
2023 - dev_hold(slave->dev);
2024 - nnw->dev = slave->dev;
2025 - bond_fill_ifslave(slave, &nnw->bonding_info.slave);
2026 - bond_fill_ifbond(bond, &nnw->bonding_info.master);
2027 - INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
2028 -
2029 - queue_delayed_work(slave->bond->wq, &nnw->work, 0);
2030 + queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
2031 }
2032
2033 void bond_lower_state_changed(struct slave *slave)
2034 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
2035 index 72d6ffbfd638..0132921f408a 100644
2036 --- a/drivers/net/dsa/bcm_sf2.c
2037 +++ b/drivers/net/dsa/bcm_sf2.c
2038 @@ -772,7 +772,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
2039 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
2040 {
2041 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
2042 - unsigned int port;
2043 int ret;
2044
2045 ret = bcm_sf2_sw_rst(priv);
2046 @@ -784,12 +783,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
2047 if (priv->hw_params.num_gphy == 1)
2048 bcm_sf2_gphy_enable_set(ds, true);
2049
2050 - for (port = 0; port < DSA_MAX_PORTS; port++) {
2051 - if ((1 << port) & ds->enabled_port_mask)
2052 - bcm_sf2_port_setup(ds, port, NULL);
2053 - else if (dsa_is_cpu_port(ds, port))
2054 - bcm_sf2_imp_setup(ds, port);
2055 - }
2056 + ds->ops->setup(ds);
2057
2058 return 0;
2059 }
2060 @@ -1270,10 +1264,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
2061 {
2062 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
2063
2064 - /* Disable all ports and interrupts */
2065 priv->wol_ports_mask = 0;
2066 - bcm_sf2_sw_suspend(priv->dev->ds);
2067 dsa_unregister_switch(priv->dev->ds);
2068 + /* Disable all ports and interrupts */
2069 + bcm_sf2_sw_suspend(priv->dev->ds);
2070 bcm_sf2_mdio_unregister(priv);
2071
2072 return 0;
2073 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
2074 index 0654e0c76bc2..640babf752ea 100644
2075 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
2076 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
2077 @@ -222,9 +222,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
2078 }
2079
2080 /* for single fragment packets use build_skb() */
2081 - if (buff->is_eop) {
2082 + if (buff->is_eop &&
2083 + buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
2084 skb = build_skb(page_address(buff->page),
2085 - buff->len + AQ_SKB_ALIGN);
2086 + AQ_CFG_RX_FRAME_MAX);
2087 if (unlikely(!skb)) {
2088 err = -ENOMEM;
2089 goto err_exit;
2090 @@ -244,18 +245,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
2091 buff->len - ETH_HLEN,
2092 SKB_TRUESIZE(buff->len - ETH_HLEN));
2093
2094 - for (i = 1U, next_ = buff->next,
2095 - buff_ = &self->buff_ring[next_]; true;
2096 - next_ = buff_->next,
2097 - buff_ = &self->buff_ring[next_], ++i) {
2098 - skb_add_rx_frag(skb, i, buff_->page, 0,
2099 - buff_->len,
2100 - SKB_TRUESIZE(buff->len -
2101 - ETH_HLEN));
2102 - buff_->is_cleaned = 1;
2103 -
2104 - if (buff_->is_eop)
2105 - break;
2106 + if (!buff->is_eop) {
2107 + for (i = 1U, next_ = buff->next,
2108 + buff_ = &self->buff_ring[next_];
2109 + true; next_ = buff_->next,
2110 + buff_ = &self->buff_ring[next_], ++i) {
2111 + skb_add_rx_frag(skb, i,
2112 + buff_->page, 0,
2113 + buff_->len,
2114 + SKB_TRUESIZE(buff->len -
2115 + ETH_HLEN));
2116 + buff_->is_cleaned = 1;
2117 +
2118 + if (buff_->is_eop)
2119 + break;
2120 + }
2121 }
2122 }
2123
2124 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
2125 index 0fff2432ab4c..6e7f9a470ea1 100644
2126 --- a/drivers/net/ethernet/broadcom/bcmsysport.c
2127 +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
2128 @@ -1001,14 +1001,22 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
2129 {
2130 u32 reg;
2131
2132 - /* Stop monitoring MPD interrupt */
2133 - intrl2_0_mask_set(priv, INTRL2_0_MPD);
2134 -
2135 /* Clear the MagicPacket detection logic */
2136 reg = umac_readl(priv, UMAC_MPD_CTRL);
2137 reg &= ~MPD_EN;
2138 umac_writel(priv, reg, UMAC_MPD_CTRL);
2139
2140 + reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
2141 + if (reg & INTRL2_0_MPD)
2142 + netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
2143 +
2144 + if (reg & INTRL2_0_BRCM_MATCH_TAG) {
2145 + reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
2146 + RXCHK_BRCM_TAG_MATCH_MASK;
2147 + netdev_info(priv->netdev,
2148 + "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
2149 + }
2150 +
2151 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
2152 }
2153
2154 @@ -1043,11 +1051,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
2155 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
2156 bcm_sysport_tx_reclaim_all(priv);
2157
2158 - if (priv->irq0_stat & INTRL2_0_MPD) {
2159 - netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
2160 - bcm_sysport_resume_from_wol(priv);
2161 - }
2162 -
2163 if (!priv->is_lite)
2164 goto out;
2165
2166 @@ -2248,9 +2251,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2167 /* UniMAC receive needs to be turned on */
2168 umac_enable_set(priv, CMD_RX_EN, 1);
2169
2170 - /* Enable the interrupt wake-up source */
2171 - intrl2_0_mask_clear(priv, INTRL2_0_MPD);
2172 -
2173 netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2174
2175 return 0;
2176 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2177 index 937db8019289..da6c73868fa0 100644
2178 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2179 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2180 @@ -1864,8 +1864,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2181 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2182 tx_pkts++;
2183 /* return full budget so NAPI will complete. */
2184 - if (unlikely(tx_pkts > bp->tx_wake_thresh))
2185 + if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2186 rx_pkts = budget;
2187 + raw_cons = NEXT_RAW_CMP(raw_cons);
2188 + break;
2189 + }
2190 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2191 if (likely(budget))
2192 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
2193 @@ -1893,7 +1896,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2194 }
2195 raw_cons = NEXT_RAW_CMP(raw_cons);
2196
2197 - if (rx_pkts == budget)
2198 + if (rx_pkts && rx_pkts == budget)
2199 break;
2200 }
2201
2202 @@ -2007,8 +2010,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
2203 while (1) {
2204 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
2205
2206 - if (work_done >= budget)
2207 + if (work_done >= budget) {
2208 + if (!budget)
2209 + BNXT_CP_DB_REARM(cpr->cp_doorbell,
2210 + cpr->cp_raw_cons);
2211 break;
2212 + }
2213
2214 if (!bnxt_has_work(bp, cpr)) {
2215 if (napi_complete_done(napi, work_done))
2216 @@ -2957,10 +2964,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
2217 {
2218 struct pci_dev *pdev = bp->pdev;
2219
2220 - dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2221 - bp->hwrm_cmd_resp_dma_addr);
2222 -
2223 - bp->hwrm_cmd_resp_addr = NULL;
2224 + if (bp->hwrm_cmd_resp_addr) {
2225 + dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2226 + bp->hwrm_cmd_resp_dma_addr);
2227 + bp->hwrm_cmd_resp_addr = NULL;
2228 + }
2229 if (bp->hwrm_dbg_resp_addr) {
2230 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2231 bp->hwrm_dbg_resp_addr,
2232 @@ -8210,6 +8218,7 @@ init_err_cleanup_tc:
2233 bnxt_clear_int_mode(bp);
2234
2235 init_err_pci_clean:
2236 + bnxt_free_hwrm_resources(bp);
2237 bnxt_cleanup_pci(bp);
2238
2239 init_err_free:
2240 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2241 index 6a185344b378..149d30f60459 100644
2242 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2243 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
2244 @@ -78,17 +78,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
2245 return 0;
2246 }
2247
2248 -static void bnxt_tc_parse_vlan(struct bnxt *bp,
2249 - struct bnxt_tc_actions *actions,
2250 - const struct tc_action *tc_act)
2251 +static int bnxt_tc_parse_vlan(struct bnxt *bp,
2252 + struct bnxt_tc_actions *actions,
2253 + const struct tc_action *tc_act)
2254 {
2255 - if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
2256 + switch (tcf_vlan_action(tc_act)) {
2257 + case TCA_VLAN_ACT_POP:
2258 actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
2259 - } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
2260 + break;
2261 + case TCA_VLAN_ACT_PUSH:
2262 actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
2263 actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
2264 actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
2265 + break;
2266 + default:
2267 + return -EOPNOTSUPP;
2268 }
2269 + return 0;
2270 }
2271
2272 static int bnxt_tc_parse_actions(struct bnxt *bp,
2273 @@ -122,7 +128,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
2274
2275 /* Push/pop VLAN */
2276 if (is_tcf_vlan(tc_act)) {
2277 - bnxt_tc_parse_vlan(bp, actions, tc_act);
2278 + rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
2279 + if (rc)
2280 + return rc;
2281 continue;
2282 }
2283 }
2284 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
2285 index c1787be6a258..b4f92de1efbd 100644
2286 --- a/drivers/net/ethernet/cadence/macb_main.c
2287 +++ b/drivers/net/ethernet/cadence/macb_main.c
2288 @@ -3301,6 +3301,13 @@ static const struct macb_config at91sam9260_config = {
2289 .init = macb_init,
2290 };
2291
2292 +static const struct macb_config sama5d3macb_config = {
2293 + .caps = MACB_CAPS_SG_DISABLED
2294 + | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
2295 + .clk_init = macb_clk_init,
2296 + .init = macb_init,
2297 +};
2298 +
2299 static const struct macb_config pc302gem_config = {
2300 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
2301 .dma_burst_length = 16,
2302 @@ -3368,6 +3375,7 @@ static const struct of_device_id macb_dt_ids[] = {
2303 { .compatible = "cdns,gem", .data = &pc302gem_config },
2304 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
2305 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
2306 + { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
2307 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
2308 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
2309 { .compatible = "cdns,emac", .data = &emac_config },
2310 diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
2311 index a051e582d541..79d03f8ee7b1 100644
2312 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c
2313 +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
2314 @@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
2315 if (cb->type == DESC_TYPE_SKB)
2316 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
2317 ring_to_dma_dir(ring));
2318 - else
2319 + else if (cb->length)
2320 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
2321 ring_to_dma_dir(ring));
2322 }
2323 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2324 index 07d6a9cf2c55..4faadc3ffe8c 100644
2325 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2326 +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2327 @@ -40,9 +40,9 @@
2328 #define SKB_TMP_LEN(SKB) \
2329 (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
2330
2331 -static void fill_v2_desc(struct hnae_ring *ring, void *priv,
2332 - int size, dma_addr_t dma, int frag_end,
2333 - int buf_num, enum hns_desc_type type, int mtu)
2334 +static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
2335 + int send_sz, dma_addr_t dma, int frag_end,
2336 + int buf_num, enum hns_desc_type type, int mtu)
2337 {
2338 struct hnae_desc *desc = &ring->desc[ring->next_to_use];
2339 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2340 @@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
2341 desc_cb->type = type;
2342
2343 desc->addr = cpu_to_le64(dma);
2344 - desc->tx.send_size = cpu_to_le16((u16)size);
2345 + desc->tx.send_size = cpu_to_le16((u16)send_sz);
2346
2347 /* config bd buffer end */
2348 hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
2349 @@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
2350 ring_ptr_move_fw(ring, next_to_use);
2351 }
2352
2353 +static void fill_v2_desc(struct hnae_ring *ring, void *priv,
2354 + int size, dma_addr_t dma, int frag_end,
2355 + int buf_num, enum hns_desc_type type, int mtu)
2356 +{
2357 + fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
2358 + buf_num, type, mtu);
2359 +}
2360 +
2361 static const struct acpi_device_id hns_enet_acpi_match[] = {
2362 { "HISI00C1", 0 },
2363 { "HISI00C2", 0 },
2364 @@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
2365
2366 /* when the frag size is bigger than hardware, split this frag */
2367 for (k = 0; k < frag_buf_num; k++)
2368 - fill_v2_desc(ring, priv,
2369 - (k == frag_buf_num - 1) ?
2370 + fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
2371 + (k == frag_buf_num - 1) ?
2372 sizeoflast : BD_MAX_SEND_SIZE,
2373 - dma + BD_MAX_SEND_SIZE * k,
2374 - frag_end && (k == frag_buf_num - 1) ? 1 : 0,
2375 - buf_num,
2376 - (type == DESC_TYPE_SKB && !k) ?
2377 + dma + BD_MAX_SEND_SIZE * k,
2378 + frag_end && (k == frag_buf_num - 1) ? 1 : 0,
2379 + buf_num,
2380 + (type == DESC_TYPE_SKB && !k) ?
2381 DESC_TYPE_SKB : DESC_TYPE_PAGE,
2382 - mtu);
2383 + mtu);
2384 }
2385
2386 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
2387 diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
2388 index 529be74f609d..00e6f1d155a6 100644
2389 --- a/drivers/net/ethernet/marvell/mvpp2.c
2390 +++ b/drivers/net/ethernet/marvell/mvpp2.c
2391 @@ -33,6 +33,7 @@
2392 #include <linux/hrtimer.h>
2393 #include <linux/ktime.h>
2394 #include <linux/regmap.h>
2395 +#include <linux/if_vlan.h>
2396 #include <uapi/linux/ppp_defs.h>
2397 #include <net/ip.h>
2398 #include <net/ipv6.h>
2399 @@ -5101,7 +5102,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2400 }
2401
2402 /* Set Tx descriptors fields relevant for CSUM calculation */
2403 -static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
2404 +static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2405 int ip_hdr_len, int l4_proto)
2406 {
2407 u32 command;
2408 @@ -6065,14 +6066,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
2409 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2410 int ip_hdr_len = 0;
2411 u8 l4_proto;
2412 + __be16 l3_proto = vlan_get_protocol(skb);
2413
2414 - if (skb->protocol == htons(ETH_P_IP)) {
2415 + if (l3_proto == htons(ETH_P_IP)) {
2416 struct iphdr *ip4h = ip_hdr(skb);
2417
2418 /* Calculate IPv4 checksum and L4 checksum */
2419 ip_hdr_len = ip4h->ihl;
2420 l4_proto = ip4h->protocol;
2421 - } else if (skb->protocol == htons(ETH_P_IPV6)) {
2422 + } else if (l3_proto == htons(ETH_P_IPV6)) {
2423 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2424
2425 /* Read l4_protocol from one of IPv6 extra headers */
2426 @@ -6084,7 +6086,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
2427 }
2428
2429 return mvpp2_txq_desc_csum(skb_network_offset(skb),
2430 - skb->protocol, ip_hdr_len, l4_proto);
2431 + l3_proto, ip_hdr_len, l4_proto);
2432 }
2433
2434 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
2435 @@ -6532,10 +6534,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
2436 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
2437 }
2438
2439 - cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
2440 - if (cause_tx) {
2441 - cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
2442 - mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
2443 + if (port->has_tx_irqs) {
2444 + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
2445 + if (cause_tx) {
2446 + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
2447 + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
2448 + }
2449 }
2450
2451 /* Process RX packets */
2452 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2453 index e28f9dab9ceb..9e0be077df9c 100644
2454 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2455 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2456 @@ -864,6 +864,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
2457 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
2458 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
2459 }
2460 + } else {
2461 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
2462 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2463 }
2464
2465 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2466 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2467 index de72b66df3e5..1af9894abd95 100644
2468 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2469 +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2470 @@ -1922,7 +1922,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
2471 u32 max_guarantee = 0;
2472 int i;
2473
2474 - for (i = 0; i <= esw->total_vports; i++) {
2475 + for (i = 0; i < esw->total_vports; i++) {
2476 evport = &esw->vports[i];
2477 if (!evport->enabled || evport->info.min_rate < max_guarantee)
2478 continue;
2479 @@ -1942,7 +1942,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
2480 int err;
2481 int i;
2482
2483 - for (i = 0; i <= esw->total_vports; i++) {
2484 + for (i = 0; i < esw->total_vports; i++) {
2485 evport = &esw->vports[i];
2486 if (!evport->enabled)
2487 continue;
2488 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2489 index 56751990bcee..6df2c8b2ce6f 100644
2490 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2491 +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
2492 @@ -2058,14 +2058,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
2493 return true;
2494 }
2495
2496 -static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
2497 +static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
2498 {
2499 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
2500 struct nfp_net *nn = r_vec->nfp_net;
2501 struct nfp_net_dp *dp = &nn->dp;
2502 + unsigned int budget = 512;
2503
2504 - while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
2505 + while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
2506 continue;
2507 +
2508 + return budget;
2509 }
2510
2511 static void nfp_ctrl_poll(unsigned long arg)
2512 @@ -2077,9 +2080,13 @@ static void nfp_ctrl_poll(unsigned long arg)
2513 __nfp_ctrl_tx_queued(r_vec);
2514 spin_unlock_bh(&r_vec->lock);
2515
2516 - nfp_ctrl_rx(r_vec);
2517 -
2518 - nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
2519 + if (nfp_ctrl_rx(r_vec)) {
2520 + nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
2521 + } else {
2522 + tasklet_schedule(&r_vec->tasklet);
2523 + nn_dp_warn(&r_vec->nfp_net->dp,
2524 + "control message budget exceeded!\n");
2525 + }
2526 }
2527
2528 /* Setup and Configuration
2529 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
2530 index 81312924df14..0c443ea98479 100644
2531 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
2532 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
2533 @@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
2534 int (*config_loopback) (struct qlcnic_adapter *, u8);
2535 int (*clear_loopback) (struct qlcnic_adapter *, u8);
2536 int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
2537 - void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
2538 + void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
2539 + u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
2540 int (*get_board_info) (struct qlcnic_adapter *);
2541 void (*set_mac_filter_count) (struct qlcnic_adapter *);
2542 void (*free_mac_list) (struct qlcnic_adapter *);
2543 @@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
2544 }
2545
2546 static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
2547 - u64 *addr, u16 id)
2548 + u64 *addr, u16 vlan,
2549 + struct qlcnic_host_tx_ring *tx_ring)
2550 {
2551 - adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
2552 + adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
2553 }
2554
2555 static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
2556 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
2557 index 46b0372dd032..1fc84d8f891b 100644
2558 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
2559 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
2560 @@ -2134,7 +2134,8 @@ out:
2561 }
2562
2563 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
2564 - u16 vlan_id)
2565 + u16 vlan_id,
2566 + struct qlcnic_host_tx_ring *tx_ring)
2567 {
2568 u8 mac[ETH_ALEN];
2569 memcpy(&mac, addr, ETH_ALEN);
2570 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
2571 index b75a81246856..73fe2f64491d 100644
2572 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
2573 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
2574 @@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
2575 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
2576 int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
2577 int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
2578 -void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
2579 +void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
2580 + u16 vlan, struct qlcnic_host_tx_ring *ring);
2581 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
2582 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
2583 void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
2584 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
2585 index 4bb33af8e2b3..56a3bd9e37dc 100644
2586 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
2587 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
2588 @@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
2589 struct net_device *netdev);
2590 void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
2591 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
2592 - u64 *uaddr, u16 vlan_id);
2593 + u64 *uaddr, u16 vlan_id,
2594 + struct qlcnic_host_tx_ring *tx_ring);
2595 int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
2596 struct ethtool_coalesce *);
2597 int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
2598 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
2599 index 84dd83031a1b..9647578cbe6a 100644
2600 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
2601 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
2602 @@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
2603 }
2604
2605 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
2606 - u16 vlan_id)
2607 + u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
2608 {
2609 struct cmd_desc_type0 *hwdesc;
2610 struct qlcnic_nic_req *req;
2611 struct qlcnic_mac_req *mac_req;
2612 struct qlcnic_vlan_req *vlan_req;
2613 - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2614 u32 producer;
2615 u64 word;
2616
2617 @@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
2618
2619 static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
2620 struct cmd_desc_type0 *first_desc,
2621 - struct sk_buff *skb)
2622 + struct sk_buff *skb,
2623 + struct qlcnic_host_tx_ring *tx_ring)
2624 {
2625 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
2626 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
2627 @@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
2628 tmp_fil->vlan_id == vlan_id) {
2629 if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
2630 qlcnic_change_filter(adapter, &src_addr,
2631 - vlan_id);
2632 + vlan_id, tx_ring);
2633 tmp_fil->ftime = jiffies;
2634 return;
2635 }
2636 @@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
2637 if (!fil)
2638 return;
2639
2640 - qlcnic_change_filter(adapter, &src_addr, vlan_id);
2641 + qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
2642 fil->ftime = jiffies;
2643 fil->vlan_id = vlan_id;
2644 memcpy(fil->faddr, &src_addr, ETH_ALEN);
2645 @@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2646 }
2647
2648 if (adapter->drv_mac_learn)
2649 - qlcnic_send_filter(adapter, first_desc, skb);
2650 + qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
2651
2652 tx_ring->tx_stats.tx_bytes += skb->len;
2653 tx_ring->tx_stats.xmit_called++;
2654 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2655 index 1a9a382bf1c4..bafbebeb0e00 100644
2656 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2657 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2658 @@ -2190,8 +2190,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2659 priv->plat->dma_cfg,
2660 tx_q->dma_tx_phy, chan);
2661
2662 - tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2663 - (DMA_TX_SIZE * sizeof(struct dma_desc));
2664 + tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2665 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2666 tx_q->tx_tail_addr,
2667 chan);
2668 @@ -2963,6 +2962,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2669
2670 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2671
2672 + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2673 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2674 queue);
2675
2676 @@ -3178,9 +3178,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2677
2678 if (priv->synopsys_id < DWMAC_CORE_4_00)
2679 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2680 - else
2681 + else {
2682 + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2683 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2684 queue);
2685 + }
2686
2687 return NETDEV_TX_OK;
2688
2689 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2690 index 195eb7e71473..d48cc32dc507 100644
2691 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2692 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2693 @@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
2694 * Description:
2695 * This function validates the number of Unicast address entries supported
2696 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
2697 - * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
2698 + * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
2699 * logic. This function validates a valid, supported configuration is
2700 * selected, and defaults to 1 Unicast address if an unsupported
2701 * configuration is selected.
2702 @@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
2703 int x = ucast_entries;
2704
2705 switch (x) {
2706 - case 1:
2707 - case 32:
2708 + case 1 ... 32:
2709 case 64:
2710 case 128:
2711 break;
2712 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
2713 index aba16d81e9bb..2d90cffae9ff 100644
2714 --- a/drivers/net/hyperv/netvsc_drv.c
2715 +++ b/drivers/net/hyperv/netvsc_drv.c
2716 @@ -2110,17 +2110,15 @@ static int netvsc_remove(struct hv_device *dev)
2717
2718 cancel_delayed_work_sync(&ndev_ctx->dwork);
2719
2720 - rcu_read_lock();
2721 - nvdev = rcu_dereference(ndev_ctx->nvdev);
2722 -
2723 - if (nvdev)
2724 + rtnl_lock();
2725 + nvdev = rtnl_dereference(ndev_ctx->nvdev);
2726 + if (nvdev)
2727 cancel_work_sync(&nvdev->subchan_work);
2728
2729 /*
2730 * Call to the vsc driver to let it know that the device is being
2731 * removed. Also blocks mtu and channel changes.
2732 */
2733 - rtnl_lock();
2734 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2735 if (vf_netdev)
2736 netvsc_unregister_vf(vf_netdev);
2737 @@ -2132,7 +2130,6 @@ static int netvsc_remove(struct hv_device *dev)
2738 list_del(&ndev_ctx->list);
2739
2740 rtnl_unlock();
2741 - rcu_read_unlock();
2742
2743 hv_set_drvdata(dev, NULL);
2744
2745 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2746 index 83c591713837..817451a1efd6 100644
2747 --- a/drivers/net/team/team.c
2748 +++ b/drivers/net/team/team.c
2749 @@ -1165,6 +1165,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
2750 return -EBUSY;
2751 }
2752
2753 + if (dev == port_dev) {
2754 + netdev_err(dev, "Cannot enslave team device to itself\n");
2755 + return -EINVAL;
2756 + }
2757 +
2758 if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
2759 vlan_uses_dev(dev)) {
2760 netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
2761 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2762 index c5d4b35bb72a..11a25cef113f 100644
2763 --- a/drivers/net/usb/qmi_wwan.c
2764 +++ b/drivers/net/usb/qmi_wwan.c
2765 @@ -1233,6 +1233,7 @@ static const struct usb_device_id products[] = {
2766 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
2767 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
2768 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
2769 + {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
2770 {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
2771 {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
2772 {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
2773 diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
2774 index 05553d252446..b64b1ee56d2d 100644
2775 --- a/drivers/net/usb/smsc75xx.c
2776 +++ b/drivers/net/usb/smsc75xx.c
2777 @@ -1517,6 +1517,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
2778 {
2779 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
2780 if (pdata) {
2781 + cancel_work_sync(&pdata->set_multicast);
2782 netif_dbg(dev, ifdown, dev->net, "free pdata\n");
2783 kfree(pdata);
2784 pdata = NULL;
2785 diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
2786 index ffc87a956d97..53d1c08cef4d 100644
2787 --- a/drivers/pci/host/pci-hyperv.c
2788 +++ b/drivers/pci/host/pci-hyperv.c
2789 @@ -100,6 +100,9 @@ static enum pci_protocol_version_t pci_protocol_version;
2790
2791 #define STATUS_REVISION_MISMATCH 0xC0000059
2792
2793 +/* space for 32bit serial number as string */
2794 +#define SLOT_NAME_SIZE 11
2795 +
2796 /*
2797 * Message Types
2798 */
2799 @@ -516,6 +519,7 @@ struct hv_pci_dev {
2800 struct list_head list_entry;
2801 refcount_t refs;
2802 enum hv_pcichild_state state;
2803 + struct pci_slot *pci_slot;
2804 struct pci_function_description desc;
2805 bool reported_missing;
2806 struct hv_pcibus_device *hbus;
2807 @@ -1481,6 +1485,34 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
2808 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2809 }
2810
2811 +/*
2812 + * Assign entries in sysfs pci slot directory.
2813 + *
2814 + * Note that this function does not need to lock the children list
2815 + * because it is called from pci_devices_present_work which
2816 + * is serialized with hv_eject_device_work because they are on the
2817 + * same ordered workqueue. Therefore hbus->children list will not change
2818 + * even when pci_create_slot sleeps.
2819 + */
2820 +static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2821 +{
2822 + struct hv_pci_dev *hpdev;
2823 + char name[SLOT_NAME_SIZE];
2824 + int slot_nr;
2825 +
2826 + list_for_each_entry(hpdev, &hbus->children, list_entry) {
2827 + if (hpdev->pci_slot)
2828 + continue;
2829 +
2830 + slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2831 + snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2832 + hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
2833 + name, NULL);
2834 + if (!hpdev->pci_slot)
2835 + pr_warn("pci_create slot %s failed\n", name);
2836 + }
2837 +}
2838 +
2839 /**
2840 * create_root_hv_pci_bus() - Expose a new root PCI bus
2841 * @hbus: Root PCI bus, as understood by this driver
2842 @@ -1504,6 +1536,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
2843 pci_lock_rescan_remove();
2844 pci_scan_child_bus(hbus->pci_bus);
2845 pci_bus_assign_resources(hbus->pci_bus);
2846 + hv_pci_assign_slots(hbus);
2847 pci_bus_add_devices(hbus->pci_bus);
2848 pci_unlock_rescan_remove();
2849 hbus->state = hv_pcibus_installed;
2850 @@ -1787,6 +1820,7 @@ static void pci_devices_present_work(struct work_struct *work)
2851 */
2852 pci_lock_rescan_remove();
2853 pci_scan_child_bus(hbus->pci_bus);
2854 + hv_pci_assign_slots(hbus);
2855 pci_unlock_rescan_remove();
2856 break;
2857
2858 @@ -1895,6 +1929,9 @@ static void hv_eject_device_work(struct work_struct *work)
2859 list_del(&hpdev->list_entry);
2860 spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
2861
2862 + if (hpdev->pci_slot)
2863 + pci_destroy_slot(hpdev->pci_slot);
2864 +
2865 memset(&ctxt, 0, sizeof(ctxt));
2866 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
2867 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2868 diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
2869 index d14fc2e67f93..5e06917b4cef 100644
2870 --- a/drivers/perf/arm_pmu.c
2871 +++ b/drivers/perf/arm_pmu.c
2872 @@ -483,7 +483,13 @@ static int armpmu_filter_match(struct perf_event *event)
2873 {
2874 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
2875 unsigned int cpu = smp_processor_id();
2876 - return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
2877 + int ret;
2878 +
2879 + ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
2880 + if (ret && armpmu->filter_match)
2881 + return armpmu->filter_match(event);
2882 +
2883 + return ret;
2884 }
2885
2886 static ssize_t armpmu_cpumask_show(struct device *dev,
2887 diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
2888 index db9cca4a83ff..22558bf29424 100644
2889 --- a/drivers/pinctrl/pinctrl-mcp23s08.c
2890 +++ b/drivers/pinctrl/pinctrl-mcp23s08.c
2891 @@ -643,6 +643,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
2892 return err;
2893 }
2894
2895 + return 0;
2896 +}
2897 +
2898 +static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
2899 +{
2900 + struct gpio_chip *chip = &mcp->chip;
2901 + int err;
2902 +
2903 err = gpiochip_irqchip_add_nested(chip,
2904 &mcp23s08_irq_chip,
2905 0,
2906 @@ -907,7 +915,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
2907 }
2908
2909 if (mcp->irq && mcp->irq_controller) {
2910 - ret = mcp23s08_irq_setup(mcp);
2911 + ret = mcp23s08_irqchip_setup(mcp);
2912 if (ret)
2913 goto fail;
2914 }
2915 @@ -932,6 +940,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
2916 goto fail;
2917 }
2918
2919 + if (mcp->irq)
2920 + ret = mcp23s08_irq_setup(mcp);
2921 +
2922 fail:
2923 if (ret < 0)
2924 dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
2925 diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
2926 index 72ce6ad95767..1419eaea03d8 100644
2927 --- a/drivers/s390/cio/vfio_ccw_cp.c
2928 +++ b/drivers/s390/cio/vfio_ccw_cp.c
2929 @@ -172,7 +172,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
2930
2931 for (i = 0; i < pat->pat_nr; i++, pa++)
2932 for (j = 0; j < pa->pa_nr; j++)
2933 - if (pa->pa_iova_pfn[i] == iova_pfn)
2934 + if (pa->pa_iova_pfn[j] == iova_pfn)
2935 return true;
2936
2937 return false;
2938 diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
2939 index aba58d3848a6..511a31b359c7 100644
2940 --- a/drivers/scsi/qla2xxx/qla_target.h
2941 +++ b/drivers/scsi/qla2xxx/qla_target.h
2942 @@ -374,8 +374,8 @@ struct atio_from_isp {
2943 static inline int fcpcmd_is_corrupted(struct atio *atio)
2944 {
2945 if (atio->entry_type == ATIO_TYPE7 &&
2946 - (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
2947 - FCP_CMD_LENGTH_MIN))
2948 + ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
2949 + FCP_CMD_LENGTH_MIN))
2950 return 1;
2951 else
2952 return 0;
2953 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2954 index 52fa52c20be0..d2cafdae8317 100644
2955 --- a/drivers/target/iscsi/iscsi_target.c
2956 +++ b/drivers/target/iscsi/iscsi_target.c
2957 @@ -1421,7 +1421,8 @@ static void iscsit_do_crypto_hash_buf(
2958
2959 sg_init_table(sg, ARRAY_SIZE(sg));
2960 sg_set_buf(sg, buf, payload_length);
2961 - sg_set_buf(sg + 1, pad_bytes, padding);
2962 + if (padding)
2963 + sg_set_buf(sg + 1, pad_bytes, padding);
2964
2965 ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
2966
2967 @@ -3942,10 +3943,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
2968 static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
2969 {
2970 int ret;
2971 - u8 buffer[ISCSI_HDR_LEN], opcode;
2972 + u8 *buffer, opcode;
2973 u32 checksum = 0, digest = 0;
2974 struct kvec iov;
2975
2976 + buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
2977 + if (!buffer)
2978 + return;
2979 +
2980 while (!kthread_should_stop()) {
2981 /*
2982 * Ensure that both TX and RX per connection kthreads
2983 @@ -3953,7 +3958,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
2984 */
2985 iscsit_thread_check_cpumask(conn, current, 0);
2986
2987 - memset(buffer, 0, ISCSI_HDR_LEN);
2988 memset(&iov, 0, sizeof(struct kvec));
2989
2990 iov.iov_base = buffer;
2991 @@ -3962,7 +3966,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
2992 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
2993 if (ret != ISCSI_HDR_LEN) {
2994 iscsit_rx_thread_wait_for_tcp(conn);
2995 - return;
2996 + break;
2997 }
2998
2999 if (conn->conn_ops->HeaderDigest) {
3000 @@ -3972,7 +3976,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3001 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
3002 if (ret != ISCSI_CRC_LEN) {
3003 iscsit_rx_thread_wait_for_tcp(conn);
3004 - return;
3005 + break;
3006 }
3007
3008 iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
3009 @@ -3996,7 +4000,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3010 }
3011
3012 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
3013 - return;
3014 + break;
3015
3016 opcode = buffer[0] & ISCSI_OPCODE_MASK;
3017
3018 @@ -4007,13 +4011,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3019 " while in Discovery Session, rejecting.\n", opcode);
3020 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
3021 buffer);
3022 - return;
3023 + break;
3024 }
3025
3026 ret = iscsi_target_rx_opcode(conn, buffer);
3027 if (ret < 0)
3028 - return;
3029 + break;
3030 }
3031 +
3032 + kfree(buffer);
3033 }
3034
3035 int iscsi_target_rx_thread(void *arg)
3036 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
3037 index c01d1f3a1c7d..d2a9767a8e9c 100644
3038 --- a/drivers/usb/host/xhci-hub.c
3039 +++ b/drivers/usb/host/xhci-hub.c
3040 @@ -1236,17 +1236,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
3041 temp = readl(port_array[wIndex]);
3042 break;
3043 }
3044 -
3045 - /* Software should not attempt to set
3046 - * port link state above '3' (U3) and the port
3047 - * must be enabled.
3048 - */
3049 - if ((temp & PORT_PE) == 0 ||
3050 - (link_state > USB_SS_PORT_LS_U3)) {
3051 - xhci_warn(xhci, "Cannot set link state.\n");
3052 + /* Port must be enabled */
3053 + if (!(temp & PORT_PE)) {
3054 + retval = -ENODEV;
3055 + break;
3056 + }
3057 + /* Can't set port link state above '3' (U3) */
3058 + if (link_state > USB_SS_PORT_LS_U3) {
3059 + xhci_warn(xhci, "Cannot set port %d link state %d\n",
3060 + wIndex, link_state);
3061 goto error;
3062 }
3063 -
3064 if (link_state == USB_SS_PORT_LS_U3) {
3065 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
3066 wIndex + 1);
3067 diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
3068 index 8235b285dbb2..d09bab3bf224 100644
3069 --- a/drivers/video/fbdev/aty/atyfb.h
3070 +++ b/drivers/video/fbdev/aty/atyfb.h
3071 @@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
3072 extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
3073 extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
3074
3075 +extern const u8 aty_postdividers[8];
3076 +
3077
3078 /*
3079 * Hardware cursor support
3080 @@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
3081
3082 extern void aty_reset_engine(const struct atyfb_par *par);
3083 extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
3084 -extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
3085
3086 void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
3087 void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
3088 diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
3089 index 3ec72f19114b..d4b938276d23 100644
3090 --- a/drivers/video/fbdev/aty/atyfb_base.c
3091 +++ b/drivers/video/fbdev/aty/atyfb_base.c
3092 @@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
3093 /*
3094 * PLL Reference Divider M:
3095 */
3096 - M = pll_regs[2];
3097 + M = pll_regs[PLL_REF_DIV];
3098
3099 /*
3100 * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
3101 */
3102 - N = pll_regs[7 + (clock_cntl & 3)];
3103 + N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
3104
3105 /*
3106 * PLL Post Divider P (Dependent on CLOCK_CNTL):
3107 */
3108 - P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
3109 + P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
3110 + ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
3111
3112 /*
3113 * PLL Divider Q:
3114 diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
3115 index 7d3bd723d3d5..d55f4bacb41c 100644
3116 --- a/drivers/video/fbdev/aty/mach64_ct.c
3117 +++ b/drivers/video/fbdev/aty/mach64_ct.c
3118 @@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
3119 */
3120
3121 #define Maximum_DSP_PRECISION 7
3122 -static u8 postdividers[] = {1,2,4,8,3};
3123 +const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
3124
3125 static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
3126 {
3127 @@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
3128 pll->vclk_post_div += (q < 64*8);
3129 pll->vclk_post_div += (q < 32*8);
3130 }
3131 - pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
3132 + pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
3133 // pll->vclk_post_div <<= 6;
3134 pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
3135 pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
3136 @@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
3137 u8 mclk_fb_div, pll_ext_cntl;
3138 pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
3139 pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
3140 - pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
3141 + pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
3142 mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
3143 if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
3144 mclk_fb_div <<= 1;
3145 @@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
3146 xpost_div += (q < 64*8);
3147 xpost_div += (q < 32*8);
3148 }
3149 - pll->ct.xclk_post_div_real = postdividers[xpost_div];
3150 + pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
3151 pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
3152
3153 #ifdef CONFIG_PPC
3154 @@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
3155 mpost_div += (q < 64*8);
3156 mpost_div += (q < 32*8);
3157 }
3158 - sclk_post_div_real = postdividers[mpost_div];
3159 + sclk_post_div_real = aty_postdividers[mpost_div];
3160 pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
3161 pll->ct.spll_cntl2 = mpost_div << 4;
3162 #ifdef DEBUG
3163 diff --git a/fs/dcache.c b/fs/dcache.c
3164 index c1a7c174a905..28b2e770bb69 100644
3165 --- a/fs/dcache.c
3166 +++ b/fs/dcache.c
3167 @@ -270,11 +270,25 @@ static void __d_free(struct rcu_head *head)
3168 kmem_cache_free(dentry_cache, dentry);
3169 }
3170
3171 +static void __d_free_external_name(struct rcu_head *head)
3172 +{
3173 + struct external_name *name = container_of(head, struct external_name,
3174 + u.head);
3175 +
3176 + mod_node_page_state(page_pgdat(virt_to_page(name)),
3177 + NR_INDIRECTLY_RECLAIMABLE_BYTES,
3178 + -ksize(name));
3179 +
3180 + kfree(name);
3181 +}
3182 +
3183 static void __d_free_external(struct rcu_head *head)
3184 {
3185 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
3186 - kfree(external_name(dentry));
3187 - kmem_cache_free(dentry_cache, dentry);
3188 +
3189 + __d_free_external_name(&external_name(dentry)->u.head);
3190 +
3191 + kmem_cache_free(dentry_cache, dentry);
3192 }
3193
3194 static inline int dname_external(const struct dentry *dentry)
3195 @@ -305,7 +319,7 @@ void release_dentry_name_snapshot(struct name_snapshot *name)
3196 struct external_name *p;
3197 p = container_of(name->name, struct external_name, name[0]);
3198 if (unlikely(atomic_dec_and_test(&p->u.count)))
3199 - kfree_rcu(p, u.head);
3200 + call_rcu(&p->u.head, __d_free_external_name);
3201 }
3202 }
3203 EXPORT_SYMBOL(release_dentry_name_snapshot);
3204 @@ -1605,6 +1619,7 @@ EXPORT_SYMBOL(d_invalidate);
3205
3206 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
3207 {
3208 + struct external_name *ext = NULL;
3209 struct dentry *dentry;
3210 char *dname;
3211 int err;
3212 @@ -1625,14 +1640,13 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
3213 dname = dentry->d_iname;
3214 } else if (name->len > DNAME_INLINE_LEN-1) {
3215 size_t size = offsetof(struct external_name, name[1]);
3216 - struct external_name *p = kmalloc(size + name->len,
3217 - GFP_KERNEL_ACCOUNT);
3218 - if (!p) {
3219 + ext = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT);
3220 + if (!ext) {
3221 kmem_cache_free(dentry_cache, dentry);
3222 return NULL;
3223 }
3224 - atomic_set(&p->u.count, 1);
3225 - dname = p->name;
3226 + atomic_set(&ext->u.count, 1);
3227 + dname = ext->name;
3228 if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
3229 kasan_unpoison_shadow(dname,
3230 round_up(name->len + 1, sizeof(unsigned long)));
3231 @@ -1675,6 +1689,12 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
3232 }
3233 }
3234
3235 + if (unlikely(ext)) {
3236 + pg_data_t *pgdat = page_pgdat(virt_to_page(ext));
3237 + mod_node_page_state(pgdat, NR_INDIRECTLY_RECLAIMABLE_BYTES,
3238 + ksize(ext));
3239 + }
3240 +
3241 this_cpu_inc(nr_dentry);
3242
3243 return dentry;
3244 @@ -2769,7 +2789,7 @@ static void copy_name(struct dentry *dentry, struct dentry *target)
3245 dentry->d_name.hash_len = target->d_name.hash_len;
3246 }
3247 if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
3248 - kfree_rcu(old_name, u.head);
3249 + call_rcu(&old_name->u.head, __d_free_external_name);
3250 }
3251
3252 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
3253 diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
3254 index 4e8f77504a57..e7905d9353e8 100644
3255 --- a/include/linux/cgroup-defs.h
3256 +++ b/include/linux/cgroup-defs.h
3257 @@ -353,6 +353,7 @@ struct cgroup {
3258 * specific task are charged to the dom_cgrp.
3259 */
3260 struct cgroup *dom_cgrp;
3261 + struct cgroup *old_dom_cgrp; /* used while enabling threaded */
3262
3263 /*
3264 * list of pidlists, up to two for each namespace (one for procs, one
3265 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
3266 index f0938257ee6d..f679f5268467 100644
3267 --- a/include/linux/mmzone.h
3268 +++ b/include/linux/mmzone.h
3269 @@ -180,6 +180,7 @@ enum node_stat_item {
3270 NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
3271 NR_DIRTIED, /* page dirtyings since bootup */
3272 NR_WRITTEN, /* page writings since bootup */
3273 + NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
3274 NR_VM_NODE_STAT_ITEMS
3275 };
3276
3277 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3278 index 2ea7ee1fb495..a516dbe5869f 100644
3279 --- a/include/linux/netdevice.h
3280 +++ b/include/linux/netdevice.h
3281 @@ -2307,6 +2307,13 @@ struct netdev_notifier_info {
3282 struct net_device *dev;
3283 };
3284
3285 +struct netdev_notifier_info_ext {
3286 + struct netdev_notifier_info info; /* must be first */
3287 + union {
3288 + u32 mtu;
3289 + } ext;
3290 +};
3291 +
3292 struct netdev_notifier_change_info {
3293 struct netdev_notifier_info info; /* must be first */
3294 unsigned int flags_changed;
3295 diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
3296 index af0f44effd44..251bc43fdcfc 100644
3297 --- a/include/linux/perf/arm_pmu.h
3298 +++ b/include/linux/perf/arm_pmu.h
3299 @@ -110,6 +110,7 @@ struct arm_pmu {
3300 void (*stop)(struct arm_pmu *);
3301 void (*reset)(void *);
3302 int (*map_event)(struct perf_event *event);
3303 + int (*filter_match)(struct perf_event *event);
3304 int num_events;
3305 u64 max_period;
3306 bool secure_access; /* 32-bit ARM only */
3307 diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
3308 index 9397628a1967..cb462f9ab7dd 100644
3309 --- a/include/linux/virtio_net.h
3310 +++ b/include/linux/virtio_net.h
3311 @@ -5,6 +5,24 @@
3312 #include <linux/if_vlan.h>
3313 #include <uapi/linux/virtio_net.h>
3314
3315 +static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
3316 + const struct virtio_net_hdr *hdr)
3317 +{
3318 + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
3319 + case VIRTIO_NET_HDR_GSO_TCPV4:
3320 + case VIRTIO_NET_HDR_GSO_UDP:
3321 + skb->protocol = cpu_to_be16(ETH_P_IP);
3322 + break;
3323 + case VIRTIO_NET_HDR_GSO_TCPV6:
3324 + skb->protocol = cpu_to_be16(ETH_P_IPV6);
3325 + break;
3326 + default:
3327 + return -EINVAL;
3328 + }
3329 +
3330 + return 0;
3331 +}
3332 +
3333 static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
3334 const struct virtio_net_hdr *hdr,
3335 bool little_endian)
3336 diff --git a/include/net/bonding.h b/include/net/bonding.h
3337 index 73799da57400..04008209506a 100644
3338 --- a/include/net/bonding.h
3339 +++ b/include/net/bonding.h
3340 @@ -139,12 +139,6 @@ struct bond_parm_tbl {
3341 int mode;
3342 };
3343
3344 -struct netdev_notify_work {
3345 - struct delayed_work work;
3346 - struct net_device *dev;
3347 - struct netdev_bonding_info bonding_info;
3348 -};
3349 -
3350 struct slave {
3351 struct net_device *dev; /* first - useful for panic debug */
3352 struct bonding *bond; /* our master */
3353 @@ -172,6 +166,7 @@ struct slave {
3354 #ifdef CONFIG_NET_POLL_CONTROLLER
3355 struct netpoll *np;
3356 #endif
3357 + struct delayed_work notify_work;
3358 struct kobject kobj;
3359 struct rtnl_link_stats64 slave_stats;
3360 };
3361 diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
3362 index 8e51b4a69088..16a1492a5bd3 100644
3363 --- a/include/net/inet_sock.h
3364 +++ b/include/net/inet_sock.h
3365 @@ -129,12 +129,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
3366 return sk->sk_bound_dev_if;
3367 }
3368
3369 -static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
3370 -{
3371 - return rcu_dereference_check(ireq->ireq_opt,
3372 - refcount_read(&ireq->req.rsk_refcnt) > 0);
3373 -}
3374 -
3375 struct inet_cork {
3376 unsigned int flags;
3377 __be32 addr;
3378 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
3379 index 5c5d344c0629..32df52869a14 100644
3380 --- a/include/net/ip_fib.h
3381 +++ b/include/net/ip_fib.h
3382 @@ -372,6 +372,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
3383 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
3384 int fib_sync_down_addr(struct net_device *dev, __be32 local);
3385 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
3386 +void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
3387
3388 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3389 int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
3390 diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
3391 index d8afd8a5bd76..926ea701cdc4 100644
3392 --- a/include/sound/hdaudio.h
3393 +++ b/include/sound/hdaudio.h
3394 @@ -357,6 +357,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
3395 void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
3396 void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
3397 void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
3398 +int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
3399
3400 void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
3401 int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
3402 diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
3403 index 76c0ef2cb509..3fc11b8851ac 100644
3404 --- a/kernel/cgroup/cgroup.c
3405 +++ b/kernel/cgroup/cgroup.c
3406 @@ -2780,11 +2780,12 @@ restart:
3407 }
3408
3409 /**
3410 - * cgroup_save_control - save control masks of a subtree
3411 + * cgroup_save_control - save control masks and dom_cgrp of a subtree
3412 * @cgrp: root of the target subtree
3413 *
3414 - * Save ->subtree_control and ->subtree_ss_mask to the respective old_
3415 - * prefixed fields for @cgrp's subtree including @cgrp itself.
3416 + * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
3417 + * respective old_ prefixed fields for @cgrp's subtree including @cgrp
3418 + * itself.
3419 */
3420 static void cgroup_save_control(struct cgroup *cgrp)
3421 {
3422 @@ -2794,6 +2795,7 @@ static void cgroup_save_control(struct cgroup *cgrp)
3423 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3424 dsct->old_subtree_control = dsct->subtree_control;
3425 dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
3426 + dsct->old_dom_cgrp = dsct->dom_cgrp;
3427 }
3428 }
3429
3430 @@ -2819,11 +2821,12 @@ static void cgroup_propagate_control(struct cgroup *cgrp)
3431 }
3432
3433 /**
3434 - * cgroup_restore_control - restore control masks of a subtree
3435 + * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
3436 * @cgrp: root of the target subtree
3437 *
3438 - * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
3439 - * prefixed fields for @cgrp's subtree including @cgrp itself.
3440 + * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
3441 + * respective old_ prefixed fields for @cgrp's subtree including @cgrp
3442 + * itself.
3443 */
3444 static void cgroup_restore_control(struct cgroup *cgrp)
3445 {
3446 @@ -2833,6 +2836,7 @@ static void cgroup_restore_control(struct cgroup *cgrp)
3447 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3448 dsct->subtree_control = dsct->old_subtree_control;
3449 dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
3450 + dsct->dom_cgrp = dsct->old_dom_cgrp;
3451 }
3452 }
3453
3454 @@ -3140,6 +3144,8 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
3455 {
3456 struct cgroup *parent = cgroup_parent(cgrp);
3457 struct cgroup *dom_cgrp = parent->dom_cgrp;
3458 + struct cgroup *dsct;
3459 + struct cgroup_subsys_state *d_css;
3460 int ret;
3461
3462 lockdep_assert_held(&cgroup_mutex);
3463 @@ -3169,12 +3175,13 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
3464 */
3465 cgroup_save_control(cgrp);
3466
3467 - cgrp->dom_cgrp = dom_cgrp;
3468 + cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
3469 + if (dsct == cgrp || cgroup_is_threaded(dsct))
3470 + dsct->dom_cgrp = dom_cgrp;
3471 +
3472 ret = cgroup_apply_control(cgrp);
3473 if (!ret)
3474 parent->nr_threaded_children++;
3475 - else
3476 - cgrp->dom_cgrp = cgrp;
3477
3478 cgroup_finalize_control(cgrp, ret);
3479 return ret;
3480 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3481 index 174612f8339c..39c1fedcfdb4 100644
3482 --- a/mm/huge_memory.c
3483 +++ b/mm/huge_memory.c
3484 @@ -2843,9 +2843,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3485 if (!(pvmw->pmd && !pvmw->pte))
3486 return;
3487
3488 - mmu_notifier_invalidate_range_start(mm, address,
3489 - address + HPAGE_PMD_SIZE);
3490 -
3491 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3492 pmdval = *pvmw->pmd;
3493 pmdp_invalidate(vma, address, pvmw->pmd);
3494 @@ -2858,9 +2855,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3495 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3496 page_remove_rmap(page, true);
3497 put_page(page);
3498 -
3499 - mmu_notifier_invalidate_range_end(mm, address,
3500 - address + HPAGE_PMD_SIZE);
3501 }
3502
3503 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3504 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3505 index 59ccf455fcbd..a604b5da6755 100644
3506 --- a/mm/page_alloc.c
3507 +++ b/mm/page_alloc.c
3508 @@ -4557,6 +4557,13 @@ long si_mem_available(void)
3509 min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
3510 wmark_low);
3511
3512 + /*
3513 + * Part of the kernel memory, which can be released under memory
3514 + * pressure.
3515 + */
3516 + available += global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
3517 + PAGE_SHIFT;
3518 +
3519 if (available < 0)
3520 available = 0;
3521 return available;
3522 diff --git a/mm/percpu.c b/mm/percpu.c
3523 index 5fa5e79b69f0..3074148b7e0d 100644
3524 --- a/mm/percpu.c
3525 +++ b/mm/percpu.c
3526 @@ -1208,6 +1208,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
3527 {
3528 if (!chunk)
3529 return;
3530 + pcpu_mem_free(chunk->md_blocks);
3531 pcpu_mem_free(chunk->bound_map);
3532 pcpu_mem_free(chunk->alloc_map);
3533 pcpu_mem_free(chunk);
3534 diff --git a/mm/util.c b/mm/util.c
3535 index 34e57fae959d..547e04b5cfff 100644
3536 --- a/mm/util.c
3537 +++ b/mm/util.c
3538 @@ -635,6 +635,13 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
3539 */
3540 free += global_node_page_state(NR_SLAB_RECLAIMABLE);
3541
3542 + /*
3543 + * Part of the kernel memory, which can be released
3544 + * under memory pressure.
3545 + */
3546 + free += global_node_page_state(
3547 + NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
3548 +
3549 /*
3550 * Leave reserved pages. The pages are not for anonymous pages.
3551 */
3552 diff --git a/mm/vmstat.c b/mm/vmstat.c
3553 index 2bdc962b2dfe..527ae727d547 100644
3554 --- a/mm/vmstat.c
3555 +++ b/mm/vmstat.c
3556 @@ -1090,6 +1090,7 @@ const char * const vmstat_text[] = {
3557 "nr_vmscan_immediate_reclaim",
3558 "nr_dirtied",
3559 "nr_written",
3560 + "", /* nr_indirectly_reclaimable */
3561
3562 /* enum writeback_stat_item counters */
3563 "nr_dirty_threshold",
3564 @@ -1214,7 +1215,6 @@ const char * const vmstat_text[] = {
3565 #ifdef CONFIG_DEBUG_VM_VMACACHE
3566 "vmacache_find_calls",
3567 "vmacache_find_hits",
3568 - "vmacache_full_flushes",
3569 #endif
3570 #ifdef CONFIG_SWAP
3571 "swap_ra",
3572 @@ -1673,6 +1673,10 @@ static int vmstat_show(struct seq_file *m, void *arg)
3573 unsigned long *l = arg;
3574 unsigned long off = l - (unsigned long *)m->private;
3575
3576 + /* Skip hidden vmstat items. */
3577 + if (*vmstat_text[off] == '\0')
3578 + return 0;
3579 +
3580 seq_puts(m, vmstat_text[off]);
3581 seq_put_decimal_ull(m, " ", *l);
3582 seq_putc(m, '\n');
3583 diff --git a/net/core/dev.c b/net/core/dev.c
3584 index 85f4a1047707..e8a66ad6d07c 100644
3585 --- a/net/core/dev.c
3586 +++ b/net/core/dev.c
3587 @@ -1688,6 +1688,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
3588 }
3589 EXPORT_SYMBOL(call_netdevice_notifiers);
3590
3591 +/**
3592 + * call_netdevice_notifiers_mtu - call all network notifier blocks
3593 + * @val: value passed unmodified to notifier function
3594 + * @dev: net_device pointer passed unmodified to notifier function
3595 + * @arg: additional u32 argument passed to the notifier function
3596 + *
3597 + * Call all network notifier blocks. Parameters and return value
3598 + * are as for raw_notifier_call_chain().
3599 + */
3600 +static int call_netdevice_notifiers_mtu(unsigned long val,
3601 + struct net_device *dev, u32 arg)
3602 +{
3603 + struct netdev_notifier_info_ext info = {
3604 + .info.dev = dev,
3605 + .ext.mtu = arg,
3606 + };
3607 +
3608 + BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
3609 +
3610 + return call_netdevice_notifiers_info(val, dev, &info.info);
3611 +}
3612 +
3613 #ifdef CONFIG_NET_INGRESS
3614 static struct static_key ingress_needed __read_mostly;
3615
3616 @@ -6891,14 +6913,16 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
3617 err = __dev_set_mtu(dev, new_mtu);
3618
3619 if (!err) {
3620 - err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3621 + err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
3622 + orig_mtu);
3623 err = notifier_to_errno(err);
3624 if (err) {
3625 /* setting mtu back and notifying everyone again,
3626 * so that they have a chance to revert changes.
3627 */
3628 __dev_set_mtu(dev, orig_mtu);
3629 - call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3630 + call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
3631 + new_mtu);
3632 }
3633 }
3634 return err;
3635 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
3636 index 490eab16b04b..0ae5ac5e090f 100644
3637 --- a/net/core/ethtool.c
3638 +++ b/net/core/ethtool.c
3639 @@ -2572,6 +2572,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
3640 case ETHTOOL_GPHYSTATS:
3641 case ETHTOOL_GTSO:
3642 case ETHTOOL_GPERMADDR:
3643 + case ETHTOOL_GUFO:
3644 case ETHTOOL_GGSO:
3645 case ETHTOOL_GGRO:
3646 case ETHTOOL_GFLAGS:
3647 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3648 index efe396cc77b5..760364526dc1 100644
3649 --- a/net/core/rtnetlink.c
3650 +++ b/net/core/rtnetlink.c
3651 @@ -2430,6 +2430,12 @@ struct net_device *rtnl_create_link(struct net *net,
3652 else if (ops->get_num_rx_queues)
3653 num_rx_queues = ops->get_num_rx_queues();
3654
3655 + if (num_tx_queues < 1 || num_tx_queues > 4096)
3656 + return ERR_PTR(-EINVAL);
3657 +
3658 + if (num_rx_queues < 1 || num_rx_queues > 4096)
3659 + return ERR_PTR(-EINVAL);
3660 +
3661 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
3662 ops->setup, num_tx_queues, num_rx_queues);
3663 if (!dev)
3664 @@ -3292,16 +3298,27 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3665 int err = 0;
3666 int fidx = 0;
3667
3668 - err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
3669 - IFLA_MAX, ifla_policy, NULL);
3670 - if (err < 0) {
3671 - return -EINVAL;
3672 - } else if (err == 0) {
3673 - if (tb[IFLA_MASTER])
3674 - br_idx = nla_get_u32(tb[IFLA_MASTER]);
3675 - }
3676 + /* A hack to preserve kernel<->userspace interface.
3677 + * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
3678 + * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
3679 + * So, check for ndmsg with an optional u32 attribute (not used here).
3680 + * Fortunately these sizes don't conflict with the size of ifinfomsg
3681 + * with an optional attribute.
3682 + */
3683 + if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) &&
3684 + (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) +
3685 + nla_attr_size(sizeof(u32)))) {
3686 + err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
3687 + IFLA_MAX, ifla_policy, NULL);
3688 + if (err < 0) {
3689 + return -EINVAL;
3690 + } else if (err == 0) {
3691 + if (tb[IFLA_MASTER])
3692 + br_idx = nla_get_u32(tb[IFLA_MASTER]);
3693 + }
3694
3695 - brport_idx = ifm->ifi_index;
3696 + brport_idx = ifm->ifi_index;
3697 + }
3698
3699 if (br_idx) {
3700 br_dev = __dev_get_by_index(net, br_idx);
3701 diff --git a/net/dccp/input.c b/net/dccp/input.c
3702 index fa6be9750bb4..849f399aec21 100644
3703 --- a/net/dccp/input.c
3704 +++ b/net/dccp/input.c
3705 @@ -605,11 +605,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
3706 if (sk->sk_state == DCCP_LISTEN) {
3707 if (dh->dccph_type == DCCP_PKT_REQUEST) {
3708 /* It is possible that we process SYN packets from backlog,
3709 - * so we need to make sure to disable BH right there.
3710 + * so we need to make sure to disable BH and RCU right there.
3711 */
3712 + rcu_read_lock();
3713 local_bh_disable();
3714 acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
3715 local_bh_enable();
3716 + rcu_read_unlock();
3717 if (!acceptable)
3718 return 1;
3719 consume_skb(skb);
3720 diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
3721 index b08feb219b44..8e08cea6f178 100644
3722 --- a/net/dccp/ipv4.c
3723 +++ b/net/dccp/ipv4.c
3724 @@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
3725
3726 dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
3727 ireq->ir_rmt_addr);
3728 + rcu_read_lock();
3729 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
3730 ireq->ir_rmt_addr,
3731 - ireq_opt_deref(ireq));
3732 + rcu_dereference(ireq->ireq_opt));
3733 + rcu_read_unlock();
3734 err = net_xmit_eval(err);
3735 }
3736
3737 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
3738 index 5bbdd05d0cd3..1b3f860f7dcd 100644
3739 --- a/net/ipv4/fib_frontend.c
3740 +++ b/net/ipv4/fib_frontend.c
3741 @@ -1185,7 +1185,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
3742 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
3743 {
3744 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3745 - struct netdev_notifier_changeupper_info *info;
3746 + struct netdev_notifier_changeupper_info *upper_info = ptr;
3747 + struct netdev_notifier_info_ext *info_ext = ptr;
3748 struct in_device *in_dev;
3749 struct net *net = dev_net(dev);
3750 unsigned int flags;
3751 @@ -1220,16 +1221,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
3752 fib_sync_up(dev, RTNH_F_LINKDOWN);
3753 else
3754 fib_sync_down_dev(dev, event, false);
3755 - /* fall through */
3756 + rt_cache_flush(net);
3757 + break;
3758 case NETDEV_CHANGEMTU:
3759 + fib_sync_mtu(dev, info_ext->ext.mtu);
3760 rt_cache_flush(net);
3761 break;
3762 case NETDEV_CHANGEUPPER:
3763 - info = ptr;
3764 + upper_info = ptr;
3765 /* flush all routes if dev is linked to or unlinked from
3766 * an L3 master device (e.g., VRF)
3767 */
3768 - if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3769 + if (upper_info->upper_dev &&
3770 + netif_is_l3_master(upper_info->upper_dev))
3771 fib_disable_ip(dev, NETDEV_DOWN, true);
3772 break;
3773 }
3774 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
3775 index b557af72cde9..e76b8a7bb891 100644
3776 --- a/net/ipv4/fib_semantics.c
3777 +++ b/net/ipv4/fib_semantics.c
3778 @@ -1520,6 +1520,56 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
3779 return NOTIFY_DONE;
3780 }
3781
3782 +/* Update the PMTU of exceptions when:
3783 + * - the new MTU of the first hop becomes smaller than the PMTU
3784 + * - the old MTU was the same as the PMTU, and it limited discovery of
3785 + * larger MTUs on the path. With that limit raised, we can now
3786 + * discover larger MTUs
3787 + * A special case is locked exceptions, for which the PMTU is smaller
3788 + * than the minimal accepted PMTU:
3789 + * - if the new MTU is greater than the PMTU, don't make any change
3790 + * - otherwise, unlock and set PMTU
3791 + */
3792 +static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
3793 +{
3794 + struct fnhe_hash_bucket *bucket;
3795 + int i;
3796 +
3797 + bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
3798 + if (!bucket)
3799 + return;
3800 +
3801 + for (i = 0; i < FNHE_HASH_SIZE; i++) {
3802 + struct fib_nh_exception *fnhe;
3803 +
3804 + for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
3805 + fnhe;
3806 + fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
3807 + if (fnhe->fnhe_mtu_locked) {
3808 + if (new <= fnhe->fnhe_pmtu) {
3809 + fnhe->fnhe_pmtu = new;
3810 + fnhe->fnhe_mtu_locked = false;
3811 + }
3812 + } else if (new < fnhe->fnhe_pmtu ||
3813 + orig == fnhe->fnhe_pmtu) {
3814 + fnhe->fnhe_pmtu = new;
3815 + }
3816 + }
3817 + }
3818 +}
3819 +
3820 +void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
3821 +{
3822 + unsigned int hash = fib_devindex_hashfn(dev->ifindex);
3823 + struct hlist_head *head = &fib_info_devhash[hash];
3824 + struct fib_nh *nh;
3825 +
3826 + hlist_for_each_entry(nh, head, nh_hash) {
3827 + if (nh->nh_dev == dev)
3828 + nh_update_mtu(nh, dev->mtu, orig_mtu);
3829 + }
3830 +}
3831 +
3832 /* Event force Flags Description
3833 * NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
3834 * NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
3835 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
3836 index 0cc08c512202..9d6b172caf6c 100644
3837 --- a/net/ipv4/inet_connection_sock.c
3838 +++ b/net/ipv4/inet_connection_sock.c
3839 @@ -542,7 +542,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
3840 struct ip_options_rcu *opt;
3841 struct rtable *rt;
3842
3843 - opt = ireq_opt_deref(ireq);
3844 + rcu_read_lock();
3845 + opt = rcu_dereference(ireq->ireq_opt);
3846
3847 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
3848 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
3849 @@ -556,11 +557,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
3850 goto no_route;
3851 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
3852 goto route_err;
3853 + rcu_read_unlock();
3854 return &rt->dst;
3855
3856 route_err:
3857 ip_rt_put(rt);
3858 no_route:
3859 + rcu_read_unlock();
3860 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
3861 return NULL;
3862 }
3863 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
3864 index 048d5f6dd320..4ef92ebc4f6d 100644
3865 --- a/net/ipv4/ip_sockglue.c
3866 +++ b/net/ipv4/ip_sockglue.c
3867 @@ -147,7 +147,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
3868 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
3869 {
3870 struct sockaddr_in sin;
3871 - const struct iphdr *iph = ip_hdr(skb);
3872 __be16 *ports;
3873 int end;
3874
3875 @@ -162,7 +161,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
3876 ports = (__be16 *)skb_transport_header(skb);
3877
3878 sin.sin_family = AF_INET;
3879 - sin.sin_addr.s_addr = iph->daddr;
3880 + sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
3881 sin.sin_port = ports[1];
3882 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
3883
3884 diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
3885 index 4784f3f36b7e..72eee34092ae 100644
3886 --- a/net/ipv4/ip_tunnel.c
3887 +++ b/net/ipv4/ip_tunnel.c
3888 @@ -635,6 +635,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
3889 const struct iphdr *tnl_params, u8 protocol)
3890 {
3891 struct ip_tunnel *tunnel = netdev_priv(dev);
3892 + unsigned int inner_nhdr_len = 0;
3893 const struct iphdr *inner_iph;
3894 struct flowi4 fl4;
3895 u8 tos, ttl;
3896 @@ -644,6 +645,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
3897 __be32 dst;
3898 bool connected;
3899
3900 + /* ensure we can access the inner net header, for several users below */
3901 + if (skb->protocol == htons(ETH_P_IP))
3902 + inner_nhdr_len = sizeof(struct iphdr);
3903 + else if (skb->protocol == htons(ETH_P_IPV6))
3904 + inner_nhdr_len = sizeof(struct ipv6hdr);
3905 + if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
3906 + goto tx_error;
3907 +
3908 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
3909 connected = (tunnel->parms.iph.daddr != 0);
3910
3911 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3912 index 991f382afc1b..e24c0d7adf65 100644
3913 --- a/net/ipv4/tcp_input.c
3914 +++ b/net/ipv4/tcp_input.c
3915 @@ -5913,11 +5913,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
3916 if (th->fin)
3917 goto discard;
3918 /* It is possible that we process SYN packets from backlog,
3919 - * so we need to make sure to disable BH right there.
3920 + * so we need to make sure to disable BH and RCU right there.
3921 */
3922 + rcu_read_lock();
3923 local_bh_disable();
3924 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
3925 local_bh_enable();
3926 + rcu_read_unlock();
3927
3928 if (!acceptable)
3929 return 1;
3930 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
3931 index 0e1a670dabd9..31b34c0c2d5f 100644
3932 --- a/net/ipv4/tcp_ipv4.c
3933 +++ b/net/ipv4/tcp_ipv4.c
3934 @@ -875,9 +875,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
3935 if (skb) {
3936 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
3937
3938 + rcu_read_lock();
3939 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
3940 ireq->ir_rmt_addr,
3941 - ireq_opt_deref(ireq));
3942 + rcu_dereference(ireq->ireq_opt));
3943 + rcu_read_unlock();
3944 err = net_xmit_eval(err);
3945 }
3946
3947 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3948 index 3de413867991..dc0ec227b9d2 100644
3949 --- a/net/ipv4/udp.c
3950 +++ b/net/ipv4/udp.c
3951 @@ -1565,7 +1565,7 @@ busy_check:
3952 *err = error;
3953 return NULL;
3954 }
3955 -EXPORT_SYMBOL_GPL(__skb_recv_udp);
3956 +EXPORT_SYMBOL(__skb_recv_udp);
3957
3958 /*
3959 * This should be easy, if there is something there we
3960 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
3961 index 6a76e41e6d51..569f7c3f6b95 100644
3962 --- a/net/ipv6/addrconf.c
3963 +++ b/net/ipv6/addrconf.c
3964 @@ -4136,7 +4136,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
3965 p++;
3966 continue;
3967 }
3968 - state->offset++;
3969 return ifa;
3970 }
3971
3972 @@ -4160,13 +4159,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
3973 return ifa;
3974 }
3975
3976 + state->offset = 0;
3977 while (++state->bucket < IN6_ADDR_HSIZE) {
3978 - state->offset = 0;
3979 hlist_for_each_entry_rcu_bh(ifa,
3980 &inet6_addr_lst[state->bucket], addr_lst) {
3981 if (!net_eq(dev_net(ifa->idev->dev), net))
3982 continue;
3983 - state->offset++;
3984 return ifa;
3985 }
3986 }
3987 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
3988 index ee8dbd228fe2..0e9296f44ee4 100644
3989 --- a/net/ipv6/ip6_tunnel.c
3990 +++ b/net/ipv6/ip6_tunnel.c
3991 @@ -1227,7 +1227,7 @@ static inline int
3992 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
3993 {
3994 struct ip6_tnl *t = netdev_priv(dev);
3995 - const struct iphdr *iph = ip_hdr(skb);
3996 + const struct iphdr *iph;
3997 int encap_limit = -1;
3998 struct flowi6 fl6;
3999 __u8 dsfield;
4000 @@ -1235,6 +1235,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4001 u8 tproto;
4002 int err;
4003
4004 + /* ensure we can access the full inner ip header */
4005 + if (!pskb_may_pull(skb, sizeof(struct iphdr)))
4006 + return -1;
4007 +
4008 + iph = ip_hdr(skb);
4009 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
4010
4011 tproto = ACCESS_ONCE(t->parms.proto);
4012 @@ -1298,7 +1303,7 @@ static inline int
4013 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4014 {
4015 struct ip6_tnl *t = netdev_priv(dev);
4016 - struct ipv6hdr *ipv6h = ipv6_hdr(skb);
4017 + struct ipv6hdr *ipv6h;
4018 int encap_limit = -1;
4019 __u16 offset;
4020 struct flowi6 fl6;
4021 @@ -1307,6 +1312,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4022 u8 tproto;
4023 int err;
4024
4025 + if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
4026 + return -1;
4027 +
4028 + ipv6h = ipv6_hdr(skb);
4029 tproto = ACCESS_ONCE(t->parms.proto);
4030 if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
4031 ip6_tnl_addr_conflict(t, ipv6h))
4032 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
4033 index e4462b0ff801..f08cc6527339 100644
4034 --- a/net/ipv6/raw.c
4035 +++ b/net/ipv6/raw.c
4036 @@ -650,8 +650,6 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
4037 skb->protocol = htons(ETH_P_IPV6);
4038 skb->priority = sk->sk_priority;
4039 skb->mark = sk->sk_mark;
4040 - skb_dst_set(skb, &rt->dst);
4041 - *dstp = NULL;
4042
4043 skb_put(skb, length);
4044 skb_reset_network_header(skb);
4045 @@ -664,8 +662,14 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
4046
4047 skb->transport_header = skb->network_header;
4048 err = memcpy_from_msg(iph, msg, length);
4049 - if (err)
4050 - goto error_fault;
4051 + if (err) {
4052 + err = -EFAULT;
4053 + kfree_skb(skb);
4054 + goto error;
4055 + }
4056 +
4057 + skb_dst_set(skb, &rt->dst);
4058 + *dstp = NULL;
4059
4060 /* if egress device is enslaved to an L3 master device pass the
4061 * skb to its handler for processing
4062 @@ -674,21 +678,28 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
4063 if (unlikely(!skb))
4064 return 0;
4065
4066 + /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
4067 + * in the error path. Since skb has been freed, the dst could
4068 + * have been queued for deletion.
4069 + */
4070 + rcu_read_lock();
4071 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
4072 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
4073 NULL, rt->dst.dev, dst_output);
4074 if (err > 0)
4075 err = net_xmit_errno(err);
4076 - if (err)
4077 - goto error;
4078 + if (err) {
4079 + IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
4080 + rcu_read_unlock();
4081 + goto error_check;
4082 + }
4083 + rcu_read_unlock();
4084 out:
4085 return 0;
4086
4087 -error_fault:
4088 - err = -EFAULT;
4089 - kfree_skb(skb);
4090 error:
4091 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
4092 +error_check:
4093 if (err == -ENOBUFS && !np->recverr)
4094 err = 0;
4095 return err;
4096 diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
4097 index c070dfc0190a..c92894c3e40a 100644
4098 --- a/net/netlabel/netlabel_unlabeled.c
4099 +++ b/net/netlabel/netlabel_unlabeled.c
4100 @@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
4101 {
4102 u32 addr_len;
4103
4104 - if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
4105 + if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
4106 + info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
4107 addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
4108 if (addr_len != sizeof(struct in_addr) &&
4109 addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
4110 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4111 index 8833a58ca3ee..8d1a7c900393 100644
4112 --- a/net/packet/af_packet.c
4113 +++ b/net/packet/af_packet.c
4114 @@ -2753,10 +2753,12 @@ tpacket_error:
4115 }
4116 }
4117
4118 - if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
4119 - vio_le())) {
4120 - tp_len = -EINVAL;
4121 - goto tpacket_error;
4122 + if (po->has_vnet_hdr) {
4123 + if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
4124 + tp_len = -EINVAL;
4125 + goto tpacket_error;
4126 + }
4127 + virtio_net_hdr_set_proto(skb, vnet_hdr);
4128 }
4129
4130 skb->destructor = tpacket_destruct_skb;
4131 @@ -2952,6 +2954,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
4132 if (err)
4133 goto out_free;
4134 len += sizeof(vnet_hdr);
4135 + virtio_net_hdr_set_proto(skb, &vnet_hdr);
4136 }
4137
4138 skb_probe_transport_header(skb, reserve);
4139 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
4140 index 22bc6fc48311..cd69aa067543 100644
4141 --- a/net/sched/sch_api.c
4142 +++ b/net/sched/sch_api.c
4143 @@ -1216,6 +1216,16 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
4144 * Delete/get qdisc.
4145 */
4146
4147 +const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
4148 + [TCA_KIND] = { .type = NLA_STRING },
4149 + [TCA_OPTIONS] = { .type = NLA_NESTED },
4150 + [TCA_RATE] = { .type = NLA_BINARY,
4151 + .len = sizeof(struct tc_estimator) },
4152 + [TCA_STAB] = { .type = NLA_NESTED },
4153 + [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
4154 + [TCA_CHAIN] = { .type = NLA_U32 },
4155 +};
4156 +
4157 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
4158 struct netlink_ext_ack *extack)
4159 {
4160 @@ -1232,7 +1242,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
4161 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
4162 return -EPERM;
4163
4164 - err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
4165 + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
4166 + extack);
4167 if (err < 0)
4168 return err;
4169
4170 @@ -1302,7 +1313,8 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
4171
4172 replay:
4173 /* Reinit, just in case something touches this. */
4174 - err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
4175 + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
4176 + extack);
4177 if (err < 0)
4178 return err;
4179
4180 @@ -1512,7 +1524,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
4181 idx = 0;
4182 ASSERT_RTNL();
4183
4184 - err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
4185 + err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX,
4186 + rtm_tca_policy, NULL);
4187 if (err < 0)
4188 return err;
4189
4190 @@ -1729,7 +1742,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
4191 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
4192 return -EPERM;
4193
4194 - err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
4195 + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
4196 + extack);
4197 if (err < 0)
4198 return err;
4199
4200 diff --git a/net/sctp/transport.c b/net/sctp/transport.c
4201 index e0c2a4e23039..43105cf04bc4 100644
4202 --- a/net/sctp/transport.c
4203 +++ b/net/sctp/transport.c
4204 @@ -254,6 +254,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
4205 bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
4206 {
4207 struct dst_entry *dst = sctp_transport_dst_check(t);
4208 + struct sock *sk = t->asoc->base.sk;
4209 bool change = true;
4210
4211 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
4212 @@ -265,12 +266,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
4213 pmtu = SCTP_TRUNC4(pmtu);
4214
4215 if (dst) {
4216 - dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
4217 + struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
4218 + union sctp_addr addr;
4219 +
4220 + pf->af->from_sk(&addr, sk);
4221 + pf->to_sk_daddr(&t->ipaddr, sk);
4222 + dst->ops->update_pmtu(dst, sk, NULL, pmtu);
4223 + pf->to_sk_daddr(&addr, sk);
4224 +
4225 dst = sctp_transport_dst_check(t);
4226 }
4227
4228 if (!dst) {
4229 - t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
4230 + t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
4231 dst = t->dst;
4232 }
4233
4234 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
4235 index 0aebf0695ae0..4d2125d258fe 100644
4236 --- a/net/tipc/socket.c
4237 +++ b/net/tipc/socket.c
4238 @@ -1063,8 +1063,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
4239 /* Handle implicit connection setup */
4240 if (unlikely(dest)) {
4241 rc = __tipc_sendmsg(sock, m, dlen);
4242 - if (dlen && (dlen == rc))
4243 + if (dlen && dlen == rc) {
4244 + tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
4245 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
4246 + }
4247 return rc;
4248 }
4249
4250 diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
4251 index f6d2985b2520..778b42ba90b8 100644
4252 --- a/sound/hda/hdac_controller.c
4253 +++ b/sound/hda/hdac_controller.c
4254 @@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
4255 */
4256 void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
4257 {
4258 + WARN_ON_ONCE(!bus->rb.area);
4259 +
4260 spin_lock_irq(&bus->reg_lock);
4261 /* CORB set up */
4262 bus->corb.addr = bus->rb.addr;
4263 @@ -382,7 +384,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
4264 EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset);
4265
4266 /* reset codec link */
4267 -static int azx_reset(struct hdac_bus *bus, bool full_reset)
4268 +int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
4269 {
4270 if (!full_reset)
4271 goto skip_reset;
4272 @@ -407,7 +409,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
4273 skip_reset:
4274 /* check to see if controller is ready */
4275 if (!snd_hdac_chip_readb(bus, GCTL)) {
4276 - dev_dbg(bus->dev, "azx_reset: controller not ready!\n");
4277 + dev_dbg(bus->dev, "controller not ready!\n");
4278 return -EBUSY;
4279 }
4280
4281 @@ -422,6 +424,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
4282
4283 return 0;
4284 }
4285 +EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);
4286
4287 /* enable interrupts */
4288 static void azx_int_enable(struct hdac_bus *bus)
4289 @@ -476,15 +479,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
4290 return false;
4291
4292 /* reset controller */
4293 - azx_reset(bus, full_reset);
4294 + snd_hdac_bus_reset_link(bus, full_reset);
4295
4296 - /* initialize interrupts */
4297 + /* clear interrupts */
4298 azx_int_clear(bus);
4299 - azx_int_enable(bus);
4300
4301 /* initialize the codec command I/O */
4302 snd_hdac_bus_init_cmd_io(bus);
4303
4304 + /* enable interrupts after CORB/RIRB buffers are initialized above */
4305 + azx_int_enable(bus);
4306 +
4307 /* program the position buffer */
4308 if (bus->use_posbuf && bus->posbuf.addr) {
4309 snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
4310 diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
4311 index 1bfc8db1826a..56ddab43da7e 100644
4312 --- a/sound/soc/codecs/rt5514.c
4313 +++ b/sound/soc/codecs/rt5514.c
4314 @@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = {
4315 {RT5514_ANA_CTRL_LDO10, 0x00028604},
4316 {RT5514_ANA_CTRL_ADCFED, 0x00000800},
4317 {RT5514_ASRC_IN_CTRL1, 0x00000003},
4318 - {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
4319 - {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
4320 + {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
4321 + {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
4322 };
4323
4324 static const struct reg_default rt5514_reg[] = {
4325 @@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = {
4326 {RT5514_ASRC_IN_CTRL1, 0x00000003},
4327 {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
4328 {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
4329 - {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
4330 + {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
4331 {RT5514_DOWNFILTER1_CTRL1, 0x00020c2f},
4332 {RT5514_DOWNFILTER1_CTRL2, 0x00020c2f},
4333 - {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
4334 + {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
4335 {RT5514_ANA_CTRL_LDO10, 0x00028604},
4336 {RT5514_ANA_CTRL_LDO18_16, 0x02000345},
4337 {RT5514_ANA_CTRL_ADC12, 0x0000a2a8},
4338 diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
4339 index d53680ac78e4..6df158669420 100644
4340 --- a/sound/soc/codecs/sigmadsp.c
4341 +++ b/sound/soc/codecs/sigmadsp.c
4342 @@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
4343 struct sigmadsp_control *ctrl, void *data)
4344 {
4345 /* safeload loads up to 20 bytes in a atomic operation */
4346 - if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
4347 - sigmadsp->ops->safeload)
4348 + if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
4349 return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
4350 ctrl->num_bytes);
4351 else
4352 diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
4353 index f27464c2c5ba..79541960f45d 100644
4354 --- a/sound/soc/codecs/wm8804-i2c.c
4355 +++ b/sound/soc/codecs/wm8804-i2c.c
4356 @@ -13,6 +13,7 @@
4357 #include <linux/init.h>
4358 #include <linux/module.h>
4359 #include <linux/i2c.h>
4360 +#include <linux/acpi.h>
4361
4362 #include "wm8804.h"
4363
4364 @@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
4365 };
4366 MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
4367
4368 +#if defined(CONFIG_OF)
4369 static const struct of_device_id wm8804_of_match[] = {
4370 { .compatible = "wlf,wm8804", },
4371 { }
4372 };
4373 MODULE_DEVICE_TABLE(of, wm8804_of_match);
4374 +#endif
4375 +
4376 +#ifdef CONFIG_ACPI
4377 +static const struct acpi_device_id wm8804_acpi_match[] = {
4378 + { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
4379 + { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
4380 + { },
4381 +};
4382 +MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
4383 +#endif
4384
4385 static struct i2c_driver wm8804_i2c_driver = {
4386 .driver = {
4387 .name = "wm8804",
4388 .pm = &wm8804_pm,
4389 - .of_match_table = wm8804_of_match,
4390 + .of_match_table = of_match_ptr(wm8804_of_match),
4391 + .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
4392 },
4393 .probe = wm8804_i2c_probe,
4394 .remove = wm8804_i2c_remove,
4395 diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
4396 index f94b484abb99..a0bef63b8fb1 100644
4397 --- a/sound/soc/intel/skylake/skl.c
4398 +++ b/sound/soc/intel/skylake/skl.c
4399 @@ -698,7 +698,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
4400 return -ENXIO;
4401 }
4402
4403 - skl_init_chip(bus, true);
4404 + snd_hdac_bus_reset_link(bus, true);
4405
4406 snd_hdac_bus_parse_capabilities(bus);
4407
4408 diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
4409 index e28edb1f7263..eb7879bcc6a7 100644
4410 --- a/sound/soc/sh/rcar/adg.c
4411 +++ b/sound/soc/sh/rcar/adg.c
4412 @@ -467,6 +467,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
4413 goto rsnd_adg_get_clkout_end;
4414
4415 req_size = prop->length / sizeof(u32);
4416 + if (req_size > REQ_SIZE) {
4417 + dev_err(dev,
4418 + "too many clock-frequency, use top %d\n", REQ_SIZE);
4419 + req_size = REQ_SIZE;
4420 + }
4421
4422 of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
4423 req_48kHz_rate = 0;
4424 diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
4425 index 9896e736fa5c..710c01cd2ad2 100644
4426 --- a/sound/soc/sh/rcar/core.c
4427 +++ b/sound/soc/sh/rcar/core.c
4428 @@ -486,7 +486,7 @@ static int rsnd_status_update(u32 *status,
4429 (func_call && (mod)->ops->fn) ? #fn : ""); \
4430 if (func_call && (mod)->ops->fn) \
4431 tmp = (mod)->ops->fn(mod, io, param); \
4432 - if (tmp) \
4433 + if (tmp && (tmp != -EPROBE_DEFER)) \
4434 dev_err(dev, "%s[%d] : %s error %d\n", \
4435 rsnd_mod_name(mod), rsnd_mod_id(mod), \
4436 #fn, tmp); \
4437 @@ -1469,6 +1469,14 @@ exit_snd_probe:
4438 rsnd_dai_call(remove, &rdai->capture, priv);
4439 }
4440
4441 + /*
4442 + * adg is very special mod which can't use rsnd_dai_call(remove),
4443 + * and it registers ADG clock on probe.
4444 + * It should be unregister if probe failed.
4445 + * Mainly it is assuming -EPROBE_DEFER case
4446 + */
4447 + rsnd_adg_remove(priv);
4448 +
4449 return ret;
4450 }
4451
4452 diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
4453 index 041ec1080d52..39a46e302bab 100644
4454 --- a/sound/soc/sh/rcar/dma.c
4455 +++ b/sound/soc/sh/rcar/dma.c
4456 @@ -330,6 +330,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
4457 /* try to get DMAEngine channel */
4458 chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
4459 if (IS_ERR_OR_NULL(chan)) {
4460 + /* Let's follow when -EPROBE_DEFER case */
4461 + if (PTR_ERR(chan) == -EPROBE_DEFER)
4462 + return PTR_ERR(chan);
4463 +
4464 /*
4465 * DMA failed. try to PIO mode
4466 * see
4467 diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
4468 index e37653b0f2d0..76789523429a 100644
4469 --- a/tools/perf/builtin-script.c
4470 +++ b/tools/perf/builtin-script.c
4471 @@ -2304,8 +2304,8 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
4472 }
4473
4474 for_each_lang(scripts_path, scripts_dir, lang_dirent) {
4475 - snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
4476 - lang_dirent->d_name);
4477 + scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
4478 + lang_dirent->d_name);
4479 lang_dir = opendir(lang_path);
4480 if (!lang_dir)
4481 continue;
4482 @@ -2314,8 +2314,8 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
4483 script_root = get_script_root(script_dirent, REPORT_SUFFIX);
4484 if (script_root) {
4485 desc = script_desc__findnew(script_root);
4486 - snprintf(script_path, MAXPATHLEN, "%s/%s",
4487 - lang_path, script_dirent->d_name);
4488 + scnprintf(script_path, MAXPATHLEN, "%s/%s",
4489 + lang_path, script_dirent->d_name);
4490 read_script_info(desc, script_path);
4491 free(script_root);
4492 }
4493 @@ -2351,7 +2351,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
4494 int match, len;
4495 FILE *fp;
4496
4497 - sprintf(filename, "%s/bin/%s-record", dir_name, scriptname);
4498 + scnprintf(filename, MAXPATHLEN, "%s/bin/%s-record", dir_name, scriptname);
4499
4500 fp = fopen(filename, "r");
4501 if (!fp)
4502 @@ -2427,8 +2427,8 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
4503 }
4504
4505 for_each_lang(scripts_path, scripts_dir, lang_dirent) {
4506 - snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
4507 - lang_dirent->d_name);
4508 + scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
4509 + lang_dirent->d_name);
4510 #ifdef NO_LIBPERL
4511 if (strstr(lang_path, "perl"))
4512 continue;
4513 @@ -2483,8 +2483,8 @@ static char *get_script_path(const char *script_root, const char *suffix)
4514 return NULL;
4515
4516 for_each_lang(scripts_path, scripts_dir, lang_dirent) {
4517 - snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
4518 - lang_dirent->d_name);
4519 + scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
4520 + lang_dirent->d_name);
4521 lang_dir = opendir(lang_path);
4522 if (!lang_dir)
4523 continue;
4524 @@ -2495,8 +2495,8 @@ static char *get_script_path(const char *script_root, const char *suffix)
4525 free(__script_root);
4526 closedir(lang_dir);
4527 closedir(scripts_dir);
4528 - snprintf(script_path, MAXPATHLEN, "%s/%s",
4529 - lang_path, script_dirent->d_name);
4530 + scnprintf(script_path, MAXPATHLEN, "%s/%s",
4531 + lang_path, script_dirent->d_name);
4532 return strdup(script_path);
4533 }
4534 free(__script_root);
4535 diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
4536 index efcaf6cac2eb..e46f51b17513 100644
4537 --- a/tools/perf/scripts/python/export-to-postgresql.py
4538 +++ b/tools/perf/scripts/python/export-to-postgresql.py
4539 @@ -204,14 +204,23 @@ from ctypes import *
4540 libpq = CDLL("libpq.so.5")
4541 PQconnectdb = libpq.PQconnectdb
4542 PQconnectdb.restype = c_void_p
4543 +PQconnectdb.argtypes = [ c_char_p ]
4544 PQfinish = libpq.PQfinish
4545 +PQfinish.argtypes = [ c_void_p ]
4546 PQstatus = libpq.PQstatus
4547 +PQstatus.restype = c_int
4548 +PQstatus.argtypes = [ c_void_p ]
4549 PQexec = libpq.PQexec
4550 PQexec.restype = c_void_p
4551 +PQexec.argtypes = [ c_void_p, c_char_p ]
4552 PQresultStatus = libpq.PQresultStatus
4553 +PQresultStatus.restype = c_int
4554 +PQresultStatus.argtypes = [ c_void_p ]
4555 PQputCopyData = libpq.PQputCopyData
4556 +PQputCopyData.restype = c_int
4557 PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
4558 PQputCopyEnd = libpq.PQputCopyEnd
4559 +PQputCopyEnd.restype = c_int
4560 PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
4561
4562 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
4563 diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
4564 index f827bf77e9d2..e4bb82c8aba9 100644
4565 --- a/tools/perf/scripts/python/export-to-sqlite.py
4566 +++ b/tools/perf/scripts/python/export-to-sqlite.py
4567 @@ -440,7 +440,11 @@ def branch_type_table(*x):
4568
4569 def sample_table(*x):
4570 if branches:
4571 - bind_exec(sample_query, 18, x)
4572 + for xx in x[0:15]:
4573 + sample_query.addBindValue(str(xx))
4574 + for xx in x[19:22]:
4575 + sample_query.addBindValue(str(xx))
4576 + do_query_(sample_query)
4577 else:
4578 bind_exec(sample_query, 22, x)
4579
4580 diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
4581 index 0e1367f90af5..60fea0a376fc 100644
4582 --- a/tools/perf/tests/attr.c
4583 +++ b/tools/perf/tests/attr.c
4584 @@ -164,8 +164,8 @@ static int run_dir(const char *d, const char *perf)
4585 if (verbose > 0)
4586 vcnt++;
4587
4588 - snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
4589 - d, d, perf, vcnt, v);
4590 + scnprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
4591 + d, d, perf, vcnt, v);
4592
4593 return system(cmd) ? TEST_FAIL : TEST_OK;
4594 }
4595 diff --git a/tools/perf/tests/mem.c b/tools/perf/tests/mem.c
4596 index 21952e1e6e6d..0f82ee9fd3f7 100644
4597 --- a/tools/perf/tests/mem.c
4598 +++ b/tools/perf/tests/mem.c
4599 @@ -16,7 +16,7 @@ static int check(union perf_mem_data_src data_src,
4600
4601 n = perf_mem__snp_scnprintf(out, sizeof out, &mi);
4602 n += perf_mem__lvl_scnprintf(out + n, sizeof out - n, &mi);
4603 - snprintf(failure, sizeof failure, "unexpected %s", out);
4604 + scnprintf(failure, sizeof failure, "unexpected %s", out);
4605 TEST_ASSERT_VAL(failure, !strcmp(string, out));
4606 return 0;
4607 }
4608 diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
4609 index 9abca267afa9..7bedf8608fdd 100644
4610 --- a/tools/perf/tests/pmu.c
4611 +++ b/tools/perf/tests/pmu.c
4612 @@ -98,7 +98,7 @@ static char *test_format_dir_get(void)
4613 struct test_format *format = &test_formats[i];
4614 FILE *file;
4615
4616 - snprintf(name, PATH_MAX, "%s/%s", dir, format->name);
4617 + scnprintf(name, PATH_MAX, "%s/%s", dir, format->name);
4618
4619 file = fopen(name, "w");
4620 if (!file)
4621 diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
4622 index d9ffc1e6eb39..ce6bcb0a5368 100644
4623 --- a/tools/perf/util/cgroup.c
4624 +++ b/tools/perf/util/cgroup.c
4625 @@ -78,7 +78,7 @@ static int open_cgroup(char *name)
4626 if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
4627 return -1;
4628
4629 - snprintf(path, PATH_MAX, "%s/%s", mnt, name);
4630 + scnprintf(path, PATH_MAX, "%s/%s", mnt, name);
4631
4632 fd = open(path, O_RDONLY);
4633 if (fd == -1)
4634 diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
4635 index b25635e945f3..53f620472151 100644
4636 --- a/tools/perf/util/parse-events.c
4637 +++ b/tools/perf/util/parse-events.c
4638 @@ -202,8 +202,8 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
4639
4640 for_each_event(sys_dirent, evt_dir, evt_dirent) {
4641
4642 - snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
4643 - evt_dirent->d_name);
4644 + scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
4645 + evt_dirent->d_name);
4646 fd = open(evt_path, O_RDONLY);
4647 if (fd < 0)
4648 continue;
4649 diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
4650 index 9dff41bcc776..d87d458996b7 100644
4651 --- a/tools/perf/util/pmu.c
4652 +++ b/tools/perf/util/pmu.c
4653 @@ -349,7 +349,7 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
4654 if (pmu_alias_info_file(name))
4655 continue;
4656
4657 - snprintf(path, PATH_MAX, "%s/%s", dir, name);
4658 + scnprintf(path, PATH_MAX, "%s/%s", dir, name);
4659
4660 file = fopen(path, "r");
4661 if (!file) {
4662 diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
4663 new file mode 100644
4664 index 000000000000..4e151f1005b2
4665 --- /dev/null
4666 +++ b/tools/testing/selftests/efivarfs/config
4667 @@ -0,0 +1 @@
4668 +CONFIG_EFIVAR_FS=y
4669 diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config
4670 index 2fde30191a47..a7e8cd5bb265 100644
4671 --- a/tools/testing/selftests/memory-hotplug/config
4672 +++ b/tools/testing/selftests/memory-hotplug/config
4673 @@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y
4674 CONFIG_MEMORY_HOTPLUG_SPARSE=y
4675 CONFIG_NOTIFIER_ERROR_INJECTION=y
4676 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
4677 +CONFIG_MEMORY_HOTREMOVE=y