Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.16/0111-4.16.12-all-fixes.patch

Revision 3117
Mon May 28 08:57:22 2018 UTC by niro
File size: 228322 bytes
-linux-4.16.12
1 diff --git a/Makefile b/Makefile
2 index 79c191442771..ded9e8480d74 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 16
9 -SUBLEVEL = 11
10 +SUBLEVEL = 12
11 EXTRAVERSION =
12 NAME = Fearless Coyote
13
14 diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
15 index 471b2274fbeb..c40b4380951c 100644
16 --- a/arch/powerpc/include/asm/exception-64s.h
17 +++ b/arch/powerpc/include/asm/exception-64s.h
18 @@ -74,6 +74,27 @@
19 */
20 #define EX_R3 EX_DAR
21
22 +#define STF_ENTRY_BARRIER_SLOT \
23 + STF_ENTRY_BARRIER_FIXUP_SECTION; \
24 + nop; \
25 + nop; \
26 + nop
27 +
28 +#define STF_EXIT_BARRIER_SLOT \
29 + STF_EXIT_BARRIER_FIXUP_SECTION; \
30 + nop; \
31 + nop; \
32 + nop; \
33 + nop; \
34 + nop; \
35 + nop
36 +
37 +/*
38 + * r10 must be free to use, r13 must be paca
39 + */
40 +#define INTERRUPT_TO_KERNEL \
41 + STF_ENTRY_BARRIER_SLOT
42 +
43 /*
44 * Macros for annotating the expected destination of (h)rfid
45 *
46 @@ -90,16 +111,19 @@
47 rfid
48
49 #define RFI_TO_USER \
50 + STF_EXIT_BARRIER_SLOT; \
51 RFI_FLUSH_SLOT; \
52 rfid; \
53 b rfi_flush_fallback
54
55 #define RFI_TO_USER_OR_KERNEL \
56 + STF_EXIT_BARRIER_SLOT; \
57 RFI_FLUSH_SLOT; \
58 rfid; \
59 b rfi_flush_fallback
60
61 #define RFI_TO_GUEST \
62 + STF_EXIT_BARRIER_SLOT; \
63 RFI_FLUSH_SLOT; \
64 rfid; \
65 b rfi_flush_fallback
66 @@ -108,21 +132,25 @@
67 hrfid
68
69 #define HRFI_TO_USER \
70 + STF_EXIT_BARRIER_SLOT; \
71 RFI_FLUSH_SLOT; \
72 hrfid; \
73 b hrfi_flush_fallback
74
75 #define HRFI_TO_USER_OR_KERNEL \
76 + STF_EXIT_BARRIER_SLOT; \
77 RFI_FLUSH_SLOT; \
78 hrfid; \
79 b hrfi_flush_fallback
80
81 #define HRFI_TO_GUEST \
82 + STF_EXIT_BARRIER_SLOT; \
83 RFI_FLUSH_SLOT; \
84 hrfid; \
85 b hrfi_flush_fallback
86
87 #define HRFI_TO_UNKNOWN \
88 + STF_EXIT_BARRIER_SLOT; \
89 RFI_FLUSH_SLOT; \
90 hrfid; \
91 b hrfi_flush_fallback
92 @@ -254,6 +282,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
93 #define __EXCEPTION_PROLOG_1_PRE(area) \
94 OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
95 OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
96 + INTERRUPT_TO_KERNEL; \
97 SAVE_CTR(r10, area); \
98 mfcr r9;
99
100 diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
101 index 1e82eb3caabd..a9b64df34e2a 100644
102 --- a/arch/powerpc/include/asm/feature-fixups.h
103 +++ b/arch/powerpc/include/asm/feature-fixups.h
104 @@ -187,6 +187,22 @@ label##3: \
105 FTR_ENTRY_OFFSET label##1b-label##3b; \
106 .popsection;
107
108 +#define STF_ENTRY_BARRIER_FIXUP_SECTION \
109 +953: \
110 + .pushsection __stf_entry_barrier_fixup,"a"; \
111 + .align 2; \
112 +954: \
113 + FTR_ENTRY_OFFSET 953b-954b; \
114 + .popsection;
115 +
116 +#define STF_EXIT_BARRIER_FIXUP_SECTION \
117 +955: \
118 + .pushsection __stf_exit_barrier_fixup,"a"; \
119 + .align 2; \
120 +956: \
121 + FTR_ENTRY_OFFSET 955b-956b; \
122 + .popsection;
123 +
124 #define RFI_FLUSH_FIXUP_SECTION \
125 951: \
126 .pushsection __rfi_flush_fixup,"a"; \
127 @@ -199,6 +215,9 @@ label##3: \
128 #ifndef __ASSEMBLY__
129 #include <linux/types.h>
130
131 +extern long stf_barrier_fallback;
132 +extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
133 +extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
134 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
135
136 void apply_feature_fixups(void);
137 diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
138 index eca3f9c68907..5a740feb7bd7 100644
139 --- a/arch/powerpc/include/asm/hvcall.h
140 +++ b/arch/powerpc/include/asm/hvcall.h
141 @@ -337,6 +337,9 @@
142 #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
143 #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
144 #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
145 +#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
146 +#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
147 +#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
148
149 #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
150 #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
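An aside on the bit numbering used in the comments above (illustrative, not part of the patch): the "IBM bit" convention counts from the most significant bit, so IBM bit n of a 64-bit word is 1ull << (63 - n), which is why H_CPU_BEHAV_FAVOUR_SECURITY (IBM bit 0) is 1ull << 63 and the new H_CPU_CHAR_COUNT_CACHE_DISABLED (IBM bit 7) is 1ull << 56. A minimal C check of that mapping:

/* Illustrative only: verifies the MSB-first "IBM bit" numbering used above. */
#include <assert.h>
#include <stdio.h>

static unsigned long long ibm_bit(unsigned int n)
{
    return 1ull << (63 - n);            /* IBM bit 0 is the most significant bit */
}

int main(void)
{
    /* Values match the constants and "IBM bit" comments in the hunk above. */
    assert(ibm_bit(0) == (1ull << 63)); /* H_CPU_BEHAV_FAVOUR_SECURITY     */
    assert(ibm_bit(2) == (1ull << 61)); /* H_CPU_CHAR_L1D_FLUSH_ORI30      */
    assert(ibm_bit(7) == (1ull << 56)); /* H_CPU_CHAR_COUNT_CACHE_DISABLED */
    printf("IBM bit 7 = 0x%016llx\n", ibm_bit(7));
    return 0;
}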
151 diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
152 new file mode 100644
153 index 000000000000..44989b22383c
154 --- /dev/null
155 +++ b/arch/powerpc/include/asm/security_features.h
156 @@ -0,0 +1,85 @@
157 +/* SPDX-License-Identifier: GPL-2.0+ */
158 +/*
159 + * Security related feature bit definitions.
160 + *
161 + * Copyright 2018, Michael Ellerman, IBM Corporation.
162 + */
163 +
164 +#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
165 +#define _ASM_POWERPC_SECURITY_FEATURES_H
166 +
167 +
168 +extern unsigned long powerpc_security_features;
169 +extern bool rfi_flush;
170 +
171 +/* These are bit flags */
172 +enum stf_barrier_type {
173 + STF_BARRIER_NONE = 0x1,
174 + STF_BARRIER_FALLBACK = 0x2,
175 + STF_BARRIER_EIEIO = 0x4,
176 + STF_BARRIER_SYNC_ORI = 0x8,
177 +};
178 +
179 +void setup_stf_barrier(void);
180 +void do_stf_barrier_fixups(enum stf_barrier_type types);
181 +
182 +static inline void security_ftr_set(unsigned long feature)
183 +{
184 + powerpc_security_features |= feature;
185 +}
186 +
187 +static inline void security_ftr_clear(unsigned long feature)
188 +{
189 + powerpc_security_features &= ~feature;
190 +}
191 +
192 +static inline bool security_ftr_enabled(unsigned long feature)
193 +{
194 + return !!(powerpc_security_features & feature);
195 +}
196 +
197 +
198 +// Features indicating support for Spectre/Meltdown mitigations
199 +
200 +// The L1-D cache can be flushed with ori r30,r30,0
201 +#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull
202 +
203 +// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
204 +#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull
205 +
206 +// ori r31,r31,0 acts as a speculation barrier
207 +#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull
208 +
209 +// Speculation past bctr is disabled
210 +#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull
211 +
212 +// Entries in L1-D are private to a SMT thread
213 +#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull
214 +
215 +// Indirect branch prediction cache disabled
216 +#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
217 +
218 +
219 +// Features indicating need for Spectre/Meltdown mitigations
220 +
221 +// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
222 +#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull
223 +
224 +// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
225 +#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull
226 +
227 +// A speculation barrier should be used for bounds checks (Spectre variant 1)
228 +#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull
229 +
230 +// Firmware configuration indicates user favours security over performance
231 +#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
232 +
233 +
234 +// Features enabled by default
235 +#define SEC_FTR_DEFAULT \
236 + (SEC_FTR_L1D_FLUSH_HV | \
237 + SEC_FTR_L1D_FLUSH_PR | \
238 + SEC_FTR_BNDS_CHK_SPEC_BAR | \
239 + SEC_FTR_FAVOUR_SECURITY)
240 +
241 +#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
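The new security_features.h above boils down to a small bit-flag API: a global powerpc_security_features mask plus set/clear/test helpers, which later hunks in this patch use from security.c and the powernv/pseries setup code. A minimal userspace sketch of how those helpers combine (illustrative only; it re-declares a subset of the flag values from the hunk above rather than including the kernel header):

#include <stdbool.h>
#include <stdio.h>

/* A few of the flag values from the header above, re-declared for the sketch. */
#define SEC_FTR_SPEC_BAR_ORI31   0x0000000000000004ull
#define SEC_FTR_L1D_FLUSH_HV     0x0000000000000040ull
#define SEC_FTR_L1D_FLUSH_PR     0x0000000000000080ull
#define SEC_FTR_FAVOUR_SECURITY  0x0000000000000200ull

/* Subset of SEC_FTR_DEFAULT: the "need mitigation" flags start out set. */
static unsigned long long powerpc_security_features =
    SEC_FTR_L1D_FLUSH_HV | SEC_FTR_L1D_FLUSH_PR | SEC_FTR_FAVOUR_SECURITY;

static void security_ftr_set(unsigned long long feature)
{
    powerpc_security_features |= feature;
}

static void security_ftr_clear(unsigned long long feature)
{
    powerpc_security_features &= ~feature;
}

static bool security_ftr_enabled(unsigned long long feature)
{
    return !!(powerpc_security_features & feature);
}

int main(void)
{
    /* Firmware reports the ori31 speculation barrier: record it. */
    security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);

    /* A guest never needs the hypervisor flush, so clear it, as the
     * pseries setup code in a later hunk does. */
    security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);

    /* Enable the RFI flush when security is favoured and a PR flush is needed. */
    bool enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                  security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);

    printf("rfi flush enabled: %d\n", enable);
    return 0;
}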
242 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
243 index 1b6bc7fba996..d458c45e5004 100644
244 --- a/arch/powerpc/kernel/Makefile
245 +++ b/arch/powerpc/kernel/Makefile
246 @@ -42,7 +42,7 @@ obj-$(CONFIG_VDSO32) += vdso32/
247 obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
248 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
249 obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
250 -obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
251 +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
252 obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
253 obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
254 obj-$(CONFIG_PPC64) += vdso64/
255 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
256 index 1ecfd8ffb098..bf9b94e376fd 100644
257 --- a/arch/powerpc/kernel/exceptions-64s.S
258 +++ b/arch/powerpc/kernel/exceptions-64s.S
259 @@ -833,7 +833,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
260 #endif
261
262
263 -EXC_REAL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
264 +EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
265 EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
266 TRAMP_KVM(PACA_EXGEN, 0x900)
267 EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
268 @@ -909,6 +909,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
269 mtctr r13; \
270 GET_PACA(r13); \
271 std r10,PACA_EXGEN+EX_R10(r13); \
272 + INTERRUPT_TO_KERNEL; \
273 KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
274 HMT_MEDIUM; \
275 mfctr r9;
276 @@ -917,7 +918,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
277 #define SYSCALL_KVMTEST \
278 HMT_MEDIUM; \
279 mr r9,r13; \
280 - GET_PACA(r13);
281 + GET_PACA(r13); \
282 + INTERRUPT_TO_KERNEL;
283 #endif
284
285 #define LOAD_SYSCALL_HANDLER(reg) \
286 @@ -1455,6 +1457,19 @@ masked_##_H##interrupt: \
287 b .; \
288 MASKED_DEC_HANDLER(_H)
289
290 +TRAMP_REAL_BEGIN(stf_barrier_fallback)
291 + std r9,PACA_EXRFI+EX_R9(r13)
292 + std r10,PACA_EXRFI+EX_R10(r13)
293 + sync
294 + ld r9,PACA_EXRFI+EX_R9(r13)
295 + ld r10,PACA_EXRFI+EX_R10(r13)
296 + ori 31,31,0
297 + .rept 14
298 + b 1f
299 +1:
300 + .endr
301 + blr
302 +
303 TRAMP_REAL_BEGIN(rfi_flush_fallback)
304 SET_SCRATCH0(r13);
305 GET_PACA(r13);
306 diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
307 new file mode 100644
308 index 000000000000..b98a722da915
309 --- /dev/null
310 +++ b/arch/powerpc/kernel/security.c
311 @@ -0,0 +1,237 @@
312 +// SPDX-License-Identifier: GPL-2.0+
313 +//
314 +// Security related flags and so on.
315 +//
316 +// Copyright 2018, Michael Ellerman, IBM Corporation.
317 +
318 +#include <linux/kernel.h>
319 +#include <linux/device.h>
320 +#include <linux/seq_buf.h>
321 +
322 +#include <asm/debugfs.h>
323 +#include <asm/security_features.h>
324 +
325 +
326 +unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
327 +
328 +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
329 +{
330 + bool thread_priv;
331 +
332 + thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
333 +
334 + if (rfi_flush || thread_priv) {
335 + struct seq_buf s;
336 + seq_buf_init(&s, buf, PAGE_SIZE - 1);
337 +
338 + seq_buf_printf(&s, "Mitigation: ");
339 +
340 + if (rfi_flush)
341 + seq_buf_printf(&s, "RFI Flush");
342 +
343 + if (rfi_flush && thread_priv)
344 + seq_buf_printf(&s, ", ");
345 +
346 + if (thread_priv)
347 + seq_buf_printf(&s, "L1D private per thread");
348 +
349 + seq_buf_printf(&s, "\n");
350 +
351 + return s.len;
352 + }
353 +
354 + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
355 + !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
356 + return sprintf(buf, "Not affected\n");
357 +
358 + return sprintf(buf, "Vulnerable\n");
359 +}
360 +
361 +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
362 +{
363 + if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
364 + return sprintf(buf, "Not affected\n");
365 +
366 + return sprintf(buf, "Vulnerable\n");
367 +}
368 +
369 +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
370 +{
371 + bool bcs, ccd, ori;
372 + struct seq_buf s;
373 +
374 + seq_buf_init(&s, buf, PAGE_SIZE - 1);
375 +
376 + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
377 + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
378 + ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
379 +
380 + if (bcs || ccd) {
381 + seq_buf_printf(&s, "Mitigation: ");
382 +
383 + if (bcs)
384 + seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
385 +
386 + if (bcs && ccd)
387 + seq_buf_printf(&s, ", ");
388 +
389 + if (ccd)
390 + seq_buf_printf(&s, "Indirect branch cache disabled");
391 + } else
392 + seq_buf_printf(&s, "Vulnerable");
393 +
394 + if (ori)
395 + seq_buf_printf(&s, ", ori31 speculation barrier enabled");
396 +
397 + seq_buf_printf(&s, "\n");
398 +
399 + return s.len;
400 +}
401 +
402 +/*
403 + * Store-forwarding barrier support.
404 + */
405 +
406 +static enum stf_barrier_type stf_enabled_flush_types;
407 +static bool no_stf_barrier;
408 +bool stf_barrier;
409 +
410 +static int __init handle_no_stf_barrier(char *p)
411 +{
412 + pr_info("stf-barrier: disabled on command line.");
413 + no_stf_barrier = true;
414 + return 0;
415 +}
416 +
417 +early_param("no_stf_barrier", handle_no_stf_barrier);
418 +
419 +/* This is the generic flag used by other architectures */
420 +static int __init handle_ssbd(char *p)
421 +{
422 + if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
423 + /* Until firmware tells us, we have the barrier with auto */
424 + return 0;
425 + } else if (strncmp(p, "off", 3) == 0) {
426 + handle_no_stf_barrier(NULL);
427 + return 0;
428 + } else
429 + return 1;
430 +
431 + return 0;
432 +}
433 +early_param("spec_store_bypass_disable", handle_ssbd);
434 +
435 +/* This is the generic flag used by other architectures */
436 +static int __init handle_no_ssbd(char *p)
437 +{
438 + handle_no_stf_barrier(NULL);
439 + return 0;
440 +}
441 +early_param("nospec_store_bypass_disable", handle_no_ssbd);
442 +
443 +static void stf_barrier_enable(bool enable)
444 +{
445 + if (enable)
446 + do_stf_barrier_fixups(stf_enabled_flush_types);
447 + else
448 + do_stf_barrier_fixups(STF_BARRIER_NONE);
449 +
450 + stf_barrier = enable;
451 +}
452 +
453 +void setup_stf_barrier(void)
454 +{
455 + enum stf_barrier_type type;
456 + bool enable, hv;
457 +
458 + hv = cpu_has_feature(CPU_FTR_HVMODE);
459 +
460 + /* Default to fallback in case fw-features are not available */
461 + if (cpu_has_feature(CPU_FTR_ARCH_300))
462 + type = STF_BARRIER_EIEIO;
463 + else if (cpu_has_feature(CPU_FTR_ARCH_207S))
464 + type = STF_BARRIER_SYNC_ORI;
465 + else if (cpu_has_feature(CPU_FTR_ARCH_206))
466 + type = STF_BARRIER_FALLBACK;
467 + else
468 + type = STF_BARRIER_NONE;
469 +
470 + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
471 + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
472 + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
473 +
474 + if (type == STF_BARRIER_FALLBACK) {
475 + pr_info("stf-barrier: fallback barrier available\n");
476 + } else if (type == STF_BARRIER_SYNC_ORI) {
477 + pr_info("stf-barrier: hwsync barrier available\n");
478 + } else if (type == STF_BARRIER_EIEIO) {
479 + pr_info("stf-barrier: eieio barrier available\n");
480 + }
481 +
482 + stf_enabled_flush_types = type;
483 +
484 + if (!no_stf_barrier)
485 + stf_barrier_enable(enable);
486 +}
487 +
488 +ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
489 +{
490 + if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
491 + const char *type;
492 + switch (stf_enabled_flush_types) {
493 + case STF_BARRIER_EIEIO:
494 + type = "eieio";
495 + break;
496 + case STF_BARRIER_SYNC_ORI:
497 + type = "hwsync";
498 + break;
499 + case STF_BARRIER_FALLBACK:
500 + type = "fallback";
501 + break;
502 + default:
503 + type = "unknown";
504 + }
505 + return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
506 + }
507 +
508 + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
509 + !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
510 + return sprintf(buf, "Not affected\n");
511 +
512 + return sprintf(buf, "Vulnerable\n");
513 +}
514 +
515 +#ifdef CONFIG_DEBUG_FS
516 +static int stf_barrier_set(void *data, u64 val)
517 +{
518 + bool enable;
519 +
520 + if (val == 1)
521 + enable = true;
522 + else if (val == 0)
523 + enable = false;
524 + else
525 + return -EINVAL;
526 +
527 + /* Only do anything if we're changing state */
528 + if (enable != stf_barrier)
529 + stf_barrier_enable(enable);
530 +
531 + return 0;
532 +}
533 +
534 +static int stf_barrier_get(void *data, u64 *val)
535 +{
536 + *val = stf_barrier ? 1 : 0;
537 + return 0;
538 +}
539 +
540 +DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
541 +
542 +static __init int stf_barrier_debugfs_init(void)
543 +{
544 + debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
545 + return 0;
546 +}
547 +device_initcall(stf_barrier_debugfs_init);
548 +#endif /* CONFIG_DEBUG_FS */
549 diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
550 index c388cc3357fa..c27557aff394 100644
551 --- a/arch/powerpc/kernel/setup_64.c
552 +++ b/arch/powerpc/kernel/setup_64.c
553 @@ -927,12 +927,4 @@ static __init int rfi_flush_debugfs_init(void)
554 }
555 device_initcall(rfi_flush_debugfs_init);
556 #endif
557 -
558 -ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
559 -{
560 - if (rfi_flush)
561 - return sprintf(buf, "Mitigation: RFI Flush\n");
562 -
563 - return sprintf(buf, "Vulnerable\n");
564 -}
565 #endif /* CONFIG_PPC_BOOK3S_64 */
566 diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
567 index c8af90ff49f0..b8d82678f8b4 100644
568 --- a/arch/powerpc/kernel/vmlinux.lds.S
569 +++ b/arch/powerpc/kernel/vmlinux.lds.S
570 @@ -133,6 +133,20 @@ SECTIONS
571 RO_DATA(PAGE_SIZE)
572
573 #ifdef CONFIG_PPC64
574 + . = ALIGN(8);
575 + __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
576 + __start___stf_entry_barrier_fixup = .;
577 + *(__stf_entry_barrier_fixup)
578 + __stop___stf_entry_barrier_fixup = .;
579 + }
580 +
581 + . = ALIGN(8);
582 + __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
583 + __start___stf_exit_barrier_fixup = .;
584 + *(__stf_exit_barrier_fixup)
585 + __stop___stf_exit_barrier_fixup = .;
586 + }
587 +
588 . = ALIGN(8);
589 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
590 __start___rfi_flush_fixup = .;
591 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
592 index f61ff5a6bddb..6b3c2d405a6d 100644
593 --- a/arch/powerpc/lib/feature-fixups.c
594 +++ b/arch/powerpc/lib/feature-fixups.c
595 @@ -23,6 +23,7 @@
596 #include <asm/page.h>
597 #include <asm/sections.h>
598 #include <asm/setup.h>
599 +#include <asm/security_features.h>
600 #include <asm/firmware.h>
601
602 struct fixup_entry {
603 @@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
604 }
605
606 #ifdef CONFIG_PPC_BOOK3S_64
607 +void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
608 +{
609 + unsigned int instrs[3], *dest;
610 + long *start, *end;
611 + int i;
612 +
613 + start = PTRRELOC(&__start___stf_entry_barrier_fixup),
614 + end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
615 +
616 + instrs[0] = 0x60000000; /* nop */
617 + instrs[1] = 0x60000000; /* nop */
618 + instrs[2] = 0x60000000; /* nop */
619 +
620 + i = 0;
621 + if (types & STF_BARRIER_FALLBACK) {
622 + instrs[i++] = 0x7d4802a6; /* mflr r10 */
623 + instrs[i++] = 0x60000000; /* branch patched below */
624 + instrs[i++] = 0x7d4803a6; /* mtlr r10 */
625 + } else if (types & STF_BARRIER_EIEIO) {
626 + instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
627 + } else if (types & STF_BARRIER_SYNC_ORI) {
628 + instrs[i++] = 0x7c0004ac; /* hwsync */
629 + instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
630 + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
631 + }
632 +
633 + for (i = 0; start < end; start++, i++) {
634 + dest = (void *)start + *start;
635 +
636 + pr_devel("patching dest %lx\n", (unsigned long)dest);
637 +
638 + patch_instruction(dest, instrs[0]);
639 +
640 + if (types & STF_BARRIER_FALLBACK)
641 + patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
642 + BRANCH_SET_LINK);
643 + else
644 + patch_instruction(dest + 1, instrs[1]);
645 +
646 + patch_instruction(dest + 2, instrs[2]);
647 + }
648 +
649 + printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
650 + (types == STF_BARRIER_NONE) ? "no" :
651 + (types == STF_BARRIER_FALLBACK) ? "fallback" :
652 + (types == STF_BARRIER_EIEIO) ? "eieio" :
653 + (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
654 + : "unknown");
655 +}
656 +
657 +void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
658 +{
659 + unsigned int instrs[6], *dest;
660 + long *start, *end;
661 + int i;
662 +
663 + start = PTRRELOC(&__start___stf_exit_barrier_fixup),
664 + end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
665 +
666 + instrs[0] = 0x60000000; /* nop */
667 + instrs[1] = 0x60000000; /* nop */
668 + instrs[2] = 0x60000000; /* nop */
669 + instrs[3] = 0x60000000; /* nop */
670 + instrs[4] = 0x60000000; /* nop */
671 + instrs[5] = 0x60000000; /* nop */
672 +
673 + i = 0;
674 + if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
675 + if (cpu_has_feature(CPU_FTR_HVMODE)) {
676 + instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
677 + instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
678 + } else {
679 + instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
680 + instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
681 + }
682 + instrs[i++] = 0x7c0004ac; /* hwsync */
683 + instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
684 + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
685 + if (cpu_has_feature(CPU_FTR_HVMODE)) {
686 + instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
687 + } else {
688 + instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
689 + }
690 + } else if (types & STF_BARRIER_EIEIO) {
691 + instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
692 + }
693 +
694 + for (i = 0; start < end; start++, i++) {
695 + dest = (void *)start + *start;
696 +
697 + pr_devel("patching dest %lx\n", (unsigned long)dest);
698 +
699 + patch_instruction(dest, instrs[0]);
700 + patch_instruction(dest + 1, instrs[1]);
701 + patch_instruction(dest + 2, instrs[2]);
702 + patch_instruction(dest + 3, instrs[3]);
703 + patch_instruction(dest + 4, instrs[4]);
704 + patch_instruction(dest + 5, instrs[5]);
705 + }
706 + printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
707 + (types == STF_BARRIER_NONE) ? "no" :
708 + (types == STF_BARRIER_FALLBACK) ? "fallback" :
709 + (types == STF_BARRIER_EIEIO) ? "eieio" :
710 + (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
711 + : "unknown");
712 +}
713 +
714 +
715 +void do_stf_barrier_fixups(enum stf_barrier_type types)
716 +{
717 + do_stf_entry_barrier_fixups(types);
718 + do_stf_exit_barrier_fixups(types);
719 +}
720 +
721 void do_rfi_flush_fixups(enum l1d_flush_type types)
722 {
723 unsigned int instrs[3], *dest;
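A note on the fixup machinery above (illustrative, not part of the patch): each __stf_*_barrier_fixup entry stores a self-relative offset, since FTR_ENTRY_OFFSET records "patch site minus table entry" at build time, and do_stf_entry/exit_barrier_fixups() recovers the site with dest = (void *)start + *start before rewriting the nop slots there. A standalone C sketch of that position-independent offset scheme, using stand-in data rather than real linker sections:

/* Standalone sketch of the self-relative offset tables used above. Pointer
 * subtraction across unrelated objects is formally undefined in ISO C; it is
 * used here only to mimic what the assembler/linker computes for the sections.
 */
#include <stdio.h>

static unsigned int site_a = 0x60000000;   /* stand-ins for nop slots to patch */
static unsigned int site_b = 0x60000000;

static long fixup_table[2];  /* plays the role of __start/__stop___stf_entry_barrier_fixup */

int main(void)
{
    /* What FTR_ENTRY_OFFSET ("953b-954b") records for each entry at build time. */
    fixup_table[0] = (char *)&site_a - (char *)&fixup_table[0];
    fixup_table[1] = (char *)&site_b - (char *)&fixup_table[1];

    /* What do_stf_entry_barrier_fixups() does at boot: recover each site, patch it. */
    for (long *start = fixup_table; start < fixup_table + 2; start++) {
        unsigned int *dest = (unsigned int *)((char *)start + *start);
        *dest = 0x7c0004ac;                /* e.g. replace the nop with hwsync */
        printf("patched %p\n", (void *)dest);
    }
    return 0;
}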
724 diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
725 index 092715b9674b..fc0412d59149 100644
726 --- a/arch/powerpc/platforms/powernv/setup.c
727 +++ b/arch/powerpc/platforms/powernv/setup.c
728 @@ -38,57 +38,92 @@
729 #include <asm/smp.h>
730 #include <asm/tm.h>
731 #include <asm/setup.h>
732 +#include <asm/security_features.h>
733
734 #include "powernv.h"
735
736 +
737 +static bool fw_feature_is(const char *state, const char *name,
738 + struct device_node *fw_features)
739 +{
740 + struct device_node *np;
741 + bool rc = false;
742 +
743 + np = of_get_child_by_name(fw_features, name);
744 + if (np) {
745 + rc = of_property_read_bool(np, state);
746 + of_node_put(np);
747 + }
748 +
749 + return rc;
750 +}
751 +
752 +static void init_fw_feat_flags(struct device_node *np)
753 +{
754 + if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
755 + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
756 +
757 + if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
758 + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
759 +
760 + if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
761 + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
762 +
763 + if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
764 + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
765 +
766 + if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
767 + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
768 +
769 + if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
770 + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
771 +
772 + /*
773 + * The features below are enabled by default, so we instead look to see
774 + * if firmware has *disabled* them, and clear them if so.
775 + */
776 + if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
777 + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
778 +
779 + if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
780 + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
781 +
782 + if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
783 + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
784 +
785 + if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
786 + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
787 +}
788 +
789 static void pnv_setup_rfi_flush(void)
790 {
791 struct device_node *np, *fw_features;
792 enum l1d_flush_type type;
793 - int enable;
794 + bool enable;
795
796 /* Default to fallback in case fw-features are not available */
797 type = L1D_FLUSH_FALLBACK;
798 - enable = 1;
799
800 np = of_find_node_by_name(NULL, "ibm,opal");
801 fw_features = of_get_child_by_name(np, "fw-features");
802 of_node_put(np);
803
804 if (fw_features) {
805 - np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
806 - if (np && of_property_read_bool(np, "enabled"))
807 - type = L1D_FLUSH_MTTRIG;
808 + init_fw_feat_flags(fw_features);
809 + of_node_put(fw_features);
810
811 - of_node_put(np);
812 + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
813 + type = L1D_FLUSH_MTTRIG;
814
815 - np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
816 - if (np && of_property_read_bool(np, "enabled"))
817 + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
818 type = L1D_FLUSH_ORI;
819 -
820 - of_node_put(np);
821 -
822 - /* Enable unless firmware says NOT to */
823 - enable = 2;
824 - np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
825 - if (np && of_property_read_bool(np, "disabled"))
826 - enable--;
827 -
828 - of_node_put(np);
829 -
830 - np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
831 - if (np && of_property_read_bool(np, "disabled"))
832 - enable--;
833 -
834 - np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
835 - if (np && of_property_read_bool(np, "disabled"))
836 - enable = 0;
837 -
838 - of_node_put(np);
839 - of_node_put(fw_features);
840 }
841
842 - setup_rfi_flush(type, enable > 0);
843 + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
844 + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
845 + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
846 +
847 + setup_rfi_flush(type, enable);
848 }
849
850 static void __init pnv_setup_arch(void)
851 @@ -96,6 +131,7 @@ static void __init pnv_setup_arch(void)
852 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
853
854 pnv_setup_rfi_flush();
855 + setup_stf_barrier();
856
857 /* Initialize SMP */
858 pnv_smp_init();
859 diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
860 index 1a527625acf7..21fed38bbbd5 100644
861 --- a/arch/powerpc/platforms/pseries/setup.c
862 +++ b/arch/powerpc/platforms/pseries/setup.c
863 @@ -68,6 +68,7 @@
864 #include <asm/plpar_wrappers.h>
865 #include <asm/kexec.h>
866 #include <asm/isa-bridge.h>
867 +#include <asm/security_features.h>
868
869 #include "pseries.h"
870
871 @@ -459,6 +460,40 @@ static void __init find_and_init_phbs(void)
872 of_pci_check_probe_only();
873 }
874
875 +static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
876 +{
877 + if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
878 + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
879 +
880 + if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
881 + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
882 +
883 + if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
884 + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
885 +
886 + if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
887 + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
888 +
889 + if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
890 + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
891 +
892 + if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
893 + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
894 +
895 + /*
896 + * The features below are enabled by default, so we instead look to see
897 + * if firmware has *disabled* them, and clear them if so.
898 + */
899 + if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
900 + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
901 +
902 + if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
903 + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
904 +
905 + if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
906 + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
907 +}
908 +
909 static void pseries_setup_rfi_flush(void)
910 {
911 struct h_cpu_char_result result;
912 @@ -466,29 +501,26 @@ static void pseries_setup_rfi_flush(void)
913 bool enable;
914 long rc;
915
916 - /* Enable by default */
917 - enable = true;
918 -
919 rc = plpar_get_cpu_characteristics(&result);
920 - if (rc == H_SUCCESS) {
921 - types = L1D_FLUSH_NONE;
922 + if (rc == H_SUCCESS)
923 + init_cpu_char_feature_flags(&result);
924 +
925 + /*
926 + * We're the guest so this doesn't apply to us, clear it to simplify
927 + * handling of it elsewhere.
928 + */
929 + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
930
931 - if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
932 - types |= L1D_FLUSH_MTTRIG;
933 - if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
934 - types |= L1D_FLUSH_ORI;
935 + types = L1D_FLUSH_FALLBACK;
936
937 - /* Use fallback if nothing set in hcall */
938 - if (types == L1D_FLUSH_NONE)
939 - types = L1D_FLUSH_FALLBACK;
940 + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
941 + types |= L1D_FLUSH_MTTRIG;
942
943 - if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
944 - (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
945 - enable = false;
946 - } else {
947 - /* Default to fallback if case hcall is not available */
948 - types = L1D_FLUSH_FALLBACK;
949 - }
950 + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
951 + types |= L1D_FLUSH_ORI;
952 +
953 + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
954 + security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
955
956 setup_rfi_flush(types, enable);
957 }
958 @@ -667,6 +699,7 @@ static void __init pSeries_setup_arch(void)
959 fwnmi_init();
960
961 pseries_setup_rfi_flush();
962 + setup_stf_barrier();
963
964 /* By default, only probe PCI (can be overridden by rtas_pci) */
965 pci_add_flags(PCI_PROBE_ONLY);
966 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
967 index 6e91e0d422ea..c94dd09a82d1 100644
968 --- a/arch/s390/Kconfig
969 +++ b/arch/s390/Kconfig
970 @@ -120,6 +120,7 @@ config S390
971 select GENERIC_CLOCKEVENTS
972 select GENERIC_CPU_AUTOPROBE
973 select GENERIC_CPU_DEVICES if !SMP
974 + select GENERIC_CPU_VULNERABILITIES
975 select GENERIC_FIND_FIRST_BIT
976 select GENERIC_SMP_IDLE_THREAD
977 select GENERIC_TIME_VSYSCALL
978 @@ -576,7 +577,7 @@ choice
979 config EXPOLINE_OFF
980 bool "spectre_v2=off"
981
982 -config EXPOLINE_MEDIUM
983 +config EXPOLINE_AUTO
984 bool "spectre_v2=auto"
985
986 config EXPOLINE_FULL
987 diff --git a/arch/s390/Makefile b/arch/s390/Makefile
988 index 2ced3239cb84..e1bc722fba41 100644
989 --- a/arch/s390/Makefile
990 +++ b/arch/s390/Makefile
991 @@ -84,7 +84,7 @@ ifdef CONFIG_EXPOLINE
992 CC_FLAGS_EXPOLINE += -mfunction-return=thunk
993 CC_FLAGS_EXPOLINE += -mindirect-branch-table
994 export CC_FLAGS_EXPOLINE
995 - cflags-y += $(CC_FLAGS_EXPOLINE)
996 + cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
997 endif
998 endif
999
1000 diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
1001 index e8077f0971f8..2bf01ba44107 100644
1002 --- a/arch/s390/crypto/crc32be-vx.S
1003 +++ b/arch/s390/crypto/crc32be-vx.S
1004 @@ -13,6 +13,7 @@
1005 */
1006
1007 #include <linux/linkage.h>
1008 +#include <asm/nospec-insn.h>
1009 #include <asm/vx-insn.h>
1010
1011 /* Vector register range containing CRC-32 constants */
1012 @@ -67,6 +68,8 @@
1013
1014 .previous
1015
1016 + GEN_BR_THUNK %r14
1017 +
1018 .text
1019 /*
1020 * The CRC-32 function(s) use these calling conventions:
1021 @@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16)
1022
1023 .Ldone:
1024 VLGVF %r2,%v2,3
1025 - br %r14
1026 + BR_EX %r14
1027
1028 .previous
1029 diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
1030 index d8c67a58c0c5..7d6f568bd3ad 100644
1031 --- a/arch/s390/crypto/crc32le-vx.S
1032 +++ b/arch/s390/crypto/crc32le-vx.S
1033 @@ -14,6 +14,7 @@
1034 */
1035
1036 #include <linux/linkage.h>
1037 +#include <asm/nospec-insn.h>
1038 #include <asm/vx-insn.h>
1039
1040 /* Vector register range containing CRC-32 constants */
1041 @@ -76,6 +77,7 @@
1042
1043 .previous
1044
1045 + GEN_BR_THUNK %r14
1046
1047 .text
1048
1049 @@ -264,6 +266,6 @@ crc32_le_vgfm_generic:
1050
1051 .Ldone:
1052 VLGVF %r2,%v2,2
1053 - br %r14
1054 + BR_EX %r14
1055
1056 .previous
1057 diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
1058 new file mode 100644
1059 index 000000000000..955d620db23e
1060 --- /dev/null
1061 +++ b/arch/s390/include/asm/alternative-asm.h
1062 @@ -0,0 +1,108 @@
1063 +/* SPDX-License-Identifier: GPL-2.0 */
1064 +#ifndef _ASM_S390_ALTERNATIVE_ASM_H
1065 +#define _ASM_S390_ALTERNATIVE_ASM_H
1066 +
1067 +#ifdef __ASSEMBLY__
1068 +
1069 +/*
1070 + * Check the length of an instruction sequence. The length may not be larger
1071 + * than 254 bytes and it has to be divisible by 2.
1072 + */
1073 +.macro alt_len_check start,end
1074 + .if ( \end - \start ) > 254
1075 + .error "cpu alternatives does not support instructions blocks > 254 bytes\n"
1076 + .endif
1077 + .if ( \end - \start ) % 2
1078 + .error "cpu alternatives instructions length is odd\n"
1079 + .endif
1080 +.endm
1081 +
1082 +/*
1083 + * Issue one struct alt_instr descriptor entry (need to put it into
1084 + * the section .altinstructions, see below). This entry contains
1085 + * enough information for the alternatives patching code to patch an
1086 + * instruction. See apply_alternatives().
1087 + */
1088 +.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
1089 + .long \orig_start - .
1090 + .long \alt_start - .
1091 + .word \feature
1092 + .byte \orig_end - \orig_start
1093 + .byte \alt_end - \alt_start
1094 +.endm
1095 +
1096 +/*
1097 + * Fill up @bytes with nops. The macro emits 6-byte nop instructions
1098 + * for the bulk of the area, possibly followed by a 4-byte and/or
1099 + * a 2-byte nop if the size of the area is not divisible by 6.
1100 + */
1101 +.macro alt_pad_fill bytes
1102 + .fill ( \bytes ) / 6, 6, 0xc0040000
1103 + .fill ( \bytes ) % 6 / 4, 4, 0x47000000
1104 + .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700
1105 +.endm
1106 +
1107 +/*
1108 + * Fill up @bytes with nops. If the number of bytes is larger
1109 + * than 6, emit a jg instruction to branch over all nops, then
1110 + * fill an area of size (@bytes - 6) with nop instructions.
1111 + */
1112 +.macro alt_pad bytes
1113 + .if ( \bytes > 0 )
1114 + .if ( \bytes > 6 )
1115 + jg . + \bytes
1116 + alt_pad_fill \bytes - 6
1117 + .else
1118 + alt_pad_fill \bytes
1119 + .endif
1120 + .endif
1121 +.endm
1122 +
1123 +/*
1124 + * Define an alternative between two instructions. If @feature is
1125 + * present, early code in apply_alternatives() replaces @oldinstr with
1126 + * @newinstr. ".skip" directive takes care of proper instruction padding
1127 + * in case @newinstr is longer than @oldinstr.
1128 + */
1129 +.macro ALTERNATIVE oldinstr, newinstr, feature
1130 + .pushsection .altinstr_replacement,"ax"
1131 +770: \newinstr
1132 +771: .popsection
1133 +772: \oldinstr
1134 +773: alt_len_check 770b, 771b
1135 + alt_len_check 772b, 773b
1136 + alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
1137 +774: .pushsection .altinstructions,"a"
1138 + alt_entry 772b, 774b, 770b, 771b, \feature
1139 + .popsection
1140 +.endm
1141 +
1142 +/*
1143 + * Define an alternative between two instructions. If @feature is
1144 + * present, early code in apply_alternatives() replaces @oldinstr with
1145 + * @newinstr. ".skip" directive takes care of proper instruction padding
1146 + * in case @newinstr is longer than @oldinstr.
1147 + */
1148 +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
1149 + .pushsection .altinstr_replacement,"ax"
1150 +770: \newinstr1
1151 +771: \newinstr2
1152 +772: .popsection
1153 +773: \oldinstr
1154 +774: alt_len_check 770b, 771b
1155 + alt_len_check 771b, 772b
1156 + alt_len_check 773b, 774b
1157 + .if ( 771b - 770b > 772b - 771b )
1158 + alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
1159 + .else
1160 + alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
1161 + .endif
1162 +775: .pushsection .altinstructions,"a"
1163 + alt_entry 773b, 775b, 770b, 771b,\feature1
1164 + alt_entry 773b, 775b, 771b, 772b,\feature2
1165 + .popsection
1166 +.endm
1167 +
1168 +#endif /* __ASSEMBLY__ */
1169 +
1170 +#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
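The alt_entry macro above emits one fixed-size descriptor per alternative into .altinstructions: two self-relative 32-bit offsets, a 16-bit facility number, and the two sequence lengths. A C view of that 12-byte record (field names here are illustrative; the macro is documented above as producing one struct alt_instr entry, and the layout shown simply follows the .long/.long/.word/.byte/.byte sequence):

#include <stdint.h>
#include <stdio.h>

/* One .altinstructions record as laid down by alt_entry above. */
struct alt_instr {
    int32_t  instr_offset;    /* .long orig_start - . : original site, self-relative */
    int32_t  repl_offset;     /* .long alt_start - .  : replacement, self-relative   */
    uint16_t facility;        /* .word feature        : facility bit to test         */
    uint8_t  instrlen;        /* .byte orig_end - orig_start                         */
    uint8_t  replacementlen;  /* .byte alt_end - alt_start                           */
} __attribute__((packed));

int main(void)
{
    _Static_assert(sizeof(struct alt_instr) == 12, "descriptor should be 12 bytes");
    printf("alt_instr descriptor size: %zu bytes\n", sizeof(struct alt_instr));
    return 0;
}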
1171 diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
1172 index 7df48e5cf36f..b4bd8c41e9d3 100644
1173 --- a/arch/s390/include/asm/nospec-branch.h
1174 +++ b/arch/s390/include/asm/nospec-branch.h
1175 @@ -6,12 +6,11 @@
1176
1177 #include <linux/types.h>
1178
1179 -extern int nospec_call_disable;
1180 -extern int nospec_return_disable;
1181 +extern int nospec_disable;
1182
1183 void nospec_init_branches(void);
1184 -void nospec_call_revert(s32 *start, s32 *end);
1185 -void nospec_return_revert(s32 *start, s32 *end);
1186 +void nospec_auto_detect(void);
1187 +void nospec_revert(s32 *start, s32 *end);
1188
1189 #endif /* __ASSEMBLY__ */
1190
1191 diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
1192 new file mode 100644
1193 index 000000000000..a01f81186e86
1194 --- /dev/null
1195 +++ b/arch/s390/include/asm/nospec-insn.h
1196 @@ -0,0 +1,196 @@
1197 +/* SPDX-License-Identifier: GPL-2.0 */
1198 +#ifndef _ASM_S390_NOSPEC_ASM_H
1199 +#define _ASM_S390_NOSPEC_ASM_H
1200 +
1201 +#include <asm/alternative-asm.h>
1202 +#include <asm/asm-offsets.h>
1203 +#include <asm/dwarf.h>
1204 +
1205 +#ifdef __ASSEMBLY__
1206 +
1207 +#ifdef CONFIG_EXPOLINE
1208 +
1209 +_LC_BR_R1 = __LC_BR_R1
1210 +
1211 +/*
1212 + * The expoline macros are used to create thunks in the same format
1213 + * as gcc generates them. The 'comdat' section flag makes sure that
1214 + * the various thunks are merged into a single copy.
1215 + */
1216 + .macro __THUNK_PROLOG_NAME name
1217 + .pushsection .text.\name,"axG",@progbits,\name,comdat
1218 + .globl \name
1219 + .hidden \name
1220 + .type \name,@function
1221 +\name:
1222 + CFI_STARTPROC
1223 + .endm
1224 +
1225 + .macro __THUNK_EPILOG
1226 + CFI_ENDPROC
1227 + .popsection
1228 + .endm
1229 +
1230 + .macro __THUNK_PROLOG_BR r1,r2
1231 + __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
1232 + .endm
1233 +
1234 + .macro __THUNK_PROLOG_BC d0,r1,r2
1235 + __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
1236 + .endm
1237 +
1238 + .macro __THUNK_BR r1,r2
1239 + jg __s390x_indirect_jump_r\r2\()use_r\r1
1240 + .endm
1241 +
1242 + .macro __THUNK_BC d0,r1,r2
1243 + jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
1244 + .endm
1245 +
1246 + .macro __THUNK_BRASL r1,r2,r3
1247 + brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
1248 + .endm
1249 +
1250 + .macro __DECODE_RR expand,reg,ruse
1251 + .set __decode_fail,1
1252 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1253 + .ifc \reg,%r\r1
1254 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1255 + .ifc \ruse,%r\r2
1256 + \expand \r1,\r2
1257 + .set __decode_fail,0
1258 + .endif
1259 + .endr
1260 + .endif
1261 + .endr
1262 + .if __decode_fail == 1
1263 + .error "__DECODE_RR failed"
1264 + .endif
1265 + .endm
1266 +
1267 + .macro __DECODE_RRR expand,rsave,rtarget,ruse
1268 + .set __decode_fail,1
1269 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1270 + .ifc \rsave,%r\r1
1271 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1272 + .ifc \rtarget,%r\r2
1273 + .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1274 + .ifc \ruse,%r\r3
1275 + \expand \r1,\r2,\r3
1276 + .set __decode_fail,0
1277 + .endif
1278 + .endr
1279 + .endif
1280 + .endr
1281 + .endif
1282 + .endr
1283 + .if __decode_fail == 1
1284 + .error "__DECODE_RRR failed"
1285 + .endif
1286 + .endm
1287 +
1288 + .macro __DECODE_DRR expand,disp,reg,ruse
1289 + .set __decode_fail,1
1290 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1291 + .ifc \reg,%r\r1
1292 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1293 + .ifc \ruse,%r\r2
1294 + \expand \disp,\r1,\r2
1295 + .set __decode_fail,0
1296 + .endif
1297 + .endr
1298 + .endif
1299 + .endr
1300 + .if __decode_fail == 1
1301 + .error "__DECODE_DRR failed"
1302 + .endif
1303 + .endm
1304 +
1305 + .macro __THUNK_EX_BR reg,ruse
1306 + # Be very careful when adding instructions to this macro!
1307 + # The ALTERNATIVE replacement code has a .+10 which targets
1308 + # the "br \reg" after the code has been patched.
1309 +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
1310 + exrl 0,555f
1311 + j .
1312 +#else
1313 + .ifc \reg,%r1
1314 + ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
1315 + j .
1316 + .else
1317 + larl \ruse,555f
1318 + ex 0,0(\ruse)
1319 + j .
1320 + .endif
1321 +#endif
1322 +555: br \reg
1323 + .endm
1324 +
1325 + .macro __THUNK_EX_BC disp,reg,ruse
1326 +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
1327 + exrl 0,556f
1328 + j .
1329 +#else
1330 + larl \ruse,556f
1331 + ex 0,0(\ruse)
1332 + j .
1333 +#endif
1334 +556: b \disp(\reg)
1335 + .endm
1336 +
1337 + .macro GEN_BR_THUNK reg,ruse=%r1
1338 + __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
1339 + __THUNK_EX_BR \reg,\ruse
1340 + __THUNK_EPILOG
1341 + .endm
1342 +
1343 + .macro GEN_B_THUNK disp,reg,ruse=%r1
1344 + __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
1345 + __THUNK_EX_BC \disp,\reg,\ruse
1346 + __THUNK_EPILOG
1347 + .endm
1348 +
1349 + .macro BR_EX reg,ruse=%r1
1350 +557: __DECODE_RR __THUNK_BR,\reg,\ruse
1351 + .pushsection .s390_indirect_branches,"a",@progbits
1352 + .long 557b-.
1353 + .popsection
1354 + .endm
1355 +
1356 + .macro B_EX disp,reg,ruse=%r1
1357 +558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
1358 + .pushsection .s390_indirect_branches,"a",@progbits
1359 + .long 558b-.
1360 + .popsection
1361 + .endm
1362 +
1363 + .macro BASR_EX rsave,rtarget,ruse=%r1
1364 +559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
1365 + .pushsection .s390_indirect_branches,"a",@progbits
1366 + .long 559b-.
1367 + .popsection
1368 + .endm
1369 +
1370 +#else
1371 + .macro GEN_BR_THUNK reg,ruse=%r1
1372 + .endm
1373 +
1374 + .macro GEN_B_THUNK disp,reg,ruse=%r1
1375 + .endm
1376 +
1377 + .macro BR_EX reg,ruse=%r1
1378 + br \reg
1379 + .endm
1380 +
1381 + .macro B_EX disp,reg,ruse=%r1
1382 + b \disp(\reg)
1383 + .endm
1384 +
1385 + .macro BASR_EX rsave,rtarget,ruse=%r1
1386 + basr \rsave,\rtarget
1387 + .endm
1388 +#endif
1389 +
1390 +#endif /* __ASSEMBLY__ */
1391 +
1392 +#endif /* _ASM_S390_NOSPEC_ASM_H */
1393 diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
1394 index 7f27e3da9709..a02bc90fe5f3 100644
1395 --- a/arch/s390/kernel/Makefile
1396 +++ b/arch/s390/kernel/Makefile
1397 @@ -61,11 +61,12 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
1398 obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
1399 obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
1400 obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
1401 +obj-y += nospec-branch.o
1402
1403 extra-y += head.o head64.o vmlinux.lds
1404
1405 -obj-$(CONFIG_EXPOLINE) += nospec-branch.o
1406 -CFLAGS_REMOVE_expoline.o += $(CC_FLAGS_EXPOLINE)
1407 +obj-$(CONFIG_SYSFS) += nospec-sysfs.o
1408 +CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
1409
1410 obj-$(CONFIG_MODULES) += module.o
1411 obj-$(CONFIG_SMP) += smp.o
1412 diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
1413 index 22476135f738..8e1f2aee85ef 100644
1414 --- a/arch/s390/kernel/alternative.c
1415 +++ b/arch/s390/kernel/alternative.c
1416 @@ -2,6 +2,7 @@
1417 #include <linux/module.h>
1418 #include <asm/alternative.h>
1419 #include <asm/facility.h>
1420 +#include <asm/nospec-branch.h>
1421
1422 #define MAX_PATCH_LEN (255 - 1)
1423
1424 @@ -15,29 +16,6 @@ static int __init disable_alternative_instructions(char *str)
1425
1426 early_param("noaltinstr", disable_alternative_instructions);
1427
1428 -static int __init nobp_setup_early(char *str)
1429 -{
1430 - bool enabled;
1431 - int rc;
1432 -
1433 - rc = kstrtobool(str, &enabled);
1434 - if (rc)
1435 - return rc;
1436 - if (enabled && test_facility(82))
1437 - __set_facility(82, S390_lowcore.alt_stfle_fac_list);
1438 - else
1439 - __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1440 - return 0;
1441 -}
1442 -early_param("nobp", nobp_setup_early);
1443 -
1444 -static int __init nospec_setup_early(char *str)
1445 -{
1446 - __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1447 - return 0;
1448 -}
1449 -early_param("nospec", nospec_setup_early);
1450 -
1451 struct brcl_insn {
1452 u16 opc;
1453 s32 disp;
1454 diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
1455 index 587b195b588d..3fd0b4535a71 100644
1456 --- a/arch/s390/kernel/asm-offsets.c
1457 +++ b/arch/s390/kernel/asm-offsets.c
1458 @@ -179,6 +179,7 @@ int main(void)
1459 OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
1460 OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
1461 OFFSET(__LC_GMAP, lowcore, gmap);
1462 + OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
1463 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
1464 OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
1465 /* hardware defined lowcore locations 0x1000 - 0x18ff */
1466 diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
1467 index f6c56009e822..b65874b0b412 100644
1468 --- a/arch/s390/kernel/base.S
1469 +++ b/arch/s390/kernel/base.S
1470 @@ -9,18 +9,22 @@
1471
1472 #include <linux/linkage.h>
1473 #include <asm/asm-offsets.h>
1474 +#include <asm/nospec-insn.h>
1475 #include <asm/ptrace.h>
1476 #include <asm/sigp.h>
1477
1478 + GEN_BR_THUNK %r9
1479 + GEN_BR_THUNK %r14
1480 +
1481 ENTRY(s390_base_mcck_handler)
1482 basr %r13,0
1483 0: lg %r15,__LC_PANIC_STACK # load panic stack
1484 aghi %r15,-STACK_FRAME_OVERHEAD
1485 larl %r1,s390_base_mcck_handler_fn
1486 - lg %r1,0(%r1)
1487 - ltgr %r1,%r1
1488 + lg %r9,0(%r1)
1489 + ltgr %r9,%r9
1490 jz 1f
1491 - basr %r14,%r1
1492 + BASR_EX %r14,%r9
1493 1: la %r1,4095
1494 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
1495 lpswe __LC_MCK_OLD_PSW
1496 @@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler)
1497 basr %r13,0
1498 0: aghi %r15,-STACK_FRAME_OVERHEAD
1499 larl %r1,s390_base_ext_handler_fn
1500 - lg %r1,0(%r1)
1501 - ltgr %r1,%r1
1502 + lg %r9,0(%r1)
1503 + ltgr %r9,%r9
1504 jz 1f
1505 - basr %r14,%r1
1506 + BASR_EX %r14,%r9
1507 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
1508 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
1509 lpswe __LC_EXT_OLD_PSW
1510 @@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler)
1511 basr %r13,0
1512 0: aghi %r15,-STACK_FRAME_OVERHEAD
1513 larl %r1,s390_base_pgm_handler_fn
1514 - lg %r1,0(%r1)
1515 - ltgr %r1,%r1
1516 + lg %r9,0(%r1)
1517 + ltgr %r9,%r9
1518 jz 1f
1519 - basr %r14,%r1
1520 + BASR_EX %r14,%r9
1521 lmg %r0,%r15,__LC_SAVE_AREA_SYNC
1522 lpswe __LC_PGM_OLD_PSW
1523 1: lpswe disabled_wait_psw-0b(%r13)
1524 @@ -117,7 +121,7 @@ ENTRY(diag308_reset)
1525 larl %r4,.Lcontinue_psw # Restore PSW flags
1526 lpswe 0(%r4)
1527 .Lcontinue:
1528 - br %r14
1529 + BR_EX %r14
1530 .align 16
1531 .Lrestart_psw:
1532 .long 0x00080000,0x80000000 + .Lrestart_part2
1533 diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
1534 index a5621ea6d123..d3e1a510c9c1 100644
1535 --- a/arch/s390/kernel/entry.S
1536 +++ b/arch/s390/kernel/entry.S
1537 @@ -27,6 +27,7 @@
1538 #include <asm/setup.h>
1539 #include <asm/nmi.h>
1540 #include <asm/export.h>
1541 +#include <asm/nospec-insn.h>
1542
1543 __PT_R0 = __PT_GPRS
1544 __PT_R1 = __PT_GPRS + 8
1545 @@ -223,67 +224,9 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
1546 .popsection
1547 .endm
1548
1549 -#ifdef CONFIG_EXPOLINE
1550 -
1551 - .macro GEN_BR_THUNK name,reg,tmp
1552 - .section .text.\name,"axG",@progbits,\name,comdat
1553 - .globl \name
1554 - .hidden \name
1555 - .type \name,@function
1556 -\name:
1557 - CFI_STARTPROC
1558 -#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
1559 - exrl 0,0f
1560 -#else
1561 - larl \tmp,0f
1562 - ex 0,0(\tmp)
1563 -#endif
1564 - j .
1565 -0: br \reg
1566 - CFI_ENDPROC
1567 - .endm
1568 -
1569 - GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
1570 - GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
1571 - GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
1572 -
1573 - .macro BASR_R14_R9
1574 -0: brasl %r14,__s390x_indirect_jump_r1use_r9
1575 - .pushsection .s390_indirect_branches,"a",@progbits
1576 - .long 0b-.
1577 - .popsection
1578 - .endm
1579 -
1580 - .macro BR_R1USE_R14
1581 -0: jg __s390x_indirect_jump_r1use_r14
1582 - .pushsection .s390_indirect_branches,"a",@progbits
1583 - .long 0b-.
1584 - .popsection
1585 - .endm
1586 -
1587 - .macro BR_R11USE_R14
1588 -0: jg __s390x_indirect_jump_r11use_r14
1589 - .pushsection .s390_indirect_branches,"a",@progbits
1590 - .long 0b-.
1591 - .popsection
1592 - .endm
1593 -
1594 -#else /* CONFIG_EXPOLINE */
1595 -
1596 - .macro BASR_R14_R9
1597 - basr %r14,%r9
1598 - .endm
1599 -
1600 - .macro BR_R1USE_R14
1601 - br %r14
1602 - .endm
1603 -
1604 - .macro BR_R11USE_R14
1605 - br %r14
1606 - .endm
1607 -
1608 -#endif /* CONFIG_EXPOLINE */
1609 -
1610 + GEN_BR_THUNK %r9
1611 + GEN_BR_THUNK %r14
1612 + GEN_BR_THUNK %r14,%r11
1613
1614 .section .kprobes.text, "ax"
1615 .Ldummy:
1616 @@ -300,7 +243,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
1617 ENTRY(__bpon)
1618 .globl __bpon
1619 BPON
1620 - BR_R1USE_R14
1621 + BR_EX %r14
1622
1623 /*
1624 * Scheduler resume function, called by switch_to
1625 @@ -326,7 +269,7 @@ ENTRY(__switch_to)
1626 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
1627 jz 0f
1628 .insn s,0xb2800000,__LC_LPP # set program parameter
1629 -0: BR_R1USE_R14
1630 +0: BR_EX %r14
1631
1632 .L__critical_start:
1633
1634 @@ -393,7 +336,7 @@ sie_exit:
1635 xgr %r5,%r5
1636 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
1637 lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
1638 - BR_R1USE_R14
1639 + BR_EX %r14
1640 .Lsie_fault:
1641 lghi %r14,-EFAULT
1642 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
1643 @@ -452,7 +395,7 @@ ENTRY(system_call)
1644 lgf %r9,0(%r8,%r10) # get system call add.
1645 TSTMSK __TI_flags(%r12),_TIF_TRACE
1646 jnz .Lsysc_tracesys
1647 - BASR_R14_R9 # call sys_xxxx
1648 + BASR_EX %r14,%r9 # call sys_xxxx
1649 stg %r2,__PT_R2(%r11) # store return value
1650
1651 .Lsysc_return:
1652 @@ -637,7 +580,7 @@ ENTRY(system_call)
1653 lmg %r3,%r7,__PT_R3(%r11)
1654 stg %r7,STACK_FRAME_OVERHEAD(%r15)
1655 lg %r2,__PT_ORIG_GPR2(%r11)
1656 - BASR_R14_R9 # call sys_xxx
1657 + BASR_EX %r14,%r9 # call sys_xxx
1658 stg %r2,__PT_R2(%r11) # store return value
1659 .Lsysc_tracenogo:
1660 TSTMSK __TI_flags(%r12),_TIF_TRACE
1661 @@ -661,7 +604,7 @@ ENTRY(ret_from_fork)
1662 lmg %r9,%r10,__PT_R9(%r11) # load gprs
1663 ENTRY(kernel_thread_starter)
1664 la %r2,0(%r10)
1665 - BASR_R14_R9
1666 + BASR_EX %r14,%r9
1667 j .Lsysc_tracenogo
1668
1669 /*
1670 @@ -743,7 +686,7 @@ ENTRY(pgm_check_handler)
1671 je .Lpgm_return
1672 lgf %r9,0(%r10,%r1) # load address of handler routine
1673 lgr %r2,%r11 # pass pointer to pt_regs
1674 - BASR_R14_R9 # branch to interrupt-handler
1675 + BASR_EX %r14,%r9 # branch to interrupt-handler
1676 .Lpgm_return:
1677 LOCKDEP_SYS_EXIT
1678 tm __PT_PSW+1(%r11),0x01 # returning to user ?
1679 @@ -1061,7 +1004,7 @@ ENTRY(psw_idle)
1680 stpt __TIMER_IDLE_ENTER(%r2)
1681 .Lpsw_idle_lpsw:
1682 lpswe __SF_EMPTY(%r15)
1683 - BR_R1USE_R14
1684 + BR_EX %r14
1685 .Lpsw_idle_end:
1686
1687 /*
1688 @@ -1103,7 +1046,7 @@ ENTRY(save_fpu_regs)
1689 .Lsave_fpu_regs_done:
1690 oi __LC_CPU_FLAGS+7,_CIF_FPU
1691 .Lsave_fpu_regs_exit:
1692 - BR_R1USE_R14
1693 + BR_EX %r14
1694 .Lsave_fpu_regs_end:
1695 EXPORT_SYMBOL(save_fpu_regs)
1696
1697 @@ -1149,7 +1092,7 @@ load_fpu_regs:
1698 .Lload_fpu_regs_done:
1699 ni __LC_CPU_FLAGS+7,255-_CIF_FPU
1700 .Lload_fpu_regs_exit:
1701 - BR_R1USE_R14
1702 + BR_EX %r14
1703 .Lload_fpu_regs_end:
1704
1705 .L__critical_end:
1706 @@ -1366,7 +1309,7 @@ cleanup_critical:
1707 jl 0f
1708 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
1709 jl .Lcleanup_load_fpu_regs
1710 -0: BR_R11USE_R14
1711 +0: BR_EX %r14
1712
1713 .align 8
1714 .Lcleanup_table:
1715 @@ -1402,7 +1345,7 @@ cleanup_critical:
1716 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1717 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1718 larl %r9,sie_exit # skip forward to sie_exit
1719 - BR_R11USE_R14
1720 + BR_EX %r14
1721 #endif
1722
1723 .Lcleanup_system_call:
1724 @@ -1456,7 +1399,7 @@ cleanup_critical:
1725 stg %r15,56(%r11) # r15 stack pointer
1726 # set new psw address and exit
1727 larl %r9,.Lsysc_do_svc
1728 - BR_R11USE_R14
1729 + BR_EX %r14,%r11
1730 .Lcleanup_system_call_insn:
1731 .quad system_call
1732 .quad .Lsysc_stmg
1733 @@ -1468,7 +1411,7 @@ cleanup_critical:
1734
1735 .Lcleanup_sysc_tif:
1736 larl %r9,.Lsysc_tif
1737 - BR_R11USE_R14
1738 + BR_EX %r14,%r11
1739
1740 .Lcleanup_sysc_restore:
1741 # check if stpt has been executed
1742 @@ -1485,14 +1428,14 @@ cleanup_critical:
1743 mvc 0(64,%r11),__PT_R8(%r9)
1744 lmg %r0,%r7,__PT_R0(%r9)
1745 1: lmg %r8,%r9,__LC_RETURN_PSW
1746 - BR_R11USE_R14
1747 + BR_EX %r14,%r11
1748 .Lcleanup_sysc_restore_insn:
1749 .quad .Lsysc_exit_timer
1750 .quad .Lsysc_done - 4
1751
1752 .Lcleanup_io_tif:
1753 larl %r9,.Lio_tif
1754 - BR_R11USE_R14
1755 + BR_EX %r14,%r11
1756
1757 .Lcleanup_io_restore:
1758 # check if stpt has been executed
1759 @@ -1506,7 +1449,7 @@ cleanup_critical:
1760 mvc 0(64,%r11),__PT_R8(%r9)
1761 lmg %r0,%r7,__PT_R0(%r9)
1762 1: lmg %r8,%r9,__LC_RETURN_PSW
1763 - BR_R11USE_R14
1764 + BR_EX %r14,%r11
1765 .Lcleanup_io_restore_insn:
1766 .quad .Lio_exit_timer
1767 .quad .Lio_done - 4
1768 @@ -1559,17 +1502,17 @@ cleanup_critical:
1769 # prepare return psw
1770 nihh %r8,0xfcfd # clear irq & wait state bits
1771 lg %r9,48(%r11) # return from psw_idle
1772 - BR_R11USE_R14
1773 + BR_EX %r14,%r11
1774 .Lcleanup_idle_insn:
1775 .quad .Lpsw_idle_lpsw
1776
1777 .Lcleanup_save_fpu_regs:
1778 larl %r9,save_fpu_regs
1779 - BR_R11USE_R14
1780 + BR_EX %r14,%r11
1781
1782 .Lcleanup_load_fpu_regs:
1783 larl %r9,load_fpu_regs
1784 - BR_R11USE_R14
1785 + BR_EX %r14,%r11
1786
1787 /*
1788 * Integer constants
1789 diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
1790 index 82df7d80fab2..27110f3294ed 100644
1791 --- a/arch/s390/kernel/mcount.S
1792 +++ b/arch/s390/kernel/mcount.S
1793 @@ -9,13 +9,17 @@
1794 #include <linux/linkage.h>
1795 #include <asm/asm-offsets.h>
1796 #include <asm/ftrace.h>
1797 +#include <asm/nospec-insn.h>
1798 #include <asm/ptrace.h>
1799 #include <asm/export.h>
1800
1801 + GEN_BR_THUNK %r1
1802 + GEN_BR_THUNK %r14
1803 +
1804 .section .kprobes.text, "ax"
1805
1806 ENTRY(ftrace_stub)
1807 - br %r14
1808 + BR_EX %r14
1809
1810 #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
1811 #define STACK_PTREGS (STACK_FRAME_OVERHEAD)
1812 @@ -23,7 +27,7 @@ ENTRY(ftrace_stub)
1813 #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
1814
1815 ENTRY(_mcount)
1816 - br %r14
1817 + BR_EX %r14
1818
1819 EXPORT_SYMBOL(_mcount)
1820
1821 @@ -53,7 +57,7 @@ ENTRY(ftrace_caller)
1822 #endif
1823 lgr %r3,%r14
1824 la %r5,STACK_PTREGS(%r15)
1825 - basr %r14,%r1
1826 + BASR_EX %r14,%r1
1827 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1828 # The j instruction gets runtime patched to a nop instruction.
1829 # See ftrace_enable_ftrace_graph_caller.
1830 @@ -68,7 +72,7 @@ ftrace_graph_caller_end:
1831 #endif
1832 lg %r1,(STACK_PTREGS_PSW+8)(%r15)
1833 lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
1834 - br %r1
1835 + BR_EX %r1
1836
1837 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1838
1839 @@ -81,6 +85,6 @@ ENTRY(return_to_handler)
1840 aghi %r15,STACK_FRAME_OVERHEAD
1841 lgr %r14,%r2
1842 lmg %r2,%r5,32(%r15)
1843 - br %r14
1844 + BR_EX %r14
1845
1846 #endif
1847 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
1848 index 1fc6d1ff92d3..0dc8ac8548ee 100644
1849 --- a/arch/s390/kernel/module.c
1850 +++ b/arch/s390/kernel/module.c
1851 @@ -159,7 +159,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
1852 me->core_layout.size += me->arch.got_size;
1853 me->arch.plt_offset = me->core_layout.size;
1854 if (me->arch.plt_size) {
1855 - if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_call_disable)
1856 + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
1857 me->arch.plt_size += PLT_ENTRY_SIZE;
1858 me->core_layout.size += me->arch.plt_size;
1859 }
1860 @@ -318,8 +318,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
1861 info->plt_offset;
1862 ip[0] = 0x0d10e310; /* basr 1,0 */
1863 ip[1] = 0x100a0004; /* lg 1,10(1) */
1864 - if (IS_ENABLED(CONFIG_EXPOLINE) &&
1865 - !nospec_call_disable) {
1866 + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
1867 unsigned int *ij;
1868 ij = me->core_layout.base +
1869 me->arch.plt_offset +
1870 @@ -440,7 +439,7 @@ int module_finalize(const Elf_Ehdr *hdr,
1871 void *aseg;
1872
1873 if (IS_ENABLED(CONFIG_EXPOLINE) &&
1874 - !nospec_call_disable && me->arch.plt_size) {
1875 + !nospec_disable && me->arch.plt_size) {
1876 unsigned int *ij;
1877
1878 ij = me->core_layout.base + me->arch.plt_offset +
1879 @@ -466,12 +465,12 @@ int module_finalize(const Elf_Ehdr *hdr,
1880 apply_alternatives(aseg, aseg + s->sh_size);
1881
1882 if (IS_ENABLED(CONFIG_EXPOLINE) &&
1883 - (!strcmp(".nospec_call_table", secname)))
1884 - nospec_call_revert(aseg, aseg + s->sh_size);
1885 + (!strncmp(".s390_indirect", secname, 14)))
1886 + nospec_revert(aseg, aseg + s->sh_size);
1887
1888 if (IS_ENABLED(CONFIG_EXPOLINE) &&
1889 - (!strcmp(".nospec_return_table", secname)))
1890 - nospec_return_revert(aseg, aseg + s->sh_size);
1891 + (!strncmp(".s390_return", secname, 12)))
1892 + nospec_revert(aseg, aseg + s->sh_size);
1893 }
1894
1895 jump_label_apply_nops(me);
1896 diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
1897 index 9aff72d3abda..8ad6a7128b3a 100644
1898 --- a/arch/s390/kernel/nospec-branch.c
1899 +++ b/arch/s390/kernel/nospec-branch.c
1900 @@ -1,32 +1,86 @@
1901 // SPDX-License-Identifier: GPL-2.0
1902 #include <linux/module.h>
1903 +#include <linux/device.h>
1904 #include <asm/nospec-branch.h>
1905
1906 -int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
1907 -int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL);
1908 +static int __init nobp_setup_early(char *str)
1909 +{
1910 + bool enabled;
1911 + int rc;
1912 +
1913 + rc = kstrtobool(str, &enabled);
1914 + if (rc)
1915 + return rc;
1916 + if (enabled && test_facility(82)) {
1917 + /*
1918 + * The user explicitly requested nobp=1, enable it and
1919 + * disable the expoline support.
1920 + */
1921 + __set_facility(82, S390_lowcore.alt_stfle_fac_list);
1922 + if (IS_ENABLED(CONFIG_EXPOLINE))
1923 + nospec_disable = 1;
1924 + } else {
1925 + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1926 + }
1927 + return 0;
1928 +}
1929 +early_param("nobp", nobp_setup_early);
1930 +
1931 +static int __init nospec_setup_early(char *str)
1932 +{
1933 + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1934 + return 0;
1935 +}
1936 +early_param("nospec", nospec_setup_early);
1937 +
1938 +static int __init nospec_report(void)
1939 +{
1940 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
1941 + pr_info("Spectre V2 mitigation: execute trampolines.\n");
1942 + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
1943 + pr_info("Spectre V2 mitigation: limited branch prediction.\n");
1944 + return 0;
1945 +}
1946 +arch_initcall(nospec_report);
1947 +
1948 +#ifdef CONFIG_EXPOLINE
1949 +
1950 +int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
1951
1952 static int __init nospectre_v2_setup_early(char *str)
1953 {
1954 - nospec_call_disable = 1;
1955 - nospec_return_disable = 1;
1956 + nospec_disable = 1;
1957 return 0;
1958 }
1959 early_param("nospectre_v2", nospectre_v2_setup_early);
1960
1961 +void __init nospec_auto_detect(void)
1962 +{
1963 + if (IS_ENABLED(CC_USING_EXPOLINE)) {
1964 + /*
1965 + * The kernel has been compiled with expolines.
1966 + * Keep expolines enabled and disable nobp.
1967 + */
1968 + nospec_disable = 0;
1969 + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1970 + }
1971 + /*
1972 + * If the kernel has not been compiled with expolines the
1973 + * nobp setting decides what is done, this depends on the
1974 + * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
1975 + */
1976 +}
1977 +
1978 static int __init spectre_v2_setup_early(char *str)
1979 {
1980 if (str && !strncmp(str, "on", 2)) {
1981 - nospec_call_disable = 0;
1982 - nospec_return_disable = 0;
1983 - }
1984 - if (str && !strncmp(str, "off", 3)) {
1985 - nospec_call_disable = 1;
1986 - nospec_return_disable = 1;
1987 - }
1988 - if (str && !strncmp(str, "auto", 4)) {
1989 - nospec_call_disable = 0;
1990 - nospec_return_disable = 1;
1991 + nospec_disable = 0;
1992 + __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
1993 }
1994 + if (str && !strncmp(str, "off", 3))
1995 + nospec_disable = 1;
1996 + if (str && !strncmp(str, "auto", 4))
1997 + nospec_auto_detect();
1998 return 0;
1999 }
2000 early_param("spectre_v2", spectre_v2_setup_early);
2001 @@ -39,7 +93,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
2002 s32 *epo;
2003
2004 /* Second part of the instruction replace is always a nop */
2005 - memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
2006 for (epo = start; epo < end; epo++) {
2007 instr = (u8 *) epo + *epo;
2008 if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
2009 @@ -60,18 +113,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
2010 br = thunk + (*(int *)(thunk + 2)) * 2;
2011 else
2012 continue;
2013 - if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
2014 + /* Check for unconditional branch 0x07f? or 0x47f???? */
2015 + if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
2016 continue;
2017 +
2018 + memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
2019 switch (type) {
2020 case BRCL_EXPOLINE:
2021 - /* brcl to thunk, replace with br + nop */
2022 insnbuf[0] = br[0];
2023 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
2024 + if (br[0] == 0x47) {
2025 + /* brcl to b, replace with bc + nopr */
2026 + insnbuf[2] = br[2];
2027 + insnbuf[3] = br[3];
2028 + } else {
2029 + /* brcl to br, replace with bcr + nop */
2030 + }
2031 break;
2032 case BRASL_EXPOLINE:
2033 - /* brasl to thunk, replace with basr + nop */
2034 - insnbuf[0] = 0x0d;
2035 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
2036 + if (br[0] == 0x47) {
2037 + /* brasl to b, replace with bas + nopr */
2038 + insnbuf[0] = 0x4d;
2039 + insnbuf[2] = br[2];
2040 + insnbuf[3] = br[3];
2041 + } else {
2042 + /* brasl to br, replace with basr + nop */
2043 + insnbuf[0] = 0x0d;
2044 + }
2045 break;
2046 }
2047
2048 @@ -79,15 +148,9 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
2049 }
2050 }
2051
2052 -void __init_or_module nospec_call_revert(s32 *start, s32 *end)
2053 -{
2054 - if (nospec_call_disable)
2055 - __nospec_revert(start, end);
2056 -}
2057 -
2058 -void __init_or_module nospec_return_revert(s32 *start, s32 *end)
2059 +void __init_or_module nospec_revert(s32 *start, s32 *end)
2060 {
2061 - if (nospec_return_disable)
2062 + if (nospec_disable)
2063 __nospec_revert(start, end);
2064 }
2065
2066 @@ -95,6 +158,8 @@ extern s32 __nospec_call_start[], __nospec_call_end[];
2067 extern s32 __nospec_return_start[], __nospec_return_end[];
2068 void __init nospec_init_branches(void)
2069 {
2070 - nospec_call_revert(__nospec_call_start, __nospec_call_end);
2071 - nospec_return_revert(__nospec_return_start, __nospec_return_end);
2072 + nospec_revert(__nospec_call_start, __nospec_call_end);
2073 + nospec_revert(__nospec_return_start, __nospec_return_end);
2074 }
2075 +
2076 +#endif /* CONFIG_EXPOLINE */
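
An aside on the nospec-branch.c hunk above: the old nospec_call_disable/nospec_return_disable pair collapses into a single nospec_disable flag, and the nobp=, nospec= and spectre_v2= parameters now pick between expolines and limited branch prediction (facility 82). The sketch below is a plain userspace model of the spectre_v2= branches only, assuming a kernel built with expolines (CC_USING_EXPOLINE); every name in it is invented for illustration and none of it is kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the kernel state: "expoline" models !nospec_disable,
 * "nobp" models facility 82 being set in alt_stfle_fac_list. */
static bool expoline = true;
static bool nobp;

static void spectre_v2_setup(const char *str)
{
        if (!strncmp(str, "on", 2)) {
                expoline = true;        /* nospec_disable = 0        */
                nobp = false;           /* __clear_facility(82, ...) */
        }
        if (!strncmp(str, "off", 3))
                expoline = false;       /* nospec_disable = 1        */
        if (!strncmp(str, "auto", 4)) {
                /* nospec_auto_detect(): built with expolines, keep them */
                expoline = true;
                nobp = false;
        }
}

int main(void)
{
        const char *args[] = { "on", "off", "auto" };

        for (unsigned int i = 0; i < sizeof(args) / sizeof(args[0]); i++) {
                spectre_v2_setup(args[i]);
                printf("spectre_v2=%-4s -> expoline=%d nobp=%d\n",
                       args[i], expoline, nobp);
        }
        return 0;
}

Note how "off" leaves the nobp state alone, matching the hunk, where only "on" and "auto" touch facility 82.
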
2077 diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
2078 new file mode 100644
2079 index 000000000000..8affad5f18cb
2080 --- /dev/null
2081 +++ b/arch/s390/kernel/nospec-sysfs.c
2082 @@ -0,0 +1,21 @@
2083 +// SPDX-License-Identifier: GPL-2.0
2084 +#include <linux/device.h>
2085 +#include <linux/cpu.h>
2086 +#include <asm/facility.h>
2087 +#include <asm/nospec-branch.h>
2088 +
2089 +ssize_t cpu_show_spectre_v1(struct device *dev,
2090 + struct device_attribute *attr, char *buf)
2091 +{
2092 + return sprintf(buf, "Mitigation: __user pointer sanitization\n");
2093 +}
2094 +
2095 +ssize_t cpu_show_spectre_v2(struct device *dev,
2096 + struct device_attribute *attr, char *buf)
2097 +{
2098 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
2099 + return sprintf(buf, "Mitigation: execute trampolines\n");
2100 + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
2101 + return sprintf(buf, "Mitigation: limited branch prediction\n");
2102 + return sprintf(buf, "Vulnerable\n");
2103 +}
2104 diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
2105 index a40ebd1d29d0..8e954c102639 100644
2106 --- a/arch/s390/kernel/reipl.S
2107 +++ b/arch/s390/kernel/reipl.S
2108 @@ -7,8 +7,11 @@
2109
2110 #include <linux/linkage.h>
2111 #include <asm/asm-offsets.h>
2112 +#include <asm/nospec-insn.h>
2113 #include <asm/sigp.h>
2114
2115 + GEN_BR_THUNK %r9
2116 +
2117 #
2118 # Issue "store status" for the current CPU to its prefix page
2119 # and call passed function afterwards
2120 @@ -67,9 +70,9 @@ ENTRY(store_status)
2121 st %r4,0(%r1)
2122 st %r5,4(%r1)
2123 stg %r2,8(%r1)
2124 - lgr %r1,%r2
2125 + lgr %r9,%r2
2126 lgr %r2,%r3
2127 - br %r1
2128 + BR_EX %r9
2129
2130 .section .bss
2131 .align 8
2132 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
2133 index ce5ff4c4d435..0786a6b53f98 100644
2134 --- a/arch/s390/kernel/setup.c
2135 +++ b/arch/s390/kernel/setup.c
2136 @@ -893,6 +893,9 @@ void __init setup_arch(char **cmdline_p)
2137 init_mm.end_data = (unsigned long) &_edata;
2138 init_mm.brk = (unsigned long) &_end;
2139
2140 + if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
2141 + nospec_auto_detect();
2142 +
2143 parse_early_param();
2144 #ifdef CONFIG_CRASH_DUMP
2145 /* Deactivate elfcorehdr= kernel parameter */
2146 diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
2147 index e99187149f17..a049a7b9d6e8 100644
2148 --- a/arch/s390/kernel/swsusp.S
2149 +++ b/arch/s390/kernel/swsusp.S
2150 @@ -13,6 +13,7 @@
2151 #include <asm/ptrace.h>
2152 #include <asm/thread_info.h>
2153 #include <asm/asm-offsets.h>
2154 +#include <asm/nospec-insn.h>
2155 #include <asm/sigp.h>
2156
2157 /*
2158 @@ -24,6 +25,8 @@
2159 * (see below) in the resume process.
2160 * This function runs with disabled interrupts.
2161 */
2162 + GEN_BR_THUNK %r14
2163 +
2164 .section .text
2165 ENTRY(swsusp_arch_suspend)
2166 stmg %r6,%r15,__SF_GPRS(%r15)
2167 @@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend)
2168 spx 0x318(%r1)
2169 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
2170 lghi %r2,0
2171 - br %r14
2172 + BR_EX %r14
2173
2174 /*
2175 * Restore saved memory image to correct place and restore register context.
2176 @@ -197,11 +200,10 @@ pgm_check_entry:
2177 larl %r15,init_thread_union
2178 ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
2179 larl %r2,.Lpanic_string
2180 - larl %r3,sclp_early_printk
2181 lghi %r1,0
2182 sam31
2183 sigp %r1,%r0,SIGP_SET_ARCHITECTURE
2184 - basr %r14,%r3
2185 + brasl %r14,sclp_early_printk
2186 larl %r3,.Ldisabled_wait_31
2187 lpsw 0(%r3)
2188 4:
2189 @@ -267,7 +269,7 @@ restore_registers:
2190 /* Return 0 */
2191 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
2192 lghi %r2,0
2193 - br %r14
2194 + BR_EX %r14
2195
2196 .section .data..nosave,"aw",@progbits
2197 .align 8
2198 diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
2199 index 495c9c4bacc7..2311f15be9cf 100644
2200 --- a/arch/s390/lib/mem.S
2201 +++ b/arch/s390/lib/mem.S
2202 @@ -7,6 +7,9 @@
2203
2204 #include <linux/linkage.h>
2205 #include <asm/export.h>
2206 +#include <asm/nospec-insn.h>
2207 +
2208 + GEN_BR_THUNK %r14
2209
2210 /*
2211 * void *memmove(void *dest, const void *src, size_t n)
2212 @@ -33,14 +36,14 @@ ENTRY(memmove)
2213 .Lmemmove_forward_remainder:
2214 larl %r5,.Lmemmove_mvc
2215 ex %r4,0(%r5)
2216 - br %r14
2217 + BR_EX %r14
2218 .Lmemmove_reverse:
2219 ic %r0,0(%r4,%r3)
2220 stc %r0,0(%r4,%r1)
2221 brctg %r4,.Lmemmove_reverse
2222 ic %r0,0(%r4,%r3)
2223 stc %r0,0(%r4,%r1)
2224 - br %r14
2225 + BR_EX %r14
2226 .Lmemmove_mvc:
2227 mvc 0(1,%r1),0(%r3)
2228 EXPORT_SYMBOL(memmove)
2229 @@ -77,7 +80,7 @@ ENTRY(memset)
2230 .Lmemset_clear_remainder:
2231 larl %r3,.Lmemset_xc
2232 ex %r4,0(%r3)
2233 - br %r14
2234 + BR_EX %r14
2235 .Lmemset_fill:
2236 cghi %r4,1
2237 lgr %r1,%r2
2238 @@ -95,10 +98,10 @@ ENTRY(memset)
2239 stc %r3,0(%r1)
2240 larl %r5,.Lmemset_mvc
2241 ex %r4,0(%r5)
2242 - br %r14
2243 + BR_EX %r14
2244 .Lmemset_fill_exit:
2245 stc %r3,0(%r1)
2246 - br %r14
2247 + BR_EX %r14
2248 .Lmemset_xc:
2249 xc 0(1,%r1),0(%r1)
2250 .Lmemset_mvc:
2251 @@ -121,7 +124,7 @@ ENTRY(memcpy)
2252 .Lmemcpy_remainder:
2253 larl %r5,.Lmemcpy_mvc
2254 ex %r4,0(%r5)
2255 - br %r14
2256 + BR_EX %r14
2257 .Lmemcpy_loop:
2258 mvc 0(256,%r1),0(%r3)
2259 la %r1,256(%r1)
2260 @@ -159,10 +162,10 @@ ENTRY(__memset\bits)
2261 \insn %r3,0(%r1)
2262 larl %r5,.L__memset_mvc\bits
2263 ex %r4,0(%r5)
2264 - br %r14
2265 + BR_EX %r14
2266 .L__memset_exit\bits:
2267 \insn %r3,0(%r2)
2268 - br %r14
2269 + BR_EX %r14
2270 .L__memset_mvc\bits:
2271 mvc \bytes(1,%r1),0(%r1)
2272 .endm
2273 diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
2274 index 25bb4643c4f4..9f794869c1b0 100644
2275 --- a/arch/s390/net/bpf_jit.S
2276 +++ b/arch/s390/net/bpf_jit.S
2277 @@ -9,6 +9,7 @@
2278 */
2279
2280 #include <linux/linkage.h>
2281 +#include <asm/nospec-insn.h>
2282 #include "bpf_jit.h"
2283
2284 /*
2285 @@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos); \
2286 clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
2287 jh sk_load_##NAME##_slow; \
2288 LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
2289 - b OFF_OK(%r6); /* Return */ \
2290 + B_EX OFF_OK,%r6; /* Return */ \
2291 \
2292 sk_load_##NAME##_slow:; \
2293 lgr %r2,%r7; /* Arg1 = skb pointer */ \
2294 @@ -64,11 +65,14 @@ sk_load_##NAME##_slow:; \
2295 brasl %r14,skb_copy_bits; /* Get data from skb */ \
2296 LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \
2297 ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
2298 - br %r6; /* Return */
2299 + BR_EX %r6; /* Return */
2300
2301 sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
2302 sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
2303
2304 + GEN_BR_THUNK %r6
2305 + GEN_B_THUNK OFF_OK,%r6
2306 +
2307 /*
2308 * Load 1 byte from SKB (optimized version)
2309 */
2310 @@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos)
2311 clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
2312 jnl sk_load_byte_slow
2313 llgc %r14,0(%r3,%r12) # Get byte from skb
2314 - b OFF_OK(%r6) # Return OK
2315 + B_EX OFF_OK,%r6 # Return OK
2316
2317 sk_load_byte_slow:
2318 lgr %r2,%r7 # Arg1 = skb pointer
2319 @@ -90,7 +94,7 @@ sk_load_byte_slow:
2320 brasl %r14,skb_copy_bits # Get data from skb
2321 llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
2322 ltgr %r2,%r2 # Set cc to (%r2 != 0)
2323 - br %r6 # Return cc
2324 + BR_EX %r6 # Return cc
2325
2326 #define sk_negative_common(NAME, SIZE, LOAD) \
2327 sk_load_##NAME##_slow_neg:; \
2328 @@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:; \
2329 jz bpf_error; \
2330 LOAD %r14,0(%r2); /* Get data from pointer */ \
2331 xr %r3,%r3; /* Set cc to zero */ \
2332 - br %r6; /* Return cc */
2333 + BR_EX %r6; /* Return cc */
2334
2335 sk_negative_common(word, 4, llgf)
2336 sk_negative_common(half, 2, llgh)
2337 @@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc)
2338 bpf_error:
2339 # force a return 0 from jit handler
2340 ltgr %r15,%r15 # Set condition code
2341 - br %r6
2342 + BR_EX %r6
2343 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
2344 index 78a19c93b380..dd2bcf0e7d00 100644
2345 --- a/arch/s390/net/bpf_jit_comp.c
2346 +++ b/arch/s390/net/bpf_jit_comp.c
2347 @@ -25,6 +25,8 @@
2348 #include <linux/bpf.h>
2349 #include <asm/cacheflush.h>
2350 #include <asm/dis.h>
2351 +#include <asm/facility.h>
2352 +#include <asm/nospec-branch.h>
2353 #include <asm/set_memory.h>
2354 #include "bpf_jit.h"
2355
2356 @@ -41,6 +43,8 @@ struct bpf_jit {
2357 int base_ip; /* Base address for literal pool */
2358 int ret0_ip; /* Address of return 0 */
2359 int exit_ip; /* Address of exit */
2360 + int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
2361 + int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
2362 int tail_call_start; /* Tail call start offset */
2363 int labels[1]; /* Labels for local jumps */
2364 };
2365 @@ -250,6 +254,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
2366 REG_SET_SEEN(b2); \
2367 })
2368
2369 +#define EMIT6_PCREL_RILB(op, b, target) \
2370 +({ \
2371 + int rel = (target - jit->prg) / 2; \
2372 + _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
2373 + REG_SET_SEEN(b); \
2374 +})
2375 +
2376 +#define EMIT6_PCREL_RIL(op, target) \
2377 +({ \
2378 + int rel = (target - jit->prg) / 2; \
2379 + _EMIT6(op | rel >> 16, rel & 0xffff); \
2380 +})
2381 +
2382 #define _EMIT6_IMM(op, imm) \
2383 ({ \
2384 unsigned int __imm = (imm); \
2385 @@ -469,8 +486,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
2386 EMIT4(0xb9040000, REG_2, BPF_REG_0);
2387 /* Restore registers */
2388 save_restore_regs(jit, REGS_RESTORE, stack_depth);
2389 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
2390 + jit->r14_thunk_ip = jit->prg;
2391 + /* Generate __s390_indirect_jump_r14 thunk */
2392 + if (test_facility(35)) {
2393 + /* exrl %r0,.+10 */
2394 + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
2395 + } else {
2396 + /* larl %r1,.+14 */
2397 + EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
2398 + /* ex 0,0(%r1) */
2399 + EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
2400 + }
2401 + /* j . */
2402 + EMIT4_PCREL(0xa7f40000, 0);
2403 + }
2404 /* br %r14 */
2405 _EMIT2(0x07fe);
2406 +
2407 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
2408 + (jit->seen & SEEN_FUNC)) {
2409 + jit->r1_thunk_ip = jit->prg;
2410 + /* Generate __s390_indirect_jump_r1 thunk */
2411 + if (test_facility(35)) {
2412 + /* exrl %r0,.+10 */
2413 + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
2414 + /* j . */
2415 + EMIT4_PCREL(0xa7f40000, 0);
2416 + /* br %r1 */
2417 + _EMIT2(0x07f1);
2418 + } else {
2419 + /* larl %r1,.+14 */
2420 + EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
2421 + /* ex 0,S390_lowcore.br_r1_trampoline */
2422 + EMIT4_DISP(0x44000000, REG_0, REG_0,
2423 + offsetof(struct lowcore, br_r1_trampoline));
2424 + /* j . */
2425 + EMIT4_PCREL(0xa7f40000, 0);
2426 + }
2427 + }
2428 }
2429
2430 /*
2431 @@ -966,8 +1020,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
2432 /* lg %w1,<d(imm)>(%l) */
2433 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
2434 EMIT_CONST_U64(func));
2435 - /* basr %r14,%w1 */
2436 - EMIT2(0x0d00, REG_14, REG_W1);
2437 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
2438 + /* brasl %r14,__s390_indirect_jump_r1 */
2439 + EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
2440 + } else {
2441 + /* basr %r14,%w1 */
2442 + EMIT2(0x0d00, REG_14, REG_W1);
2443 + }
2444 /* lgr %b0,%r2: load return value into %b0 */
2445 EMIT4(0xb9040000, BPF_REG_0, REG_2);
2446 if ((jit->seen & SEEN_SKB) &&
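
As an illustration of the new EMIT6_PCREL_RILB()/EMIT6_PCREL_RIL() helpers above: the branch target is turned into a signed halfword offset, rel = (target - jit->prg) / 2, and split across the 6-byte instruction. The standalone sketch below shows only that packing; the opcode and addresses are made-up examples, not values from the JIT.

#include <stdint.h>
#include <stdio.h>

/* Pack a RIL-style 6-byte instruction: 32-bit opcode/register part
 * plus a signed halfword displacement to the branch target. */
static void emit6_pcrel(uint8_t buf[6], uint32_t op, int cur, int target)
{
        int32_t rel = (target - cur) / 2;          /* halfword offset   */
        uint32_t hi = op | ((uint32_t)rel >> 16);  /* opcode + imm high */
        uint16_t lo = (uint16_t)rel;               /* imm low halfword  */

        buf[0] = hi >> 24; buf[1] = hi >> 16;
        buf[2] = hi >> 8;  buf[3] = hi;
        buf[4] = lo >> 8;  buf[5] = lo;
}

int main(void)
{
        uint8_t insn[6];
        int i;

        /* example: a brasl-style 0xc0?5 opcode branching 14 bytes ahead */
        emit6_pcrel(insn, 0xc0050000, 0x100, 0x100 + 14);
        for (i = 0; i < 6; i++)
                printf("%02x", insn[i]);
        printf("\n");
        return 0;
}

For the example target this should print c00500000007, i.e. a +7 halfword (14 byte) displacement in the immediate field.
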
2447 diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
2448 index 1a0fa10cb6b7..32bae68e34c1 100644
2449 --- a/arch/sparc/kernel/vio.c
2450 +++ b/arch/sparc/kernel/vio.c
2451 @@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
2452 if (err) {
2453 printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
2454 dev_name(&vdev->dev), err);
2455 - kfree(vdev);
2456 + put_device(&vdev->dev);
2457 return NULL;
2458 }
2459 if (vdev->dp)
2460 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
2461 index edfede768688..5167f3f74136 100644
2462 --- a/arch/x86/kernel/machine_kexec_32.c
2463 +++ b/arch/x86/kernel/machine_kexec_32.c
2464 @@ -57,12 +57,17 @@ static void load_segments(void)
2465 static void machine_kexec_free_page_tables(struct kimage *image)
2466 {
2467 free_page((unsigned long)image->arch.pgd);
2468 + image->arch.pgd = NULL;
2469 #ifdef CONFIG_X86_PAE
2470 free_page((unsigned long)image->arch.pmd0);
2471 + image->arch.pmd0 = NULL;
2472 free_page((unsigned long)image->arch.pmd1);
2473 + image->arch.pmd1 = NULL;
2474 #endif
2475 free_page((unsigned long)image->arch.pte0);
2476 + image->arch.pte0 = NULL;
2477 free_page((unsigned long)image->arch.pte1);
2478 + image->arch.pte1 = NULL;
2479 }
2480
2481 static int machine_kexec_alloc_page_tables(struct kimage *image)
2482 @@ -79,7 +84,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
2483 !image->arch.pmd0 || !image->arch.pmd1 ||
2484 #endif
2485 !image->arch.pte0 || !image->arch.pte1) {
2486 - machine_kexec_free_page_tables(image);
2487 return -ENOMEM;
2488 }
2489 return 0;
2490 diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
2491 index 3b7427aa7d85..5bce2a88e8a3 100644
2492 --- a/arch/x86/kernel/machine_kexec_64.c
2493 +++ b/arch/x86/kernel/machine_kexec_64.c
2494 @@ -38,9 +38,13 @@ static struct kexec_file_ops *kexec_file_loaders[] = {
2495 static void free_transition_pgtable(struct kimage *image)
2496 {
2497 free_page((unsigned long)image->arch.p4d);
2498 + image->arch.p4d = NULL;
2499 free_page((unsigned long)image->arch.pud);
2500 + image->arch.pud = NULL;
2501 free_page((unsigned long)image->arch.pmd);
2502 + image->arch.pmd = NULL;
2503 free_page((unsigned long)image->arch.pte);
2504 + image->arch.pte = NULL;
2505 }
2506
2507 static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
2508 @@ -90,7 +94,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
2509 set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
2510 return 0;
2511 err:
2512 - free_transition_pgtable(image);
2513 return result;
2514 }
2515
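A short note on the two machine_kexec hunks above: the free helpers now reset every pointer after freeing it, and the allocators stop freeing on their own error path, so the caller's cleanup can run the same teardown unconditionally without double-freeing. A minimal userspace sketch of that idiom (invented names, free() standing in for free_page()):

#include <stdlib.h>

struct tables {
        void *pgd;
        void *pte;
};

/* Teardown is idempotent: every pointer is reset after freeing,
 * so calling it twice, or on a half-built set, is harmless. */
static void free_tables(struct tables *t)
{
        free(t->pgd);
        t->pgd = NULL;
        free(t->pte);
        t->pte = NULL;
}

/* On failure nothing is freed here; the caller's cleanup path
 * runs the teardown exactly as it would on the success path. */
static int alloc_tables(struct tables *t)
{
        t->pgd = calloc(1, 4096);
        t->pte = calloc(1, 4096);
        if (!t->pgd || !t->pte)
                return -1;
        return 0;
}

int main(void)
{
        struct tables t = { 0 };
        int ret = alloc_tables(&t);

        free_tables(&t);        /* works for both outcomes */
        free_tables(&t);        /* no double free: pointers are NULL */
        return ret ? 1 : 0;
}
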
2516 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
2517 index fe92cb972dd1..1629a2099adf 100644
2518 --- a/drivers/block/loop.c
2519 +++ b/drivers/block/loop.c
2520 @@ -1171,21 +1171,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
2521 static int
2522 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
2523 {
2524 - struct file *file = lo->lo_backing_file;
2525 + struct file *file;
2526 struct kstat stat;
2527 - int error;
2528 + int ret;
2529
2530 - if (lo->lo_state != Lo_bound)
2531 + if (lo->lo_state != Lo_bound) {
2532 + mutex_unlock(&lo->lo_ctl_mutex);
2533 return -ENXIO;
2534 - error = vfs_getattr(&file->f_path, &stat,
2535 - STATX_INO, AT_STATX_SYNC_AS_STAT);
2536 - if (error)
2537 - return error;
2538 + }
2539 +
2540 memset(info, 0, sizeof(*info));
2541 info->lo_number = lo->lo_number;
2542 - info->lo_device = huge_encode_dev(stat.dev);
2543 - info->lo_inode = stat.ino;
2544 - info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
2545 info->lo_offset = lo->lo_offset;
2546 info->lo_sizelimit = lo->lo_sizelimit;
2547 info->lo_flags = lo->lo_flags;
2548 @@ -1198,7 +1194,19 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
2549 memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
2550 lo->lo_encrypt_key_size);
2551 }
2552 - return 0;
2553 +
2554 + /* Drop lo_ctl_mutex while we call into the filesystem. */
2555 + file = get_file(lo->lo_backing_file);
2556 + mutex_unlock(&lo->lo_ctl_mutex);
2557 + ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
2558 + AT_STATX_SYNC_AS_STAT);
2559 + if (!ret) {
2560 + info->lo_device = huge_encode_dev(stat.dev);
2561 + info->lo_inode = stat.ino;
2562 + info->lo_rdevice = huge_encode_dev(stat.rdev);
2563 + }
2564 + fput(file);
2565 + return ret;
2566 }
2567
2568 static void
2569 @@ -1279,12 +1287,13 @@ static int
2570 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
2571 struct loop_info info;
2572 struct loop_info64 info64;
2573 - int err = 0;
2574 + int err;
2575
2576 - if (!arg)
2577 - err = -EINVAL;
2578 - if (!err)
2579 - err = loop_get_status(lo, &info64);
2580 + if (!arg) {
2581 + mutex_unlock(&lo->lo_ctl_mutex);
2582 + return -EINVAL;
2583 + }
2584 + err = loop_get_status(lo, &info64);
2585 if (!err)
2586 err = loop_info64_to_old(&info64, &info);
2587 if (!err && copy_to_user(arg, &info, sizeof(info)))
2588 @@ -1296,12 +1305,13 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
2589 static int
2590 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
2591 struct loop_info64 info64;
2592 - int err = 0;
2593 + int err;
2594
2595 - if (!arg)
2596 - err = -EINVAL;
2597 - if (!err)
2598 - err = loop_get_status(lo, &info64);
2599 + if (!arg) {
2600 + mutex_unlock(&lo->lo_ctl_mutex);
2601 + return -EINVAL;
2602 + }
2603 + err = loop_get_status(lo, &info64);
2604 if (!err && copy_to_user(arg, &info64, sizeof(info64)))
2605 err = -EFAULT;
2606
2607 @@ -1378,7 +1388,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
2608 break;
2609 case LOOP_GET_STATUS:
2610 err = loop_get_status_old(lo, (struct loop_info __user *) arg);
2611 - break;
2612 + /* loop_get_status() unlocks lo_ctl_mutex */
2613 + goto out_unlocked;
2614 case LOOP_SET_STATUS64:
2615 err = -EPERM;
2616 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
2617 @@ -1387,7 +1398,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
2618 break;
2619 case LOOP_GET_STATUS64:
2620 err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
2621 - break;
2622 + /* loop_get_status() unlocks lo_ctl_mutex */
2623 + goto out_unlocked;
2624 case LOOP_SET_CAPACITY:
2625 err = -EPERM;
2626 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
2627 @@ -1520,12 +1532,13 @@ loop_get_status_compat(struct loop_device *lo,
2628 struct compat_loop_info __user *arg)
2629 {
2630 struct loop_info64 info64;
2631 - int err = 0;
2632 + int err;
2633
2634 - if (!arg)
2635 - err = -EINVAL;
2636 - if (!err)
2637 - err = loop_get_status(lo, &info64);
2638 + if (!arg) {
2639 + mutex_unlock(&lo->lo_ctl_mutex);
2640 + return -EINVAL;
2641 + }
2642 + err = loop_get_status(lo, &info64);
2643 if (!err)
2644 err = loop_info64_to_compat(&info64, arg);
2645 return err;
2646 @@ -1548,7 +1561,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
2647 mutex_lock(&lo->lo_ctl_mutex);
2648 err = loop_get_status_compat(
2649 lo, (struct compat_loop_info __user *) arg);
2650 - mutex_unlock(&lo->lo_ctl_mutex);
2651 + /* loop_get_status() unlocks lo_ctl_mutex */
2652 break;
2653 case LOOP_SET_CAPACITY:
2654 case LOOP_CLR_FD:
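
On the loop_get_status() rework above, the pattern is: return early (after unlocking) when the device is not bound, otherwise pin the backing file with get_file(), drop lo_ctl_mutex, call vfs_getattr() without the lock, then fput(). A rough userspace analogue, with invented types and a plain refcount instead of get_file()/fput():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct backing {
        int refcount;
        int data;
};

static pthread_mutex_t ctl_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct backing *backing_file;   /* NULL means "not bound" */

static void put_backing(struct backing *b)
{
        if (--b->refcount == 0)
                free(b);
}

static int get_status(int *out)
{
        struct backing *b;

        pthread_mutex_lock(&ctl_mutex);
        if (!backing_file) {
                pthread_mutex_unlock(&ctl_mutex);
                return -1;      /* mirrors the early unlock + -ENXIO */
        }
        b = backing_file;
        b->refcount++;          /* pin it, like get_file() */
        pthread_mutex_unlock(&ctl_mutex);

        *out = b->data;         /* "slow" call done without the lock */
        put_backing(b);         /* like fput() */
        return 0;
}

int main(void)
{
        int v = 0, ret;

        backing_file = calloc(1, sizeof(*backing_file));
        if (!backing_file)
                return 1;
        backing_file->refcount = 1;
        backing_file->data = 42;

        ret = get_status(&v);
        printf("ret=%d value=%d\n", ret, v);

        put_backing(backing_file);
        return 0;
}

The early-return branch mirrors the loop_get_status_old()/loop_get_status64() hunks, which now unlock lo_ctl_mutex themselves before returning.
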
2655 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2656 index dcb982e3a41f..6bfb8088e5f5 100644
2657 --- a/drivers/bluetooth/btusb.c
2658 +++ b/drivers/bluetooth/btusb.c
2659 @@ -340,6 +340,7 @@ static const struct usb_device_id blacklist_table[] = {
2660
2661 /* Intel Bluetooth devices */
2662 { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
2663 + { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
2664 { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
2665 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
2666 { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
2667 @@ -367,6 +368,9 @@ static const struct usb_device_id blacklist_table[] = {
2668 { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
2669 { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
2670
2671 + /* Additional Realtek 8723BU Bluetooth devices */
2672 + { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
2673 +
2674 /* Additional Realtek 8821AE Bluetooth devices */
2675 { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
2676 { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
2677 @@ -374,6 +378,9 @@ static const struct usb_device_id blacklist_table[] = {
2678 { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
2679 { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
2680
2681 + /* Additional Realtek 8822BE Bluetooth devices */
2682 + { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
2683 +
2684 /* Silicon Wave based devices */
2685 { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
2686
2687 @@ -2080,6 +2087,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
2688 case 0x0c: /* WsP */
2689 case 0x11: /* JfP */
2690 case 0x12: /* ThP */
2691 + case 0x13: /* HrP */
2692 + case 0x14: /* QnJ, IcP */
2693 break;
2694 default:
2695 BT_ERR("%s: Unsupported Intel hardware variant (%u)",
2696 @@ -2172,6 +2181,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
2697 break;
2698 case 0x11: /* JfP */
2699 case 0x12: /* ThP */
2700 + case 0x13: /* HrP */
2701 + case 0x14: /* QnJ, IcP */
2702 snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
2703 le16_to_cpu(ver.hw_variant),
2704 le16_to_cpu(ver.hw_revision),
2705 @@ -2203,6 +2214,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
2706 break;
2707 case 0x11: /* JfP */
2708 case 0x12: /* ThP */
2709 + case 0x13: /* HrP */
2710 + case 0x14: /* QnJ, IcP */
2711 snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
2712 le16_to_cpu(ver.hw_variant),
2713 le16_to_cpu(ver.hw_revision),
2714 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
2715 index 076d4244d672..5698d2fac1af 100644
2716 --- a/drivers/clk/clk.c
2717 +++ b/drivers/clk/clk.c
2718 @@ -2375,6 +2375,9 @@ static int clk_core_get_phase(struct clk_core *core)
2719 int ret;
2720
2721 clk_prepare_lock();
2722 + /* Always try to update cached phase if possible */
2723 + if (core->ops->get_phase)
2724 + core->phase = core->ops->get_phase(core->hw);
2725 ret = core->phase;
2726 clk_prepare_unlock();
2727
2728 diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
2729 index 2007123832bb..53450b651e4c 100644
2730 --- a/drivers/clk/hisilicon/crg-hi3516cv300.c
2731 +++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
2732 @@ -204,7 +204,7 @@ static const struct hisi_crg_funcs hi3516cv300_crg_funcs = {
2733 /* hi3516CV300 sysctrl CRG */
2734 #define HI3516CV300_SYSCTRL_NR_CLKS 16
2735
2736 -static const char *wdt_mux_p[] __initconst = { "3m", "apb" };
2737 +static const char *const wdt_mux_p[] __initconst = { "3m", "apb" };
2738 static u32 wdt_mux_table[] = {0, 1};
2739
2740 static const struct hisi_mux_clock hi3516cv300_sysctrl_mux_clks[] = {
2741 diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
2742 index 1294f3ad7cd5..3b8b53b279dc 100644
2743 --- a/drivers/clk/meson/axg.c
2744 +++ b/drivers/clk/meson/axg.c
2745 @@ -129,6 +129,11 @@ static struct meson_clk_pll axg_fixed_pll = {
2746 .shift = 16,
2747 .width = 2,
2748 },
2749 + .frac = {
2750 + .reg_off = HHI_MPLL_CNTL2,
2751 + .shift = 0,
2752 + .width = 12,
2753 + },
2754 .lock = &meson_clk_lock,
2755 .hw.init = &(struct clk_init_data){
2756 .name = "fixed_pll",
2757 @@ -151,7 +156,7 @@ static struct meson_clk_pll axg_sys_pll = {
2758 },
2759 .od = {
2760 .reg_off = HHI_SYS_PLL_CNTL,
2761 - .shift = 10,
2762 + .shift = 16,
2763 .width = 2,
2764 },
2765 .rate_table = sys_pll_rate_table,
2766 diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
2767 index 077fcdc7908b..fe7d9ed1d436 100644
2768 --- a/drivers/clk/rockchip/clk-mmc-phase.c
2769 +++ b/drivers/clk/rockchip/clk-mmc-phase.c
2770 @@ -58,6 +58,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
2771 u16 degrees;
2772 u32 delay_num = 0;
2773
2774 + /* See the comment for rockchip_mmc_set_phase below */
2775 + if (!rate) {
2776 + pr_err("%s: invalid clk rate\n", __func__);
2777 + return -EINVAL;
2778 + }
2779 +
2780 raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
2781
2782 degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
2783 @@ -84,6 +90,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
2784 u32 raw_value;
2785 u32 delay;
2786
2787 + /*
2788 + * The below calculation is based on the output clock from
2789 + * MMC host to the card, which expects the phase clock inherits
2790 + * the clock rate from its parent, namely the output clock
2791 + * provider of MMC host. However, things may go wrong if
2792 + * (1) It is an orphan.
2793 + * (2) It is assigned to the wrong parent.
2794 + *
2795 + * This check helps debug the case (1), which seems to be the
2796 + * most likely problem we often face and which makes it difficult
2797 + * for people to debug unstable mmc tuning results.
2798 + */
2799 + if (!rate) {
2800 + pr_err("%s: invalid clk rate\n", __func__);
2801 + return -EINVAL;
2802 + }
2803 +
2804 nineties = degrees / 90;
2805 remainder = (degrees % 90);
2806
2807 diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
2808 index 11e7f2d1c054..7af48184b022 100644
2809 --- a/drivers/clk/rockchip/clk-rk3228.c
2810 +++ b/drivers/clk/rockchip/clk-rk3228.c
2811 @@ -387,7 +387,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
2812 RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS,
2813 RK2928_CLKGATE_CON(2), 15, GFLAGS),
2814
2815 - COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
2816 + COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
2817 RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS,
2818 RK2928_CLKGATE_CON(2), 11, GFLAGS),
2819
2820 diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
2821 index 1b81e283f605..ed36728424a2 100644
2822 --- a/drivers/clk/samsung/clk-exynos3250.c
2823 +++ b/drivers/clk/samsung/clk-exynos3250.c
2824 @@ -698,7 +698,7 @@ static const struct samsung_pll_rate_table exynos3250_epll_rates[] __initconst =
2825 PLL_36XX_RATE(144000000, 96, 2, 3, 0),
2826 PLL_36XX_RATE( 96000000, 128, 2, 4, 0),
2827 PLL_36XX_RATE( 84000000, 112, 2, 4, 0),
2828 - PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
2829 + PLL_36XX_RATE( 80000003, 106, 2, 4, 43691),
2830 PLL_36XX_RATE( 73728000, 98, 2, 4, 19923),
2831 PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
2832 PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
2833 @@ -734,7 +734,7 @@ static const struct samsung_pll_rate_table exynos3250_vpll_rates[] __initconst =
2834 PLL_36XX_RATE(148352005, 98, 2, 3, 59070),
2835 PLL_36XX_RATE(108000000, 144, 2, 4, 0),
2836 PLL_36XX_RATE( 74250000, 99, 2, 4, 0),
2837 - PLL_36XX_RATE( 74176002, 98, 3, 4, 59070),
2838 + PLL_36XX_RATE( 74176002, 98, 2, 4, 59070),
2839 PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
2840 PLL_36XX_RATE( 54000000, 144, 2, 5, 0),
2841 { /* sentinel */ }
2842 diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
2843 index 9b073c98a891..923c608b1b95 100644
2844 --- a/drivers/clk/samsung/clk-exynos5250.c
2845 +++ b/drivers/clk/samsung/clk-exynos5250.c
2846 @@ -711,13 +711,13 @@ static const struct samsung_pll_rate_table epll_24mhz_tbl[] __initconst = {
2847 /* sorted in descending order */
2848 /* PLL_36XX_RATE(rate, m, p, s, k) */
2849 PLL_36XX_RATE(192000000, 64, 2, 2, 0),
2850 - PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
2851 + PLL_36XX_RATE(180633605, 90, 3, 2, 20762),
2852 PLL_36XX_RATE(180000000, 90, 3, 2, 0),
2853 PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
2854 - PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
2855 + PLL_36XX_RATE(67737602, 90, 2, 4, 20762),
2856 PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
2857 - PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
2858 - PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
2859 + PLL_36XX_RATE(45158401, 90, 3, 4, 20762),
2860 + PLL_36XX_RATE(32768001, 131, 3, 5, 4719),
2861 { },
2862 };
2863
2864 diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
2865 index fd1d9bfc151b..8eae1752d700 100644
2866 --- a/drivers/clk/samsung/clk-exynos5260.c
2867 +++ b/drivers/clk/samsung/clk-exynos5260.c
2868 @@ -65,7 +65,7 @@ static const struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initconst = {
2869 PLL_36XX_RATE(480000000, 160, 2, 2, 0),
2870 PLL_36XX_RATE(432000000, 144, 2, 2, 0),
2871 PLL_36XX_RATE(400000000, 200, 3, 2, 0),
2872 - PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
2873 + PLL_36XX_RATE(394073128, 459, 7, 2, 49282),
2874 PLL_36XX_RATE(333000000, 111, 2, 2, 0),
2875 PLL_36XX_RATE(300000000, 100, 2, 2, 0),
2876 PLL_36XX_RATE(266000000, 266, 3, 3, 0),
2877 diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
2878 index db270908037a..335bebfa21c0 100644
2879 --- a/drivers/clk/samsung/clk-exynos5433.c
2880 +++ b/drivers/clk/samsung/clk-exynos5433.c
2881 @@ -729,7 +729,7 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst =
2882 PLL_35XX_RATE(800000000U, 400, 6, 1),
2883 PLL_35XX_RATE(733000000U, 733, 12, 1),
2884 PLL_35XX_RATE(700000000U, 175, 3, 1),
2885 - PLL_35XX_RATE(667000000U, 222, 4, 1),
2886 + PLL_35XX_RATE(666000000U, 222, 4, 1),
2887 PLL_35XX_RATE(633000000U, 211, 4, 1),
2888 PLL_35XX_RATE(600000000U, 500, 5, 2),
2889 PLL_35XX_RATE(552000000U, 460, 5, 2),
2890 @@ -757,12 +757,12 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst =
2891 /* AUD_PLL */
2892 static const struct samsung_pll_rate_table exynos5433_aud_pll_rates[] __initconst = {
2893 PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
2894 - PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
2895 + PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
2896 PLL_36XX_RATE(384000000U, 128, 2, 2, 0),
2897 - PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
2898 - PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
2899 - PLL_36XX_RATE(338688000U, 113, 2, 2, -6816),
2900 - PLL_36XX_RATE(294912000U, 98, 1, 3, 19923),
2901 + PLL_36XX_RATE(368639991U, 246, 4, 2, -15729),
2902 + PLL_36XX_RATE(361507202U, 181, 3, 2, -16148),
2903 + PLL_36XX_RATE(338687988U, 113, 2, 2, -6816),
2904 + PLL_36XX_RATE(294912002U, 98, 1, 3, 19923),
2905 PLL_36XX_RATE(288000000U, 96, 1, 3, 0),
2906 PLL_36XX_RATE(252000000U, 84, 1, 3, 0),
2907 { /* sentinel */ }
2908 diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
2909 index 5931a4140c3d..bbfa57b4e017 100644
2910 --- a/drivers/clk/samsung/clk-exynos7.c
2911 +++ b/drivers/clk/samsung/clk-exynos7.c
2912 @@ -140,7 +140,7 @@ static const struct samsung_div_clock topc_div_clks[] __initconst = {
2913 };
2914
2915 static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = {
2916 - PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
2917 + PLL_36XX_RATE(491519897, 20, 1, 0, 31457),
2918 {},
2919 };
2920
2921 diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
2922 index e0650c33863b..d8e58a659467 100644
2923 --- a/drivers/clk/samsung/clk-s3c2410.c
2924 +++ b/drivers/clk/samsung/clk-s3c2410.c
2925 @@ -168,7 +168,7 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
2926 PLL_35XX_RATE(226000000, 105, 1, 1),
2927 PLL_35XX_RATE(210000000, 132, 2, 1),
2928 /* 2410 common */
2929 - PLL_35XX_RATE(203000000, 161, 3, 1),
2930 + PLL_35XX_RATE(202800000, 161, 3, 1),
2931 PLL_35XX_RATE(192000000, 88, 1, 1),
2932 PLL_35XX_RATE(186000000, 85, 1, 1),
2933 PLL_35XX_RATE(180000000, 82, 1, 1),
2934 @@ -178,18 +178,18 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
2935 PLL_35XX_RATE(147000000, 90, 2, 1),
2936 PLL_35XX_RATE(135000000, 82, 2, 1),
2937 PLL_35XX_RATE(124000000, 116, 1, 2),
2938 - PLL_35XX_RATE(118000000, 150, 2, 2),
2939 + PLL_35XX_RATE(118500000, 150, 2, 2),
2940 PLL_35XX_RATE(113000000, 105, 1, 2),
2941 - PLL_35XX_RATE(101000000, 127, 2, 2),
2942 + PLL_35XX_RATE(101250000, 127, 2, 2),
2943 PLL_35XX_RATE(90000000, 112, 2, 2),
2944 - PLL_35XX_RATE(85000000, 105, 2, 2),
2945 + PLL_35XX_RATE(84750000, 105, 2, 2),
2946 PLL_35XX_RATE(79000000, 71, 1, 2),
2947 - PLL_35XX_RATE(68000000, 82, 2, 2),
2948 - PLL_35XX_RATE(56000000, 142, 2, 3),
2949 + PLL_35XX_RATE(67500000, 82, 2, 2),
2950 + PLL_35XX_RATE(56250000, 142, 2, 3),
2951 PLL_35XX_RATE(48000000, 120, 2, 3),
2952 - PLL_35XX_RATE(51000000, 161, 3, 3),
2953 + PLL_35XX_RATE(50700000, 161, 3, 3),
2954 PLL_35XX_RATE(45000000, 82, 1, 3),
2955 - PLL_35XX_RATE(34000000, 82, 2, 3),
2956 + PLL_35XX_RATE(33750000, 82, 2, 3),
2957 { /* sentinel */ },
2958 };
2959
2960 diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
2961 index 7c369e21c91c..830d1c87fa7c 100644
2962 --- a/drivers/clk/tegra/clk-pll.c
2963 +++ b/drivers/clk/tegra/clk-pll.c
2964 @@ -1151,6 +1151,8 @@ static const struct clk_ops tegra_clk_pllu_ops = {
2965 .enable = clk_pllu_enable,
2966 .disable = clk_pll_disable,
2967 .recalc_rate = clk_pll_recalc_rate,
2968 + .round_rate = clk_pll_round_rate,
2969 + .set_rate = clk_pll_set_rate,
2970 };
2971
2972 static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
2973 diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
2974 index 691c6465b71e..8561cce67741 100644
2975 --- a/drivers/crypto/atmel-aes.c
2976 +++ b/drivers/crypto/atmel-aes.c
2977 @@ -2155,7 +2155,7 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
2978
2979 badkey:
2980 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2981 - memzero_explicit(&key, sizeof(keys));
2982 + memzero_explicit(&keys, sizeof(keys));
2983 return -EINVAL;
2984 }
2985
2986 diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
2987 index 59d4ca4e72d8..1a734bd2070a 100644
2988 --- a/drivers/crypto/ccp/ccp-debugfs.c
2989 +++ b/drivers/crypto/ccp/ccp-debugfs.c
2990 @@ -278,7 +278,7 @@ static const struct file_operations ccp_debugfs_stats_ops = {
2991 };
2992
2993 static struct dentry *ccp_debugfs_dir;
2994 -static DEFINE_RWLOCK(ccp_debugfs_lock);
2995 +static DEFINE_MUTEX(ccp_debugfs_lock);
2996
2997 #define MAX_NAME_LEN 20
2998
2999 @@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
3000 struct dentry *debugfs_stats;
3001 struct dentry *debugfs_q_instance;
3002 struct dentry *debugfs_q_stats;
3003 - unsigned long flags;
3004 int i;
3005
3006 if (!debugfs_initialized())
3007 return;
3008
3009 - write_lock_irqsave(&ccp_debugfs_lock, flags);
3010 + mutex_lock(&ccp_debugfs_lock);
3011 if (!ccp_debugfs_dir)
3012 ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
3013 - write_unlock_irqrestore(&ccp_debugfs_lock, flags);
3014 + mutex_unlock(&ccp_debugfs_lock);
3015 if (!ccp_debugfs_dir)
3016 return;
3017
3018 diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
3019 index 0dd3a7ac1dd1..f4a76971b4ac 100644
3020 --- a/drivers/crypto/inside-secure/safexcel.c
3021 +++ b/drivers/crypto/inside-secure/safexcel.c
3022 @@ -490,6 +490,15 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
3023 if (backlog)
3024 backlog->complete(backlog, -EINPROGRESS);
3025
3026 + /* In case the send() helper did not issue any command to push
3027 + * to the engine because the input data was cached, continue to
3028 + * dequeue other requests as this is valid and not an error.
3029 + */
3030 + if (!commands && !results) {
3031 + kfree(request);
3032 + continue;
3033 + }
3034 +
3035 spin_lock_bh(&priv->ring[ring].egress_lock);
3036 list_add_tail(&request->list, &priv->ring[ring].list);
3037 spin_unlock_bh(&priv->ring[ring].egress_lock);
3038 @@ -514,8 +523,7 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
3039
3040 if (!priv->ring[ring].busy) {
3041 nreq -= safexcel_try_push_requests(priv, ring, nreq);
3042 - if (nreq)
3043 - priv->ring[ring].busy = true;
3044 + priv->ring[ring].busy = true;
3045 }
3046
3047 priv->ring[ring].requests_left += nreq;
3048 diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
3049 index 63a8768ed2ae..17a7725a6f6d 100644
3050 --- a/drivers/crypto/inside-secure/safexcel_cipher.c
3051 +++ b/drivers/crypto/inside-secure/safexcel_cipher.c
3052 @@ -456,7 +456,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
3053 queue_work(priv->ring[ring].workqueue,
3054 &priv->ring[ring].work_data.work);
3055
3056 - wait_for_completion_interruptible(&result.completion);
3057 + wait_for_completion(&result.completion);
3058
3059 if (result.error) {
3060 dev_warn(priv->dev,
3061 diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
3062 index 122a2a58e98f..3e65bb5732da 100644
3063 --- a/drivers/crypto/inside-secure/safexcel_hash.c
3064 +++ b/drivers/crypto/inside-secure/safexcel_hash.c
3065 @@ -21,7 +21,6 @@ struct safexcel_ahash_ctx {
3066 struct safexcel_crypto_priv *priv;
3067
3068 u32 alg;
3069 - u32 digest;
3070
3071 u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
3072 u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
3073 @@ -35,6 +34,8 @@ struct safexcel_ahash_req {
3074
3075 int nents;
3076
3077 + u32 digest;
3078 +
3079 u8 state_sz; /* expected sate size, only set once */
3080 u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
3081
3082 @@ -49,6 +50,8 @@ struct safexcel_ahash_export_state {
3083 u64 len;
3084 u64 processed;
3085
3086 + u32 digest;
3087 +
3088 u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
3089 u8 cache[SHA256_BLOCK_SIZE];
3090 };
3091 @@ -82,9 +85,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
3092
3093 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
3094 cdesc->control_data.control0 |= ctx->alg;
3095 - cdesc->control_data.control0 |= ctx->digest;
3096 + cdesc->control_data.control0 |= req->digest;
3097
3098 - if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
3099 + if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
3100 if (req->processed) {
3101 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
3102 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
3103 @@ -112,7 +115,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
3104 if (req->finish)
3105 ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
3106 }
3107 - } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
3108 + } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
3109 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);
3110
3111 memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
3112 @@ -184,7 +187,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
3113 int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
3114
3115 queued = len = req->len - req->processed;
3116 - if (queued < crypto_ahash_blocksize(ahash))
3117 + if (queued <= crypto_ahash_blocksize(ahash))
3118 cache_len = queued;
3119 else
3120 cache_len = queued - areq->nbytes;
3121 @@ -198,7 +201,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
3122 /* If this is not the last request and the queued data
3123 * is a multiple of a block, cache the last one for now.
3124 */
3125 - extra = queued - crypto_ahash_blocksize(ahash);
3126 + extra = crypto_ahash_blocksize(ahash);
3127
3128 if (extra) {
3129 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
3130 @@ -493,7 +496,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
3131 queue_work(priv->ring[ring].workqueue,
3132 &priv->ring[ring].work_data.work);
3133
3134 - wait_for_completion_interruptible(&result.completion);
3135 + wait_for_completion(&result.completion);
3136
3137 if (result.error) {
3138 dev_warn(priv->dev, "hash: completion error (%d)\n",
3139 @@ -550,7 +553,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
3140 if (ctx->base.ctxr) {
3141 if (priv->version == EIP197 &&
3142 !ctx->base.needs_inv && req->processed &&
3143 - ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
3144 + req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
3145 /* We're still setting needs_inv here, even though it is
3146 * cleared right away, because the needs_inv flag can be
3147 * set in other functions and we want to keep the same
3148 @@ -585,7 +588,6 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
3149
3150 static int safexcel_ahash_update(struct ahash_request *areq)
3151 {
3152 - struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
3153 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
3154 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
3155
3156 @@ -601,7 +603,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
3157 * We're not doing partial updates when performing an hmac request.
3158 * Everything will be handled by the final() call.
3159 */
3160 - if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
3161 + if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
3162 return 0;
3163
3164 if (req->hmac)
3165 @@ -660,6 +662,8 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
3166 export->len = req->len;
3167 export->processed = req->processed;
3168
3169 + export->digest = req->digest;
3170 +
3171 memcpy(export->state, req->state, req->state_sz);
3172 memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
3173
3174 @@ -680,6 +684,8 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
3175 req->len = export->len;
3176 req->processed = export->processed;
3177
3178 + req->digest = export->digest;
3179 +
3180 memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
3181 memcpy(req->state, export->state, req->state_sz);
3182
3183 @@ -716,7 +722,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
3184 req->state[4] = SHA1_H4;
3185
3186 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
3187 - ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
3188 + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
3189 req->state_sz = SHA1_DIGEST_SIZE;
3190
3191 return 0;
3192 @@ -783,10 +789,10 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
3193
3194 static int safexcel_hmac_sha1_init(struct ahash_request *areq)
3195 {
3196 - struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
3197 + struct safexcel_ahash_req *req = ahash_request_ctx(areq);
3198
3199 safexcel_sha1_init(areq);
3200 - ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
3201 + req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
3202 return 0;
3203 }
3204
3205 @@ -839,7 +845,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
3206 init_completion(&result.completion);
3207
3208 ret = crypto_ahash_digest(areq);
3209 - if (ret == -EINPROGRESS) {
3210 + if (ret == -EINPROGRESS || ret == -EBUSY) {
3211 wait_for_completion_interruptible(&result.completion);
3212 ret = result.error;
3213 }
3214 @@ -1024,7 +1030,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
3215 req->state[7] = SHA256_H7;
3216
3217 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
3218 - ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
3219 + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
3220 req->state_sz = SHA256_DIGEST_SIZE;
3221
3222 return 0;
3223 @@ -1086,7 +1092,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
3224 req->state[7] = SHA224_H7;
3225
3226 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
3227 - ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
3228 + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
3229 req->state_sz = SHA256_DIGEST_SIZE;
3230
3231 return 0;
3232 diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
3233 index 1547cbe13dc2..a81d89b3b7d8 100644
3234 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
3235 +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
3236 @@ -451,6 +451,7 @@ static struct platform_driver sun4i_ss_driver = {
3237
3238 module_platform_driver(sun4i_ss_driver);
3239
3240 +MODULE_ALIAS("platform:sun4i-ss");
3241 MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
3242 MODULE_LICENSE("GPL");
3243 MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
3244 diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
3245 index 3a7c80cd1a17..359fb9804d16 100644
3246 --- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
3247 +++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
3248 @@ -106,7 +106,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
3249 if (nums[i-1] + 1 != nums[i])
3250 goto fail_map;
3251 buf->vaddr = (__force void *)
3252 - ioremap_nocache(nums[0] << PAGE_SHIFT, size);
3253 + ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
3254 } else {
3255 buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
3256 PAGE_KERNEL);
3257 diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
3258 index 6356815cf3e1..3642e6e4761e 100644
3259 --- a/drivers/media/dvb-frontends/lgdt3306a.c
3260 +++ b/drivers/media/dvb-frontends/lgdt3306a.c
3261 @@ -1768,7 +1768,13 @@ static void lgdt3306a_release(struct dvb_frontend *fe)
3262 struct lgdt3306a_state *state = fe->demodulator_priv;
3263
3264 dbg_info("\n");
3265 - kfree(state);
3266 +
3267 + /*
3268 + * If state->muxc is not NULL, then we are an i2c device
3269 + * and lgdt3306a_remove will clean up state
3270 + */
3271 + if (!state->muxc)
3272 + kfree(state);
3273 }
3274
3275 static const struct dvb_frontend_ops lgdt3306a_ops;
3276 @@ -2169,7 +2175,7 @@ static int lgdt3306a_probe(struct i2c_client *client,
3277 sizeof(struct lgdt3306a_config));
3278
3279 config->i2c_addr = client->addr;
3280 - fe = lgdt3306a_attach(config, client->adapter);
3281 + fe = dvb_attach(lgdt3306a_attach, config, client->adapter);
3282 if (fe == NULL) {
3283 ret = -ENODEV;
3284 goto err_fe;
3285 diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
3286 index 4da4253553fc..10d229a4f088 100644
3287 --- a/drivers/media/i2c/adv748x/adv748x-hdmi.c
3288 +++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
3289 @@ -105,6 +105,9 @@ static void adv748x_hdmi_fill_format(struct adv748x_hdmi *hdmi,
3290
3291 fmt->width = hdmi->timings.bt.width;
3292 fmt->height = hdmi->timings.bt.height;
3293 +
3294 + if (fmt->field == V4L2_FIELD_ALTERNATE)
3295 + fmt->height /= 2;
3296 }
3297
3298 static void adv748x_fill_optional_dv_timings(struct v4l2_dv_timings *timings)
3299 diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
3300 index d28845f7356f..a31fe18c71d6 100644
3301 --- a/drivers/media/i2c/ov5645.c
3302 +++ b/drivers/media/i2c/ov5645.c
3303 @@ -1131,13 +1131,14 @@ static int ov5645_probe(struct i2c_client *client,
3304
3305 ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
3306 &ov5645->ep);
3307 +
3308 + of_node_put(endpoint);
3309 +
3310 if (ret < 0) {
3311 dev_err(dev, "parsing endpoint node failed\n");
3312 return ret;
3313 }
3314
3315 - of_node_put(endpoint);
3316 -
3317 if (ov5645->ep.bus_type != V4L2_MBUS_CSI2) {
3318 dev_err(dev, "invalid bus type, must be CSI2\n");
3319 return -EINVAL;
3320 diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
3321 index 3622521431f5..7ec8de7aee4f 100644
3322 --- a/drivers/media/pci/cx23885/cx23885-cards.c
3323 +++ b/drivers/media/pci/cx23885/cx23885-cards.c
3324 @@ -2286,6 +2286,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
3325 &dev->i2c_bus[2].i2c_adap,
3326 "cx25840", 0x88 >> 1, NULL);
3327 if (dev->sd_cx25840) {
3328 + /* set host data for clk_freq configuration */
3329 + v4l2_set_subdev_hostdata(dev->sd_cx25840,
3330 + &dev->clk_freq);
3331 +
3332 dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
3333 v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
3334 }
3335 diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
3336 index 8f63df1cb418..4612f26fcd6d 100644
3337 --- a/drivers/media/pci/cx23885/cx23885-core.c
3338 +++ b/drivers/media/pci/cx23885/cx23885-core.c
3339 @@ -873,6 +873,16 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
3340 if (cx23885_boards[dev->board].clk_freq > 0)
3341 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
3342
3343 + if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
3344 + dev->pci->subsystem_device == 0x7137) {
3345 + /* Hauppauge ImpactVCBe device ID 0x7137 is populated
3346 + * with an 888, and a 25MHz crystal, instead of the
3347 + * usual third overtone 50MHz. The default clock rate must
3348 + * be overridden so the cx25840 is properly configured
3349 + */
3350 + dev->clk_freq = 25000000;
3351 + }
3352 +
3353 dev->pci_bus = dev->pci->bus->number;
3354 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
3355 cx23885_irq_add(dev, 0x001f00);
3356 diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
3357 index 04aa4a68a0ae..040c6c251d3a 100644
3358 --- a/drivers/media/pci/cx25821/cx25821-core.c
3359 +++ b/drivers/media/pci/cx25821/cx25821-core.c
3360 @@ -867,6 +867,10 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
3361 dev->nr = ++cx25821_devcount;
3362 sprintf(dev->name, "cx25821[%d]", dev->nr);
3363
3364 + if (dev->nr >= ARRAY_SIZE(card)) {
3365 + CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card));
3366 + return -ENODEV;
3367 + }
3368 if (dev->pci->device != 0x8210) {
3369 pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
3370 __func__, dev->pci->device);
3371 @@ -882,9 +886,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
3372 dev->channels[i].sram_channels = &cx25821_sram_channels[i];
3373 }
3374
3375 - if (dev->nr > 1)
3376 - CX25821_INFO("dev->nr > 1!");
3377 -
3378 /* board config */
3379 dev->board = 1; /* card[dev->nr]; */
3380 dev->_max_num_decoders = MAX_DECODERS;
3381 diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
3382 index 437395a61065..9ab8e7ee2e1e 100644
3383 --- a/drivers/media/platform/s3c-camif/camif-capture.c
3384 +++ b/drivers/media/platform/s3c-camif/camif-capture.c
3385 @@ -1256,16 +1256,17 @@ static void __camif_subdev_try_format(struct camif_dev *camif,
3386 {
3387 const struct s3c_camif_variant *variant = camif->variant;
3388 const struct vp_pix_limits *pix_lim;
3389 - int i = ARRAY_SIZE(camif_mbus_formats);
3390 + unsigned int i;
3391
3392 /* FIXME: constraints against codec or preview path ? */
3393 pix_lim = &variant->vp_pix_limits[VP_CODEC];
3394
3395 - while (i-- >= 0)
3396 + for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
3397 if (camif_mbus_formats[i] == mf->code)
3398 break;
3399
3400 - mf->code = camif_mbus_formats[i];
3401 + if (i == ARRAY_SIZE(camif_mbus_formats))
3402 + mf->code = camif_mbus_formats[0];
3403
3404 if (pad == CAMIF_SD_PAD_SINK) {
3405 v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
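The __camif_subdev_try_format() hunk above replaces a reverse scan, whose "while (i-- >= 0)" test let i run past the start of camif_mbus_formats[], with a forward search that falls back to the first table entry when the requested code is unknown. A minimal standalone sketch of that search-with-default idiom, with a made-up code table standing in for camif_mbus_formats[]:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const unsigned int supported_codes[] = { 0x1008, 0x2006, 0x2008 };

    /* Return 'code' if it is in the table, otherwise the first (default) entry. */
    static unsigned int validate_code(unsigned int code)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(supported_codes); i++)
                    if (supported_codes[i] == code)
                            break;

            if (i == ARRAY_SIZE(supported_codes))   /* not found: use the default */
                    return supported_codes[0];
            return code;
    }

    int main(void)
    {
            printf("%#x\n", validate_code(0x2006)); /* kept as-is */
            printf("%#x\n", validate_code(0xdead)); /* falls back to 0x1008 */
            return 0;
    }

The sentinel is simply i == ARRAY_SIZE(...), which the forward loop leaves set only when no entry matched.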
3406 diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
3407 index 3f9d354827af..c586c2ab9b31 100644
3408 --- a/drivers/media/platform/vivid/vivid-ctrls.c
3409 +++ b/drivers/media/platform/vivid/vivid-ctrls.c
3410 @@ -1208,6 +1208,7 @@ static int vivid_radio_rx_s_ctrl(struct v4l2_ctrl *ctrl)
3411 v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls);
3412 v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls);
3413 v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls);
3414 + dev->radio_rx_dev.device_caps = dev->radio_rx_caps;
3415 break;
3416 case V4L2_CID_RDS_RECEPTION:
3417 dev->radio_rx_rds_enabled = ctrl->val;
3418 @@ -1282,6 +1283,7 @@ static int vivid_radio_tx_s_ctrl(struct v4l2_ctrl *ctrl)
3419 dev->radio_tx_caps &= ~V4L2_CAP_READWRITE;
3420 if (!dev->radio_tx_rds_controls)
3421 dev->radio_tx_caps |= V4L2_CAP_READWRITE;
3422 + dev->radio_tx_dev.device_caps = dev->radio_tx_caps;
3423 break;
3424 case V4L2_CID_RDS_TX_PTY:
3425 if (dev->radio_rx_rds_controls)
3426 diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
3427 index 7ce69f23f50a..ac85942162c1 100644
3428 --- a/drivers/media/platform/vsp1/vsp1_drm.c
3429 +++ b/drivers/media/platform/vsp1/vsp1_drm.c
3430 @@ -530,6 +530,15 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index)
3431 struct vsp1_rwpf *rpf = vsp1->rpf[i];
3432 unsigned int j;
3433
3434 + /*
3435 + * Make sure we don't accept more inputs than the hardware can
3436 + * handle. This is a temporary fix to avoid display stall, we
3437 + * need to instead allocate the BRU or BRS to display pipelines
3438 + * dynamically based on the number of planes they each use.
3439 + */
3440 + if (pipe->num_inputs >= pipe->bru->source_pad)
3441 + pipe->inputs[i] = NULL;
3442 +
3443 if (!pipe->inputs[i])
3444 continue;
3445
3446 diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
3447 index 34e16f6ab4ac..545f9c1b6a58 100644
3448 --- a/drivers/media/usb/em28xx/em28xx-cards.c
3449 +++ b/drivers/media/usb/em28xx/em28xx-cards.c
3450 @@ -507,8 +507,10 @@ static struct em28xx_reg_seq plex_px_bcud[] = {
3451 };
3452
3453 /*
3454 - * 2040:0265 Hauppauge WinTV-dualHD DVB
3455 - * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM
3456 + * 2040:0265 Hauppauge WinTV-dualHD DVB Isoc
3457 + * 2040:8265 Hauppauge WinTV-dualHD DVB Bulk
3458 + * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM Isoc
3459 + * 2040:826d Hauppauge WinTV-dualHD ATSC/QAM Bulk
3460 * reg 0x80/0x84:
3461 * GPIO_0: Yellow LED tuner 1, 0=on, 1=off
3462 * GPIO_1: Green LED tuner 1, 0=on, 1=off
3463 @@ -2391,7 +2393,8 @@ struct em28xx_board em28xx_boards[] = {
3464 .has_dvb = 1,
3465 },
3466 /*
3467 - * 2040:0265 Hauppauge WinTV-dualHD (DVB version).
3468 + * 2040:0265 Hauppauge WinTV-dualHD (DVB version) Isoc.
3469 + * 2040:8265 Hauppauge WinTV-dualHD (DVB version) Bulk.
3470 * Empia EM28274, 2x Silicon Labs Si2168, 2x Silicon Labs Si2157
3471 */
3472 [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB] = {
3473 @@ -2406,7 +2409,8 @@ struct em28xx_board em28xx_boards[] = {
3474 .leds = hauppauge_dualhd_leds,
3475 },
3476 /*
3477 - * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM).
3478 + * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc.
3479 + * 2040:826d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Bulk.
3480 * Empia EM28274, 2x LG LGDT3306A, 2x Silicon Labs Si2157
3481 */
3482 [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595] = {
3483 @@ -2547,8 +2551,12 @@ struct usb_device_id em28xx_id_table[] = {
3484 .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 },
3485 { USB_DEVICE(0x2040, 0x0265),
3486 .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB },
3487 + { USB_DEVICE(0x2040, 0x8265),
3488 + .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB },
3489 { USB_DEVICE(0x2040, 0x026d),
3490 .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 },
3491 + { USB_DEVICE(0x2040, 0x826d),
3492 + .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 },
3493 { USB_DEVICE(0x0438, 0xb002),
3494 .driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 },
3495 { USB_DEVICE(0x2001, 0xf112),
3496 @@ -2609,7 +2617,11 @@ struct usb_device_id em28xx_id_table[] = {
3497 .driver_info = EM28178_BOARD_PCTV_461E },
3498 { USB_DEVICE(0x2013, 0x025f),
3499 .driver_info = EM28178_BOARD_PCTV_292E },
3500 - { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD */
3501 + { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD Isoc */
3502 + .driver_info = EM28178_BOARD_PCTV_292E },
3503 + { USB_DEVICE(0x2040, 0x8264), /* Hauppauge OEM Generic WinTV-soloHD Bulk */
3504 + .driver_info = EM28178_BOARD_PCTV_292E },
3505 + { USB_DEVICE(0x2040, 0x8268), /* Hauppauge Retail WinTV-soloHD Bulk */
3506 .driver_info = EM28178_BOARD_PCTV_292E },
3507 { USB_DEVICE(0x0413, 0x6f07),
3508 .driver_info = EM2861_BOARD_LEADTEK_VC100 },
3509 diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
3510 index 88084f24f033..094e83b6908d 100644
3511 --- a/drivers/media/usb/em28xx/em28xx.h
3512 +++ b/drivers/media/usb/em28xx/em28xx.h
3513 @@ -191,7 +191,7 @@
3514 USB 2.0 spec says bulk packet size is always 512 bytes
3515 */
3516 #define EM28XX_BULK_PACKET_MULTIPLIER 384
3517 -#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384
3518 +#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94
3519
3520 #define EM28XX_INTERLACED_DEFAULT 1
3521
3522 diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
3523 index 23b45da784cb..b89acaee12d4 100644
3524 --- a/drivers/net/dsa/bcm_sf2_cfp.c
3525 +++ b/drivers/net/dsa/bcm_sf2_cfp.c
3526 @@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
3527 /* Locate the first rule available */
3528 if (fs->location == RX_CLS_LOC_ANY)
3529 rule_index = find_first_zero_bit(priv->cfp.used,
3530 - bcm_sf2_cfp_rule_size(priv));
3531 + priv->num_cfp_rules);
3532 else
3533 rule_index = fs->location;
3534
3535 + if (rule_index > bcm_sf2_cfp_rule_size(priv))
3536 + return -ENOSPC;
3537 +
3538 layout = &udf_tcpip4_layout;
3539 /* We only use one UDF slice for now */
3540 slice_num = bcm_sf2_get_slice_number(layout, 0);
3541 @@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
3542 * first half because the HW search is by incrementing addresses.
3543 */
3544 if (fs->location == RX_CLS_LOC_ANY)
3545 - rule_index[0] = find_first_zero_bit(priv->cfp.used,
3546 - bcm_sf2_cfp_rule_size(priv));
3547 + rule_index[1] = find_first_zero_bit(priv->cfp.used,
3548 + priv->num_cfp_rules);
3549 else
3550 - rule_index[0] = fs->location;
3551 + rule_index[1] = fs->location;
3552 + if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
3553 + return -ENOSPC;
3554
3555 /* Flag it as used (cleared on error path) such that we can immediately
3556 * obtain a second one to chain from.
3557 */
3558 - set_bit(rule_index[0], priv->cfp.used);
3559 + set_bit(rule_index[1], priv->cfp.used);
3560
3561 - rule_index[1] = find_first_zero_bit(priv->cfp.used,
3562 - bcm_sf2_cfp_rule_size(priv));
3563 - if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
3564 + rule_index[0] = find_first_zero_bit(priv->cfp.used,
3565 + priv->num_cfp_rules);
3566 + if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
3567 ret = -ENOSPC;
3568 goto out_err;
3569 }
3570 @@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
3571 /* Flag the second half rule as being used now, return it as the
3572 * location, and flag it as unique while dumping rules
3573 */
3574 - set_bit(rule_index[1], priv->cfp.used);
3575 + set_bit(rule_index[0], priv->cfp.used);
3576 set_bit(rule_index[1], priv->cfp.unique);
3577 fs->location = rule_index[1];
3578
3579 return ret;
3580
3581 out_err:
3582 - clear_bit(rule_index[0], priv->cfp.used);
3583 + clear_bit(rule_index[1], priv->cfp.used);
3584 return ret;
3585 }
3586
3587 @@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
3588 int ret;
3589 u32 reg;
3590
3591 - /* Refuse deletion of unused rules, and the default reserved rule */
3592 - if (!test_bit(loc, priv->cfp.used) || loc == 0)
3593 - return -EINVAL;
3594 -
3595 /* Indicate which rule we want to read */
3596 bcm_sf2_cfp_rule_addr_set(priv, loc);
3597
3598 @@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
3599 u32 next_loc = 0;
3600 int ret;
3601
3602 + /* Refuse deleting unused rules, and those that are not unique since
3603 + * that could leave IPv6 rules with one of the chained rule in the
3604 + * table.
3605 + */
3606 + if (!test_bit(loc, priv->cfp.unique) || loc == 0)
3607 + return -EINVAL;
3608 +
3609 ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
3610 if (ret)
3611 return ret;
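The bcm_sf2 CFP hunks above size the find_first_zero_bit() search by priv->num_cfp_rules and bound the returned index before using it, since find_first_zero_bit() returns the full bitmap size when no free bit exists. A schematic kernel-style sketch of that allocate-from-bitmap pattern (MY_MAX_RULES and the helper name are made up):

    #include <linux/bitmap.h>
    #include <linux/errno.h>

    #define MY_MAX_RULES 256

    static DECLARE_BITMAP(my_rules_used, MY_MAX_RULES);

    /* Pick a free slot, or fail with -ENOSPC when the table is exhausted. */
    static int my_alloc_rule(void)
    {
            unsigned int idx;

            idx = find_first_zero_bit(my_rules_used, MY_MAX_RULES);
            if (idx >= MY_MAX_RULES)        /* returns the size when every bit is set */
                    return -ENOSPC;

            set_bit(idx, my_rules_used);
            return idx;
    }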
3612 diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
3613 index 36c8950dbd2d..176861bd2252 100644
3614 --- a/drivers/net/ethernet/3com/3c59x.c
3615 +++ b/drivers/net/ethernet/3com/3c59x.c
3616 @@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
3617 vp->mii.reg_num_mask = 0x1f;
3618
3619 /* Makes sure rings are at least 16 byte aligned. */
3620 - vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
3621 + vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
3622 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
3623 - &vp->rx_ring_dma);
3624 + &vp->rx_ring_dma, GFP_KERNEL);
3625 retval = -ENOMEM;
3626 if (!vp->rx_ring)
3627 goto free_device;
3628 @@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
3629 return 0;
3630
3631 free_ring:
3632 - pci_free_consistent(pdev,
3633 - sizeof(struct boom_rx_desc) * RX_RING_SIZE
3634 - + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
3635 - vp->rx_ring,
3636 - vp->rx_ring_dma);
3637 + dma_free_coherent(&pdev->dev,
3638 + sizeof(struct boom_rx_desc) * RX_RING_SIZE +
3639 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
3640 + vp->rx_ring, vp->rx_ring_dma);
3641 free_device:
3642 free_netdev(dev);
3643 pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
3644 @@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
3645 break; /* Bad news! */
3646
3647 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
3648 - dma = pci_map_single(VORTEX_PCI(vp), skb->data,
3649 - PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
3650 - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
3651 + dma = dma_map_single(vp->gendev, skb->data,
3652 + PKT_BUF_SZ, DMA_FROM_DEVICE);
3653 + if (dma_mapping_error(vp->gendev, dma))
3654 break;
3655 vp->rx_ring[i].addr = cpu_to_le32(dma);
3656 }
3657 @@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
3658 if (vp->bus_master) {
3659 /* Set the bus-master controller to transfer the packet. */
3660 int len = (skb->len + 3) & ~3;
3661 - vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
3662 - PCI_DMA_TODEVICE);
3663 - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
3664 + vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
3665 + DMA_TO_DEVICE);
3666 + if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
3667 dev_kfree_skb_any(skb);
3668 dev->stats.tx_dropped++;
3669 return NETDEV_TX_OK;
3670 @@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
3671 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
3672
3673 if (!skb_shinfo(skb)->nr_frags) {
3674 - dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
3675 - PCI_DMA_TODEVICE);
3676 - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
3677 + dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
3678 + DMA_TO_DEVICE);
3679 + if (dma_mapping_error(vp->gendev, dma_addr))
3680 goto out_dma_err;
3681
3682 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
3683 @@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
3684 } else {
3685 int i;
3686
3687 - dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
3688 - skb_headlen(skb), PCI_DMA_TODEVICE);
3689 - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
3690 + dma_addr = dma_map_single(vp->gendev, skb->data,
3691 + skb_headlen(skb), DMA_TO_DEVICE);
3692 + if (dma_mapping_error(vp->gendev, dma_addr))
3693 goto out_dma_err;
3694
3695 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
3696 @@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
3697 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3698 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3699
3700 - dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
3701 + dma_addr = skb_frag_dma_map(vp->gendev, frag,
3702 0,
3703 frag->size,
3704 DMA_TO_DEVICE);
3705 - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
3706 + if (dma_mapping_error(vp->gendev, dma_addr)) {
3707 for(i = i-1; i >= 0; i--)
3708 - dma_unmap_page(&VORTEX_PCI(vp)->dev,
3709 + dma_unmap_page(vp->gendev,
3710 le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
3711 le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
3712 DMA_TO_DEVICE);
3713
3714 - pci_unmap_single(VORTEX_PCI(vp),
3715 + dma_unmap_single(vp->gendev,
3716 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
3717 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
3718 - PCI_DMA_TODEVICE);
3719 + DMA_TO_DEVICE);
3720
3721 goto out_dma_err;
3722 }
3723 @@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
3724 }
3725 }
3726 #else
3727 - dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
3728 - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
3729 + dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
3730 + if (dma_mapping_error(vp->gendev, dma_addr))
3731 goto out_dma_err;
3732 vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
3733 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
3734 @@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
3735 out:
3736 return NETDEV_TX_OK;
3737 out_dma_err:
3738 - dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
3739 + dev_err(vp->gendev, "Error mapping dma buffer\n");
3740 goto out;
3741 }
3742
3743 @@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
3744 if (status & DMADone) {
3745 if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
3746 iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
3747 - pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
3748 + dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
3749 pkts_compl++;
3750 bytes_compl += vp->tx_skb->len;
3751 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
3752 @@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
3753 struct sk_buff *skb = vp->tx_skbuff[entry];
3754 #if DO_ZEROCOPY
3755 int i;
3756 - pci_unmap_single(VORTEX_PCI(vp),
3757 + dma_unmap_single(vp->gendev,
3758 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
3759 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
3760 - PCI_DMA_TODEVICE);
3761 + DMA_TO_DEVICE);
3762
3763 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
3764 - pci_unmap_page(VORTEX_PCI(vp),
3765 + dma_unmap_page(vp->gendev,
3766 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
3767 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
3768 - PCI_DMA_TODEVICE);
3769 + DMA_TO_DEVICE);
3770 #else
3771 - pci_unmap_single(VORTEX_PCI(vp),
3772 - le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
3773 + dma_unmap_single(vp->gendev,
3774 + le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
3775 #endif
3776 pkts_compl++;
3777 bytes_compl += skb->len;
3778 @@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
3779 /* 'skb_put()' points to the start of sk_buff data area. */
3780 if (vp->bus_master &&
3781 ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
3782 - dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
3783 - pkt_len, PCI_DMA_FROMDEVICE);
3784 + dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
3785 + pkt_len, DMA_FROM_DEVICE);
3786 iowrite32(dma, ioaddr + Wn7_MasterAddr);
3787 iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
3788 iowrite16(StartDMAUp, ioaddr + EL3_CMD);
3789 while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
3790 ;
3791 - pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
3792 + dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
3793 } else {
3794 ioread32_rep(ioaddr + RX_FIFO,
3795 skb_put(skb, pkt_len),
3796 @@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
3797 if (pkt_len < rx_copybreak &&
3798 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
3799 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
3800 - pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
3801 + dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
3802 /* 'skb_put()' points to the start of sk_buff data area. */
3803 skb_put_data(skb, vp->rx_skbuff[entry]->data,
3804 pkt_len);
3805 - pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
3806 + dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
3807 vp->rx_copy++;
3808 } else {
3809 /* Pre-allocate the replacement skb. If it or its
3810 @@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
3811 dev->stats.rx_dropped++;
3812 goto clear_complete;
3813 }
3814 - newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
3815 - PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
3816 - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
3817 + newdma = dma_map_single(vp->gendev, newskb->data,
3818 + PKT_BUF_SZ, DMA_FROM_DEVICE);
3819 + if (dma_mapping_error(vp->gendev, newdma)) {
3820 dev->stats.rx_dropped++;
3821 consume_skb(newskb);
3822 goto clear_complete;
3823 @@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
3824 vp->rx_skbuff[entry] = newskb;
3825 vp->rx_ring[entry].addr = cpu_to_le32(newdma);
3826 skb_put(skb, pkt_len);
3827 - pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
3828 + dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
3829 vp->rx_nocopy++;
3830 }
3831 skb->protocol = eth_type_trans(skb, dev);
3832 @@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
3833 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
3834 for (i = 0; i < RX_RING_SIZE; i++)
3835 if (vp->rx_skbuff[i]) {
3836 - pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
3837 - PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
3838 + dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
3839 + PKT_BUF_SZ, DMA_FROM_DEVICE);
3840 dev_kfree_skb(vp->rx_skbuff[i]);
3841 vp->rx_skbuff[i] = NULL;
3842 }
3843 @@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
3844 int k;
3845
3846 for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
3847 - pci_unmap_single(VORTEX_PCI(vp),
3848 + dma_unmap_single(vp->gendev,
3849 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
3850 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
3851 - PCI_DMA_TODEVICE);
3852 + DMA_TO_DEVICE);
3853 #else
3854 - pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
3855 + dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
3856 #endif
3857 dev_kfree_skb(skb);
3858 vp->tx_skbuff[i] = NULL;
3859 @@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
3860
3861 pci_iounmap(pdev, vp->ioaddr);
3862
3863 - pci_free_consistent(pdev,
3864 - sizeof(struct boom_rx_desc) * RX_RING_SIZE
3865 - + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
3866 - vp->rx_ring,
3867 - vp->rx_ring_dma);
3868 + dma_free_coherent(&pdev->dev,
3869 + sizeof(struct boom_rx_desc) * RX_RING_SIZE +
3870 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
3871 + vp->rx_ring, vp->rx_ring_dma);
3872
3873 pci_release_regions(pdev);
3874
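The 3c59x changes above switch the driver from the legacy pci_* DMA wrappers to the generic DMA API keyed off vp->gendev, so the same mapping code serves both PCI and EISA variants of the chip. A rough sketch of the correspondence (my_map_buffer() is a made-up helper; dma_map_single()/dma_mapping_error() are the standard kernel calls):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /*
     * Legacy PCI wrapper            Generic DMA API equivalent
     * pci_map_single(pdev, ...)     dma_map_single(&pdev->dev, ...)
     * pci_unmap_single(pdev, ...)   dma_unmap_single(&pdev->dev, ...)
     * PCI_DMA_TODEVICE              DMA_TO_DEVICE
     * PCI_DMA_FROMDEVICE            DMA_FROM_DEVICE
     */
    static int my_map_buffer(struct device *dev, void *buf, size_t len,
                             dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM;
            return 0;
    }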
3875 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
3876 index b57acb8dc35b..dc25066c59a1 100644
3877 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
3878 +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
3879 @@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
3880 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
3881 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
3882 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
3883 - {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
3884 - {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
3885 - {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
3886 - {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
3887 - {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
3888 - {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
3889 - {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
3890 - {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
3891 - {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
3892 + {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
3893 + {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
3894 + {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
3895 + {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
3896 + {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
3897 + {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
3898 + {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
3899 + {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
3900 + {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
3901 };
3902
3903 static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
3904 @@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
3905 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
3906 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
3907 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
3908 - {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
3909 - {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
3910 - {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
3911 - {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
3912 - {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
3913 - {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
3914 - {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
3915 - {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
3916 - {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
3917 - {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
3918 };
3919
3920 static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
3921 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
3922 index 3177b0c9bd2d..829dc8c5ddff 100644
3923 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
3924 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
3925 @@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap,
3926 {
3927 struct tp_params *tp = &adap->params.tp;
3928 u64 hash_filter_mask = tp->hash_filter_mask;
3929 - u32 mask;
3930 + u64 ntuple_mask = 0;
3931
3932 if (!is_hashfilter(adap))
3933 return false;
3934 @@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap,
3935 if (!fs->val.fport || fs->mask.fport != 0xffff)
3936 return false;
3937
3938 - if (tp->fcoe_shift >= 0) {
3939 - mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
3940 - if (mask && !fs->mask.fcoe)
3941 - return false;
3942 - }
3943 + /* calculate tuple mask and compare with mask configured in hw */
3944 + if (tp->fcoe_shift >= 0)
3945 + ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
3946
3947 - if (tp->port_shift >= 0) {
3948 - mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
3949 - if (mask && !fs->mask.iport)
3950 - return false;
3951 - }
3952 + if (tp->port_shift >= 0)
3953 + ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
3954
3955 if (tp->vnic_shift >= 0) {
3956 - mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
3957 -
3958 - if ((adap->params.tp.ingress_config & VNIC_F)) {
3959 - if (mask && !fs->mask.pfvf_vld)
3960 - return false;
3961 - } else {
3962 - if (mask && !fs->mask.ovlan_vld)
3963 - return false;
3964 - }
3965 + if ((adap->params.tp.ingress_config & VNIC_F))
3966 + ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
3967 + else
3968 + ntuple_mask |= (u64)fs->mask.ovlan_vld <<
3969 + tp->vnic_shift;
3970 }
3971
3972 - if (tp->vlan_shift >= 0) {
3973 - mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
3974 - if (mask && !fs->mask.ivlan)
3975 - return false;
3976 - }
3977 + if (tp->vlan_shift >= 0)
3978 + ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
3979
3980 - if (tp->tos_shift >= 0) {
3981 - mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
3982 - if (mask && !fs->mask.tos)
3983 - return false;
3984 - }
3985 + if (tp->tos_shift >= 0)
3986 + ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
3987
3988 - if (tp->protocol_shift >= 0) {
3989 - mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
3990 - if (mask && !fs->mask.proto)
3991 - return false;
3992 - }
3993 + if (tp->protocol_shift >= 0)
3994 + ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
3995
3996 - if (tp->ethertype_shift >= 0) {
3997 - mask = (hash_filter_mask >> tp->ethertype_shift) &
3998 - FT_ETHERTYPE_W;
3999 - if (mask && !fs->mask.ethtype)
4000 - return false;
4001 - }
4002 + if (tp->ethertype_shift >= 0)
4003 + ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
4004
4005 - if (tp->macmatch_shift >= 0) {
4006 - mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
4007 - if (mask && !fs->mask.macidx)
4008 - return false;
4009 - }
4010 + if (tp->macmatch_shift >= 0)
4011 + ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
4012 +
4013 + if (tp->matchtype_shift >= 0)
4014 + ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
4015 +
4016 + if (tp->frag_shift >= 0)
4017 + ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
4018 +
4019 + if (ntuple_mask != hash_filter_mask)
4020 + return false;
4021
4022 - if (tp->matchtype_shift >= 0) {
4023 - mask = (hash_filter_mask >> tp->matchtype_shift) &
4024 - FT_MPSHITTYPE_W;
4025 - if (mask && !fs->mask.matchtype)
4026 - return false;
4027 - }
4028 - if (tp->frag_shift >= 0) {
4029 - mask = (hash_filter_mask >> tp->frag_shift) &
4030 - FT_FRAGMENTATION_W;
4031 - if (mask && !fs->mask.frag)
4032 - return false;
4033 - }
4034 return true;
4035 }
4036
4037 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
4038 index 4d84cab77105..e8a3a45d0b53 100644
4039 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
4040 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
4041 @@ -3007,6 +3007,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
4042 mlx4_err(dev, "Failed to create file for port %d\n", port);
4043 devlink_port_unregister(&info->devlink_port);
4044 info->port = -1;
4045 + return err;
4046 }
4047
4048 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
4049 @@ -3028,9 +3029,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
4050 &info->port_attr);
4051 devlink_port_unregister(&info->devlink_port);
4052 info->port = -1;
4053 + return err;
4054 }
4055
4056 - return err;
4057 + return 0;
4058 }
4059
4060 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
4061 diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
4062 index c4f14fdc4e77..0161e01778f2 100644
4063 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
4064 +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
4065 @@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
4066 struct qed_ll2_tx_packet *p_pkt = NULL;
4067 struct qed_ll2_info *p_ll2_conn;
4068 struct qed_ll2_tx_queue *p_tx;
4069 + unsigned long flags = 0;
4070 dma_addr_t tx_frag;
4071
4072 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
4073 @@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
4074
4075 p_tx = &p_ll2_conn->tx_queue;
4076
4077 + spin_lock_irqsave(&p_tx->lock, flags);
4078 while (!list_empty(&p_tx->active_descq)) {
4079 p_pkt = list_first_entry(&p_tx->active_descq,
4080 struct qed_ll2_tx_packet, list_entry);
4081 @@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
4082 list_del(&p_pkt->list_entry);
4083 b_last_packet = list_empty(&p_tx->active_descq);
4084 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
4085 + spin_unlock_irqrestore(&p_tx->lock, flags);
4086 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
4087 struct qed_ooo_buffer *p_buffer;
4088
4089 @@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
4090 b_last_frag,
4091 b_last_packet);
4092 }
4093 + spin_lock_irqsave(&p_tx->lock, flags);
4094 }
4095 + spin_unlock_irqrestore(&p_tx->lock, flags);
4096 }
4097
4098 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
4099 @@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
4100 struct qed_ll2_info *p_ll2_conn = NULL;
4101 struct qed_ll2_rx_packet *p_pkt = NULL;
4102 struct qed_ll2_rx_queue *p_rx;
4103 + unsigned long flags = 0;
4104
4105 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
4106 if (!p_ll2_conn)
4107 @@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
4108
4109 p_rx = &p_ll2_conn->rx_queue;
4110
4111 + spin_lock_irqsave(&p_rx->lock, flags);
4112 while (!list_empty(&p_rx->active_descq)) {
4113 p_pkt = list_first_entry(&p_rx->active_descq,
4114 struct qed_ll2_rx_packet, list_entry);
4115 if (!p_pkt)
4116 break;
4117 -
4118 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
4119 + spin_unlock_irqrestore(&p_rx->lock, flags);
4120
4121 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
4122 struct qed_ooo_buffer *p_buffer;
4123 @@ -588,7 +595,9 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
4124 cookie,
4125 rx_buf_addr, b_last);
4126 }
4127 + spin_lock_irqsave(&p_rx->lock, flags);
4128 }
4129 + spin_unlock_irqrestore(&p_rx->lock, flags);
4130 }
4131
4132 static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
4133 @@ -601,6 +610,27 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
4134 return bd_flags;
4135 }
4136
4137 +static bool
4138 +qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
4139 + struct core_rx_slow_path_cqe *p_cqe)
4140 +{
4141 + struct ooo_opaque *iscsi_ooo;
4142 + u32 cid;
4143 +
4144 + if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
4145 + return false;
4146 +
4147 + iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
4148 + if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
4149 + return false;
4150 +
4151 + /* Need to make a flush */
4152 + cid = le32_to_cpu(iscsi_ooo->cid);
4153 + qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
4154 +
4155 + return true;
4156 +}
4157 +
4158 static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
4159 struct qed_ll2_info *p_ll2_conn)
4160 {
4161 @@ -627,6 +657,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
4162 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
4163 cqe_type = cqe->rx_cqe_sp.type;
4164
4165 + if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
4166 + if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
4167 + &cqe->rx_cqe_sp))
4168 + continue;
4169 +
4170 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
4171 DP_NOTICE(p_hwfn,
4172 "Got a non-regular LB LL2 completion [type 0x%02x]\n",
4173 @@ -807,6 +842,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
4174 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
4175 int rc;
4176
4177 + if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
4178 + return 0;
4179 +
4180 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
4181 if (rc)
4182 return rc;
4183 @@ -827,6 +865,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
4184 u16 new_idx = 0, num_bds = 0;
4185 int rc;
4186
4187 + if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
4188 + return 0;
4189 +
4190 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
4191 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
4192
4193 @@ -1880,17 +1921,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
4194
4195 /* Stop Tx & Rx of connection, if needed */
4196 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
4197 + p_ll2_conn->tx_queue.b_cb_registred = false;
4198 + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
4199 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
4200 if (rc)
4201 goto out;
4202 +
4203 qed_ll2_txq_flush(p_hwfn, connection_handle);
4204 + qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
4205 }
4206
4207 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
4208 + p_ll2_conn->rx_queue.b_cb_registred = false;
4209 + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
4210 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
4211 if (rc)
4212 goto out;
4213 +
4214 qed_ll2_rxq_flush(p_hwfn, connection_handle);
4215 + qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
4216 }
4217
4218 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
4219 @@ -1938,16 +1987,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
4220 if (!p_ll2_conn)
4221 return;
4222
4223 - if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
4224 - p_ll2_conn->rx_queue.b_cb_registred = false;
4225 - qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
4226 - }
4227 -
4228 - if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
4229 - p_ll2_conn->tx_queue.b_cb_registred = false;
4230 - qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
4231 - }
4232 -
4233 kfree(p_ll2_conn->tx_queue.descq_mem);
4234 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
4235
4236 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
4237 index 6c7bdd0c361a..ffae19714ffd 100644
4238 --- a/drivers/net/tun.c
4239 +++ b/drivers/net/tun.c
4240 @@ -680,15 +680,6 @@ static void tun_queue_purge(struct tun_file *tfile)
4241 skb_queue_purge(&tfile->sk.sk_error_queue);
4242 }
4243
4244 -static void tun_cleanup_tx_ring(struct tun_file *tfile)
4245 -{
4246 - if (tfile->tx_ring.queue) {
4247 - ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
4248 - xdp_rxq_info_unreg(&tfile->xdp_rxq);
4249 - memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
4250 - }
4251 -}
4252 -
4253 static void __tun_detach(struct tun_file *tfile, bool clean)
4254 {
4255 struct tun_file *ntfile;
4256 @@ -735,7 +726,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
4257 tun->dev->reg_state == NETREG_REGISTERED)
4258 unregister_netdevice(tun->dev);
4259 }
4260 - tun_cleanup_tx_ring(tfile);
4261 + if (tun)
4262 + xdp_rxq_info_unreg(&tfile->xdp_rxq);
4263 + ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
4264 sock_put(&tfile->sk);
4265 }
4266 }
4267 @@ -775,14 +768,14 @@ static void tun_detach_all(struct net_device *dev)
4268 tun_napi_del(tun, tfile);
4269 /* Drop read queue */
4270 tun_queue_purge(tfile);
4271 + xdp_rxq_info_unreg(&tfile->xdp_rxq);
4272 sock_put(&tfile->sk);
4273 - tun_cleanup_tx_ring(tfile);
4274 }
4275 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
4276 tun_enable_queue(tfile);
4277 tun_queue_purge(tfile);
4278 + xdp_rxq_info_unreg(&tfile->xdp_rxq);
4279 sock_put(&tfile->sk);
4280 - tun_cleanup_tx_ring(tfile);
4281 }
4282 BUG_ON(tun->numdisabled != 0);
4283
4284 @@ -826,7 +819,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
4285 }
4286
4287 if (!tfile->detached &&
4288 - ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
4289 + ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
4290 + GFP_KERNEL, tun_ptr_free)) {
4291 err = -ENOMEM;
4292 goto out;
4293 }
4294 @@ -3131,6 +3125,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
4295 &tun_proto, 0);
4296 if (!tfile)
4297 return -ENOMEM;
4298 + if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
4299 + sk_free(&tfile->sk);
4300 + return -ENOMEM;
4301 + }
4302 +
4303 RCU_INIT_POINTER(tfile->tun, NULL);
4304 tfile->flags = 0;
4305 tfile->ifindex = 0;
4306 @@ -3151,8 +3150,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
4307
4308 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
4309
4310 - memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
4311 -
4312 return 0;
4313 }
4314
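The tun changes above create tfile->tx_ring as a zero-sized ptr_ring in tun_chr_open() and grow it with ptr_ring_resize() during attach, which lets the detach paths call ptr_ring_cleanup() unconditionally instead of tracking whether the ring was ever allocated. A compressed sketch of that lifecycle using the <linux/ptr_ring.h> API (my_open/my_attach/my_detach are placeholder names):

    #include <linux/ptr_ring.h>
    #include <linux/slab.h>

    static void my_entry_free(void *ptr)
    {
            kfree(ptr);
    }

    static int my_open(struct ptr_ring *ring)
    {
            /* Zero-sized ring: valid, and always safe to clean up later. */
            return ptr_ring_init(ring, 0, GFP_KERNEL);
    }

    static int my_attach(struct ptr_ring *ring, int qlen)
    {
            /* Grow in place; leftover entries are released via the callback. */
            return ptr_ring_resize(ring, qlen, GFP_KERNEL, my_entry_free);
    }

    static void my_detach(struct ptr_ring *ring)
    {
            ptr_ring_cleanup(ring, my_entry_free);
    }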
4315 diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
4316 index 9ebe2a689966..27a9bb8c9611 100644
4317 --- a/drivers/net/vmxnet3/vmxnet3_drv.c
4318 +++ b/drivers/net/vmxnet3/vmxnet3_drv.c
4319 @@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
4320
4321 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
4322 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
4323 + /* Prevent any &gdesc->tcd field from being (speculatively)
4324 + * read before (&gdesc->tcd)->gen is read.
4325 + */
4326 + dma_rmb();
4327 +
4328 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
4329 &gdesc->tcd), tq, adapter->pdev,
4330 adapter);
4331 @@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
4332 gdesc->txd.tci = skb_vlan_tag_get(skb);
4333 }
4334
4335 + /* Ensure that the write to (&gdesc->txd)->gen will be observed after
4336 + * all other writes to &gdesc->txd.
4337 + */
4338 + dma_wmb();
4339 +
4340 /* finally flips the GEN bit of the SOP desc. */
4341 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
4342 VMXNET3_TXD_GEN);
4343 @@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
4344 */
4345 break;
4346 }
4347 +
4348 + /* Prevent any rcd field from being (speculatively) read before
4349 + * rcd->gen is read.
4350 + */
4351 + dma_rmb();
4352 +
4353 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
4354 rcd->rqID != rq->dataRingQid);
4355 idx = rcd->rxdIdx;
4356 @@ -1528,6 +1544,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
4357 ring->next2comp = idx;
4358 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
4359 ring = rq->rx_ring + ring_idx;
4360 +
4361 + /* Ensure that the writes to rxd->gen bits will be observed
4362 + * after all other writes to rxd objects.
4363 + */
4364 + dma_wmb();
4365 +
4366 while (num_to_alloc) {
4367 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
4368 &rxCmdDesc);
4369 @@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
4370 /* ==================== initialization and cleanup routines ============ */
4371
4372 static int
4373 -vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
4374 +vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
4375 {
4376 int err;
4377 unsigned long mmio_start, mmio_len;
4378 @@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
4379 return err;
4380 }
4381
4382 - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
4383 - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
4384 - dev_err(&pdev->dev,
4385 - "pci_set_consistent_dma_mask failed\n");
4386 - err = -EIO;
4387 - goto err_set_mask;
4388 - }
4389 - *dma64 = true;
4390 - } else {
4391 - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
4392 - dev_err(&pdev->dev,
4393 - "pci_set_dma_mask failed\n");
4394 - err = -EIO;
4395 - goto err_set_mask;
4396 - }
4397 - *dma64 = false;
4398 - }
4399 -
4400 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
4401 vmxnet3_driver_name);
4402 if (err) {
4403 dev_err(&pdev->dev,
4404 "Failed to request region for adapter: error %d\n", err);
4405 - goto err_set_mask;
4406 + goto err_enable_device;
4407 }
4408
4409 pci_set_master(pdev);
4410 @@ -2751,7 +2755,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
4411 iounmap(adapter->hw_addr0);
4412 err_ioremap:
4413 pci_release_selected_regions(pdev, (1 << 2) - 1);
4414 -err_set_mask:
4415 +err_enable_device:
4416 pci_disable_device(pdev);
4417 return err;
4418 }
4419 @@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
4420 #endif
4421 };
4422 int err;
4423 - bool dma64 = false; /* stupid gcc */
4424 + bool dma64;
4425 u32 ver;
4426 struct net_device *netdev;
4427 struct vmxnet3_adapter *adapter;
4428 @@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
4429 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
4430 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
4431
4432 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
4433 + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
4434 + dev_err(&pdev->dev,
4435 + "pci_set_consistent_dma_mask failed\n");
4436 + err = -EIO;
4437 + goto err_set_mask;
4438 + }
4439 + dma64 = true;
4440 + } else {
4441 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
4442 + dev_err(&pdev->dev,
4443 + "pci_set_dma_mask failed\n");
4444 + err = -EIO;
4445 + goto err_set_mask;
4446 + }
4447 + dma64 = false;
4448 + }
4449 +
4450 spin_lock_init(&adapter->cmd_lock);
4451 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
4452 sizeof(struct vmxnet3_adapter),
4453 @@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
4454 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
4455 dev_err(&pdev->dev, "Failed to map dma\n");
4456 err = -EFAULT;
4457 - goto err_dma_map;
4458 + goto err_set_mask;
4459 }
4460 adapter->shared = dma_alloc_coherent(
4461 &adapter->pdev->dev,
4462 @@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
4463 }
4464 #endif /* VMXNET3_RSS */
4465
4466 - err = vmxnet3_alloc_pci_resources(adapter, &dma64);
4467 + err = vmxnet3_alloc_pci_resources(adapter);
4468 if (err < 0)
4469 goto err_alloc_pci;
4470
4471 @@ -3504,7 +3526,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
4472 err_alloc_shared:
4473 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4474 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
4475 -err_dma_map:
4476 +err_set_mask:
4477 free_netdev(netdev);
4478 return err;
4479 }
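The vmxnet3 hunks above insert dma_rmb()/dma_wmb() so that the gen (ownership) bit of a descriptor is read before its payload and written after it. A schematic kernel-style sketch of that ring-ownership pattern over DMA-coherent memory (struct my_desc and its fields are invented; the real vmxnet3 descriptors differ):

    #include <linux/compiler.h>
    #include <linux/types.h>
    #include <asm/barrier.h>

    struct my_desc {
            u32 addr;
            u32 len;
            u32 gen;        /* ownership bit: matches ring gen when valid */
    };

    /* Consumer: only trust the payload after confirming ownership. */
    static bool my_poll_desc(struct my_desc *d, u32 ring_gen, u32 *addr, u32 *len)
    {
            if (READ_ONCE(d->gen) != ring_gen)
                    return false;
            dma_rmb();              /* read gen before the other fields */
            *addr = d->addr;
            *len = d->len;
            return true;
    }

    /* Producer: publish the payload before flipping ownership. */
    static void my_post_desc(struct my_desc *d, u32 ring_gen, u32 addr, u32 len)
    {
            d->addr = addr;
            d->len = len;
            dma_wmb();              /* make the payload visible before gen */
            WRITE_ONCE(d->gen, ring_gen);
    }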
4480 diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
4481 index a3326463b71f..a2c554f8a61b 100644
4482 --- a/drivers/net/vmxnet3/vmxnet3_int.h
4483 +++ b/drivers/net/vmxnet3/vmxnet3_int.h
4484 @@ -69,10 +69,12 @@
4485 /*
4486 * Version numbers
4487 */
4488 -#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k"
4489 +#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"
4490
4491 -/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
4492 -#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00
4493 +/* Each byte of this 32-bit integer encodes a version number in
4494 + * VMXNET3_DRIVER_VERSION_STRING.
4495 + */
4496 +#define VMXNET3_DRIVER_VERSION_NUM 0x01041000
4497
4498 #if defined(CONFIG_PCI_MSI)
4499 /* RSS only makes sense if MSI-X is supported. */
4500 diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
4501 index e1cfa06810ef..e79f2a181ad2 100644
4502 --- a/drivers/rtc/hctosys.c
4503 +++ b/drivers/rtc/hctosys.c
4504 @@ -49,6 +49,11 @@ static int __init rtc_hctosys(void)
4505
4506 tv64.tv_sec = rtc_tm_to_time64(&tm);
4507
4508 +#if BITS_PER_LONG == 32
4509 + if (tv64.tv_sec > INT_MAX)
4510 + goto err_read;
4511 +#endif
4512 +
4513 err = do_settimeofday64(&tv64);
4514
4515 dev_info(rtc->dev.parent,
4516 diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
4517 index d67769265185..a1c44d0c8557 100644
4518 --- a/drivers/rtc/rtc-goldfish.c
4519 +++ b/drivers/rtc/rtc-goldfish.c
4520 @@ -235,3 +235,5 @@ static struct platform_driver goldfish_rtc = {
4521 };
4522
4523 module_platform_driver(goldfish_rtc);
4524 +
4525 +MODULE_LICENSE("GPL v2");
4526 diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
4527 index c90fba3ed861..6620016869cf 100644
4528 --- a/drivers/rtc/rtc-m41t80.c
4529 +++ b/drivers/rtc/rtc-m41t80.c
4530 @@ -885,7 +885,6 @@ static int m41t80_probe(struct i2c_client *client,
4531 {
4532 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
4533 int rc = 0;
4534 - struct rtc_device *rtc = NULL;
4535 struct rtc_time tm;
4536 struct m41t80_data *m41t80_data = NULL;
4537 bool wakeup_source = false;
4538 @@ -909,6 +908,10 @@ static int m41t80_probe(struct i2c_client *client,
4539 m41t80_data->features = id->driver_data;
4540 i2c_set_clientdata(client, m41t80_data);
4541
4542 + m41t80_data->rtc = devm_rtc_allocate_device(&client->dev);
4543 + if (IS_ERR(m41t80_data->rtc))
4544 + return PTR_ERR(m41t80_data->rtc);
4545 +
4546 #ifdef CONFIG_OF
4547 wakeup_source = of_property_read_bool(client->dev.of_node,
4548 "wakeup-source");
4549 @@ -932,15 +935,11 @@ static int m41t80_probe(struct i2c_client *client,
4550 device_init_wakeup(&client->dev, true);
4551 }
4552
4553 - rtc = devm_rtc_device_register(&client->dev, client->name,
4554 - &m41t80_rtc_ops, THIS_MODULE);
4555 - if (IS_ERR(rtc))
4556 - return PTR_ERR(rtc);
4557 + m41t80_data->rtc->ops = &m41t80_rtc_ops;
4558
4559 - m41t80_data->rtc = rtc;
4560 if (client->irq <= 0) {
4561 /* We cannot support UIE mode if we do not have an IRQ line */
4562 - rtc->uie_unsupported = 1;
4563 + m41t80_data->rtc->uie_unsupported = 1;
4564 }
4565
4566 /* Make sure HT (Halt Update) bit is cleared */
4567 @@ -993,6 +992,11 @@ static int m41t80_probe(struct i2c_client *client,
4568 if (m41t80_data->features & M41T80_FEATURE_SQ)
4569 m41t80_sqw_register_clk(m41t80_data);
4570 #endif
4571 +
4572 + rc = rtc_register_device(m41t80_data->rtc);
4573 + if (rc)
4574 + return rc;
4575 +
4576 return 0;
4577 }
4578
4579 diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
4580 index 35c9aada07c8..79c8da54e922 100644
4581 --- a/drivers/rtc/rtc-rk808.c
4582 +++ b/drivers/rtc/rtc-rk808.c
4583 @@ -416,12 +416,11 @@ static int rk808_rtc_probe(struct platform_device *pdev)
4584
4585 device_init_wakeup(&pdev->dev, 1);
4586
4587 - rk808_rtc->rtc = devm_rtc_device_register(&pdev->dev, "rk808-rtc",
4588 - &rk808_rtc_ops, THIS_MODULE);
4589 - if (IS_ERR(rk808_rtc->rtc)) {
4590 - ret = PTR_ERR(rk808_rtc->rtc);
4591 - return ret;
4592 - }
4593 + rk808_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
4594 + if (IS_ERR(rk808_rtc->rtc))
4595 + return PTR_ERR(rk808_rtc->rtc);
4596 +
4597 + rk808_rtc->rtc->ops = &rk808_rtc_ops;
4598
4599 rk808_rtc->irq = platform_get_irq(pdev, 0);
4600 if (rk808_rtc->irq < 0) {
4601 @@ -438,9 +437,10 @@ static int rk808_rtc_probe(struct platform_device *pdev)
4602 if (ret) {
4603 dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
4604 rk808_rtc->irq, ret);
4605 + return ret;
4606 }
4607
4608 - return ret;
4609 + return rtc_register_device(rk808_rtc->rtc);
4610 }
4611
4612 static struct platform_driver rk808_rtc_driver = {
4613 diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
4614 index 026035373ae6..38a12435b5a0 100644
4615 --- a/drivers/rtc/rtc-rp5c01.c
4616 +++ b/drivers/rtc/rtc-rp5c01.c
4617 @@ -249,16 +249,24 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
4618
4619 platform_set_drvdata(dev, priv);
4620
4621 - rtc = devm_rtc_device_register(&dev->dev, "rtc-rp5c01", &rp5c01_rtc_ops,
4622 - THIS_MODULE);
4623 + rtc = devm_rtc_allocate_device(&dev->dev);
4624 if (IS_ERR(rtc))
4625 return PTR_ERR(rtc);
4626 +
4627 + rtc->ops = &rp5c01_rtc_ops;
4628 +
4629 priv->rtc = rtc;
4630
4631 error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr);
4632 if (error)
4633 return error;
4634
4635 + error = rtc_register_device(rtc);
4636 + if (error) {
4637 + sysfs_remove_bin_file(&dev->dev.kobj, &priv->nvram_attr);
4638 + return error;
4639 + }
4640 +
4641 return 0;
4642 }
4643
4644 diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
4645 index d8ef9e052c4f..9af591d5223c 100644
4646 --- a/drivers/rtc/rtc-snvs.c
4647 +++ b/drivers/rtc/rtc-snvs.c
4648 @@ -132,20 +132,23 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
4649 {
4650 struct snvs_rtc_data *data = dev_get_drvdata(dev);
4651 unsigned long time;
4652 + int ret;
4653
4654 rtc_tm_to_time(tm, &time);
4655
4656 /* Disable RTC first */
4657 - snvs_rtc_enable(data, false);
4658 + ret = snvs_rtc_enable(data, false);
4659 + if (ret)
4660 + return ret;
4661
4662 /* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */
4663 regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH);
4664 regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH));
4665
4666 /* Enable RTC again */
4667 - snvs_rtc_enable(data, true);
4668 + ret = snvs_rtc_enable(data, true);
4669
4670 - return 0;
4671 + return ret;
4672 }
4673
4674 static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
4675 @@ -288,7 +291,11 @@ static int snvs_rtc_probe(struct platform_device *pdev)
4676 regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff);
4677
4678 /* Enable RTC */
4679 - snvs_rtc_enable(data, true);
4680 + ret = snvs_rtc_enable(data, true);
4681 + if (ret) {
4682 + dev_err(&pdev->dev, "failed to enable rtc %d\n", ret);
4683 + goto error_rtc_device_register;
4684 + }
4685
4686 device_init_wakeup(&pdev->dev, true);
4687
4688 diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
4689 index 560d9a5e0225..a9528083061d 100644
4690 --- a/drivers/rtc/rtc-tx4939.c
4691 +++ b/drivers/rtc/rtc-tx4939.c
4692 @@ -86,7 +86,8 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
4693 for (i = 2; i < 6; i++)
4694 buf[i] = __raw_readl(&rtcreg->dat);
4695 spin_unlock_irq(&pdata->lock);
4696 - sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
4697 + sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
4698 + (buf[3] << 8) | buf[2];
4699 rtc_time_to_tm(sec, tm);
4700 return rtc_valid_tm(tm);
4701 }
4702 @@ -147,7 +148,8 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
4703 alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
4704 alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
4705 spin_unlock_irq(&pdata->lock);
4706 - sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
4707 + sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
4708 + (buf[3] << 8) | buf[2];
4709 rtc_time_to_tm(sec, &alrm->time);
4710 return rtc_valid_tm(&alrm->time);
4711 }
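The rtc-tx4939 hunks above cast the high byte to unsigned long before the 24-bit shift. Without the cast the shift is performed in plain int, so a high byte of 0x80 or more lands in the sign bit and the value is then sign-extended when widened. A small standalone illustration of the pitfall (byte values are arbitrary; the "bad" result assumes the usual two's-complement wraparound, since the unguarded shift formally overflows signed int):

    #include <stdio.h>

    int main(void)
    {
            unsigned char b[4] = { 0x12, 0x34, 0x56, 0x9a };        /* 0x9a: bit 7 set */
            unsigned long long bad, good;

            /* b[3] is promoted to int; 0x9a << 24 sets the sign bit, and the
             * negative int is then sign-extended when widened to 64 bits. */
            bad = (b[3] << 24) | (b[2] << 16) | (b[1] << 8) | b[0];

            /* Casting first keeps the arithmetic unsigned and wide enough. */
            good = ((unsigned long long)b[3] << 24) | (b[2] << 16) |
                   (b[1] << 8) | b[0];

            printf("bad  = %#llx\n", bad);  /* typically 0xffffffff9a563412 */
            printf("good = %#llx\n", good); /* 0x9a563412 */
            return 0;
    }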
4712 diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
4713 index a8b831000b2d..18c4f933e8b9 100644
4714 --- a/drivers/s390/scsi/zfcp_dbf.c
4715 +++ b/drivers/s390/scsi/zfcp_dbf.c
4716 @@ -4,7 +4,7 @@
4717 *
4718 * Debug traces for zfcp.
4719 *
4720 - * Copyright IBM Corp. 2002, 2017
4721 + * Copyright IBM Corp. 2002, 2018
4722 */
4723
4724 #define KMSG_COMPONENT "zfcp"
4725 @@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
4726 spin_unlock_irqrestore(&dbf->rec_lock, flags);
4727 }
4728
4729 +/**
4730 + * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
4731 + * @tag: identifier for event
4732 + * @adapter: adapter on which the erp_action should run
4733 + * @port: remote port involved in the erp_action
4734 + * @sdev: scsi device involved in the erp_action
4735 + * @want: wanted erp_action
4736 + * @need: required erp_action
4737 + *
4738 + * The adapter->erp_lock must not be held.
4739 + */
4740 +void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
4741 + struct zfcp_port *port, struct scsi_device *sdev,
4742 + u8 want, u8 need)
4743 +{
4744 + unsigned long flags;
4745 +
4746 + read_lock_irqsave(&adapter->erp_lock, flags);
4747 + zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
4748 + read_unlock_irqrestore(&adapter->erp_lock, flags);
4749 +}
4750
4751 /**
4752 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
4753 diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
4754 index bf8ea4df2bb8..e5eed8aac0ce 100644
4755 --- a/drivers/s390/scsi/zfcp_ext.h
4756 +++ b/drivers/s390/scsi/zfcp_ext.h
4757 @@ -4,7 +4,7 @@
4758 *
4759 * External function declarations.
4760 *
4761 - * Copyright IBM Corp. 2002, 2016
4762 + * Copyright IBM Corp. 2002, 2018
4763 */
4764
4765 #ifndef ZFCP_EXT_H
4766 @@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
4767 extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
4768 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
4769 struct zfcp_port *, struct scsi_device *, u8, u8);
4770 +extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
4771 + struct zfcp_port *port,
4772 + struct scsi_device *sdev, u8 want, u8 need);
4773 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
4774 extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
4775 struct zfcp_erp_action *erp);
4776 diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
4777 index 4d2ba5682493..22f9562f415c 100644
4778 --- a/drivers/s390/scsi/zfcp_scsi.c
4779 +++ b/drivers/s390/scsi/zfcp_scsi.c
4780 @@ -4,7 +4,7 @@
4781 *
4782 * Interface to Linux SCSI midlayer.
4783 *
4784 - * Copyright IBM Corp. 2002, 2017
4785 + * Copyright IBM Corp. 2002, 2018
4786 */
4787
4788 #define KMSG_COMPONENT "zfcp"
4789 @@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
4790 ids.port_id = port->d_id;
4791 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
4792
4793 - zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
4794 - ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
4795 - ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
4796 + zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
4797 + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
4798 + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
4799 rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
4800 if (!rport) {
4801 dev_err(&port->adapter->ccw_device->dev,
4802 @@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
4803 struct fc_rport *rport = port->rport;
4804
4805 if (rport) {
4806 - zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
4807 - ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
4808 - ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
4809 + zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
4810 + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
4811 + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
4812 fc_remote_port_delete(rport);
4813 port->rport = NULL;
4814 }
4815 diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
4816 index d9f2229664ad..d62ddd63f4fe 100644
4817 --- a/drivers/scsi/aacraid/commsup.c
4818 +++ b/drivers/scsi/aacraid/commsup.c
4819 @@ -1502,9 +1502,10 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
4820 host = aac->scsi_host_ptr;
4821 scsi_block_requests(host);
4822 aac_adapter_disable_int(aac);
4823 - if (aac->thread->pid != current->pid) {
4824 + if (aac->thread && aac->thread->pid != current->pid) {
4825 spin_unlock_irq(host->host_lock);
4826 kthread_stop(aac->thread);
4827 + aac->thread = NULL;
4828 jafo = 1;
4829 }
4830
4831 @@ -1591,6 +1592,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
4832 aac->name);
4833 if (IS_ERR(aac->thread)) {
4834 retval = PTR_ERR(aac->thread);
4835 + aac->thread = NULL;
4836 goto out;
4837 }
4838 }
4839 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
4840 index 2664ea0df35f..f24fb942065d 100644
4841 --- a/drivers/scsi/aacraid/linit.c
4842 +++ b/drivers/scsi/aacraid/linit.c
4843 @@ -1562,6 +1562,7 @@ static void __aac_shutdown(struct aac_dev * aac)
4844 up(&fib->event_wait);
4845 }
4846 kthread_stop(aac->thread);
4847 + aac->thread = NULL;
4848 }
4849
4850 aac_send_shutdown(aac);
4851 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
4852 index ac77081e6e9e..b07612562c39 100644
4853 --- a/drivers/scsi/lpfc/lpfc_attr.c
4854 +++ b/drivers/scsi/lpfc/lpfc_attr.c
4855 @@ -905,7 +905,12 @@ lpfc_issue_lip(struct Scsi_Host *shost)
4856 LPFC_MBOXQ_t *pmboxq;
4857 int mbxstatus = MBXERR_ERROR;
4858
4859 + /*
4860 + * If the link is offline, disabled or BLOCK_MGMT_IO
4861 + * it doesn't make any sense to allow issue_lip
4862 + */
4863 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4864 + (phba->hba_flag & LINK_DISABLED) ||
4865 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
4866 return -EPERM;
4867
4868 diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
4869 index b159a5c4e388..9265906d956e 100644
4870 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
4871 +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
4872 @@ -696,8 +696,9 @@ lpfc_work_done(struct lpfc_hba *phba)
4873 phba->hba_flag & HBA_SP_QUEUE_EVT)) {
4874 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
4875 pring->flag |= LPFC_DEFERRED_RING_EVENT;
4876 - /* Set the lpfc data pending flag */
4877 - set_bit(LPFC_DATA_READY, &phba->data_flags);
4878 + /* Preserve legacy behavior. */
4879 + if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
4880 + set_bit(LPFC_DATA_READY, &phba->data_flags);
4881 } else {
4882 if (phba->link_state >= LPFC_LINK_UP ||
4883 phba->link_flag & LS_MDS_LOOPBACK) {
4884 diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
4885 index d841aa42f607..730393a65e25 100644
4886 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c
4887 +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
4888 @@ -1998,8 +1998,14 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4889 ndlp->nlp_type |= NLP_NVME_TARGET;
4890 if (bf_get_be32(prli_disc, nvpr))
4891 ndlp->nlp_type |= NLP_NVME_DISCOVERY;
4892 +
4893 + /*
4894 + * If prli_fba is set, the Target supports FirstBurst.
4895 + * If prli_fb_sz is 0, the FirstBurst size is unlimited,
4896 + * otherwise it defines the actual size supported by
4897 + * the NVME Target.
4898 + */
4899 if ((bf_get_be32(prli_fba, nvpr) == 1) &&
4900 - (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
4901 (phba->cfg_nvme_enable_fb) &&
4902 (!phba->nvmet_support)) {
4903 /* Both sides support FB. The target's first
4904 @@ -2008,6 +2014,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4905 ndlp->nlp_flag |= NLP_FIRSTBURST;
4906 ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
4907 nvpr);
4908 +
4909 + /* Expressed in units of 512 bytes */
4910 + if (ndlp->nvme_fb_size)
4911 + ndlp->nvme_fb_size <<=
4912 + LPFC_NVME_FB_SHIFT;
4913 + else
4914 + ndlp->nvme_fb_size = LPFC_NVME_MAX_FB;
4915 }
4916 }
4917
4918 diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
4919 index 81e3a4f10c3c..6327f858c4c8 100644
4920 --- a/drivers/scsi/lpfc/lpfc_nvme.c
4921 +++ b/drivers/scsi/lpfc/lpfc_nvme.c
4922 @@ -241,10 +241,11 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
4923 ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
4924 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
4925 "6047 nvme cmpl Enter "
4926 - "Data %p DID %x Xri: %x status %x cmd:%p lsreg:%p "
4927 - "bmp:%p ndlp:%p\n",
4928 + "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
4929 + "lsreg:%p bmp:%p ndlp:%p\n",
4930 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
4931 cmdwqe->sli4_xritag, status,
4932 + (wcqe->parameter & 0xffff),
4933 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
4934
4935 lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
4936 @@ -419,6 +420,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
4937 {
4938 int ret = 0;
4939 struct lpfc_nvme_lport *lport;
4940 + struct lpfc_nvme_rport *rport;
4941 struct lpfc_vport *vport;
4942 struct lpfc_nodelist *ndlp;
4943 struct ulp_bde64 *bpl;
4944 @@ -437,19 +439,18 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
4945 */
4946
4947 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
4948 + rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
4949 vport = lport->vport;
4950
4951 if (vport->load_flag & FC_UNLOADING)
4952 return -ENODEV;
4953
4954 - if (vport->load_flag & FC_UNLOADING)
4955 - return -ENODEV;
4956 -
4957 - ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
4958 + /* Need the ndlp. It is stored in the driver's rport. */
4959 + ndlp = rport->ndlp;
4960 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
4961 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
4962 - "6051 DID x%06x not an active rport.\n",
4963 - pnvme_rport->port_id);
4964 + "6051 Remoteport %p, rport has invalid ndlp. "
4965 + "Failing LS Req\n", pnvme_rport);
4966 return -ENODEV;
4967 }
4968
4969 @@ -500,8 +501,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
4970
4971 /* Expand print to include key fields. */
4972 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
4973 - "6149 ENTER. lport %p, rport %p lsreq%p rqstlen:%d "
4974 - "rsplen:%d %pad %pad\n",
4975 + "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
4976 + "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
4977 + ndlp->nlp_DID,
4978 pnvme_lport, pnvme_rport,
4979 pnvme_lsreq, pnvme_lsreq->rqstlen,
4980 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
4981 @@ -517,7 +519,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
4982 ndlp, 2, 30, 0);
4983 if (ret != WQE_SUCCESS) {
4984 atomic_inc(&lport->xmt_ls_err);
4985 - lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
4986 + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
4987 "6052 EXIT. issue ls wqe failed lport %p, "
4988 "rport %p lsreq%p Status %x DID %x\n",
4989 pnvme_lport, pnvme_rport, pnvme_lsreq,
4990 @@ -980,14 +982,14 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4991 phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
4992 }
4993 #endif
4994 - freqpriv = nCmd->private;
4995 - freqpriv->nvme_buf = NULL;
4996
4997 /* NVME targets need completion held off until the abort exchange
4998 * completes unless the NVME Rport is getting unregistered.
4999 */
5000
5001 if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
5002 + freqpriv = nCmd->private;
5003 + freqpriv->nvme_buf = NULL;
5004 nCmd->done(nCmd);
5005 lpfc_ncmd->nvmeCmd = NULL;
5006 }
5007 diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
5008 index e79f8f75758c..48b0229ebc99 100644
5009 --- a/drivers/scsi/lpfc/lpfc_nvme.h
5010 +++ b/drivers/scsi/lpfc/lpfc_nvme.h
5011 @@ -27,6 +27,8 @@
5012
5013 #define LPFC_NVME_WAIT_TMO 10
5014 #define LPFC_NVME_EXPEDITE_XRICNT 8
5015 +#define LPFC_NVME_FB_SHIFT 9
5016 +#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
5017
5018 struct lpfc_nvme_qhandle {
5019 uint32_t index; /* WQ index to use */
5020 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
5021 index 5f5528a12308..149f21f53b13 100644
5022 --- a/drivers/scsi/lpfc/lpfc_sli.c
5023 +++ b/drivers/scsi/lpfc/lpfc_sli.c
5024 @@ -129,6 +129,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
5025 /* set consumption flag every once in a while */
5026 if (!((q->host_index + 1) % q->entry_repost))
5027 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
5028 + else
5029 + bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
5030 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
5031 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
5032 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
5033 diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
5034 index 7de5d8d75480..eb5471bc7263 100644
5035 --- a/drivers/scsi/mvsas/mv_94xx.c
5036 +++ b/drivers/scsi/mvsas/mv_94xx.c
5037 @@ -1080,16 +1080,16 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv,
5038 void __iomem *regs = mvi->regs_ex - 0x10200;
5039
5040 int drive = (i/3) & (4-1); /* drive number on host */
5041 - u32 block = mr32(MVS_SGPIO_DCTRL +
5042 + int driveshift = drive * 8; /* bit offset of drive */
5043 + u32 block = ioread32be(regs + MVS_SGPIO_DCTRL +
5044 MVS_SGPIO_HOST_OFFSET * mvi->id);
5045
5046 -
5047 /*
5048 * if bit is set then create a mask with the first
5049 * bit of the drive set in the mask ...
5050 */
5051 - u32 bit = (write_data[i/8] & (1 << (i&(8-1)))) ?
5052 - 1<<(24-drive*8) : 0;
5053 + u32 bit = get_unaligned_be32(write_data) & (1 << i) ?
5054 + 1 << driveshift : 0;
5055
5056 /*
5057 * ... and then shift it to the right position based
5058 @@ -1098,26 +1098,27 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv,
5059 switch (i%3) {
5060 case 0: /* activity */
5061 block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT)
5062 - << (24-drive*8));
5063 + << driveshift);
5064 /* hardwire activity bit to SOF */
5065 block |= LED_BLINKA_SOF << (
5066 MVS_SGPIO_DCTRL_ACT_SHIFT +
5067 - (24-drive*8));
5068 + driveshift);
5069 break;
5070 case 1: /* id */
5071 block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT)
5072 - << (24-drive*8));
5073 + << driveshift);
5074 block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT;
5075 break;
5076 case 2: /* fail */
5077 block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT)
5078 - << (24-drive*8));
5079 + << driveshift);
5080 block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT;
5081 break;
5082 }
5083
5084 - mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
5085 - block);
5086 + iowrite32be(block,
5087 + regs + MVS_SGPIO_DCTRL +
5088 + MVS_SGPIO_HOST_OFFSET * mvi->id);
5089
5090 }
5091
5092 @@ -1132,7 +1133,7 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv,
5093 void __iomem *regs = mvi->regs_ex - 0x10200;
5094
5095 mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
5096 - be32_to_cpu(((u32 *) write_data)[i]));
5097 + ((u32 *) write_data)[i]);
5098 }
5099 return reg_count;
5100 }
5101 diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
5102 index f3b117246d47..4a2d276c42eb 100644
5103 --- a/drivers/scsi/scsi_devinfo.c
5104 +++ b/drivers/scsi/scsi_devinfo.c
5105 @@ -189,6 +189,7 @@ static struct {
5106 {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
5107 {"HP", "DF400", "*", BLIST_REPORTLUN2},
5108 {"HP", "DF500", "*", BLIST_REPORTLUN2},
5109 + {"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
5110 {"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
5111 {"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
5112 {"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
5113 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
5114 index 912eacdc2d83..e93e9178978c 100644
5115 --- a/drivers/scsi/scsi_lib.c
5116 +++ b/drivers/scsi/scsi_lib.c
5117 @@ -856,6 +856,17 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
5118 /* for passthrough error may be set */
5119 error = BLK_STS_OK;
5120 }
5121 + /*
5122 + * Another corner case: the SCSI status byte is non-zero but 'good'.
5123 + * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
5124 + * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
5125 + * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
5126 + * intermediate statuses (both obsolete in SAM-4) as good.
5127 + */
5128 + if (status_byte(result) && scsi_status_is_good(result)) {
5129 + result = 0;
5130 + error = BLK_STS_OK;
5131 + }
5132
5133 /*
5134 * special case: failed zero length commands always need to
5135 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
5136 index c198b96368dd..5c40d809830f 100644
5137 --- a/drivers/scsi/sg.c
5138 +++ b/drivers/scsi/sg.c
5139 @@ -1894,7 +1894,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
5140 num = (rem_sz > scatter_elem_sz_prev) ?
5141 scatter_elem_sz_prev : rem_sz;
5142
5143 - schp->pages[k] = alloc_pages(gfp_mask, order);
5144 + schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
5145 if (!schp->pages[k])
5146 goto out;
5147
5148 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
5149 index 2817e67df3d5..98a51521d853 100644
5150 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
5151 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
5152 @@ -324,7 +324,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch)
5153 }
5154
5155 fd = dpaa2_dq_fd(dq);
5156 - fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
5157 + fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
5158 fq->stats.frames++;
5159
5160 fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
5161 @@ -374,12 +374,14 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
5162 /* Prepare the HW SGT structure */
5163 sgt_buf_size = priv->tx_data_offset +
5164 sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
5165 - sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
5166 + sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
5167 if (unlikely(!sgt_buf)) {
5168 err = -ENOMEM;
5169 goto sgt_buf_alloc_failed;
5170 }
5171 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
5172 + memset(sgt_buf, 0, sgt_buf_size);
5173 +
5174 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
5175
5176 /* Fill in the HW SGT structure.
5177 @@ -421,7 +423,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
5178 return 0;
5179
5180 dma_map_single_failed:
5181 - kfree(sgt_buf);
5182 + skb_free_frag(sgt_buf);
5183 sgt_buf_alloc_failed:
5184 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
5185 dma_map_sg_failed:
5186 @@ -525,9 +527,9 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
5187 return;
5188 }
5189
5190 - /* Free SGT buffer kmalloc'ed on tx */
5191 + /* Free SGT buffer allocated on tx */
5192 if (fd_format != dpaa2_fd_single)
5193 - kfree(skbh);
5194 + skb_free_frag(skbh);
5195
5196 /* Move on with skb release */
5197 dev_kfree_skb(skb);
5198 @@ -1906,7 +1908,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
5199 queue.destination.id = fq->channel->dpcon_id;
5200 queue.destination.type = DPNI_DEST_DPCON;
5201 queue.destination.priority = 1;
5202 - queue.user_context = (u64)fq;
5203 + queue.user_context = (u64)(uintptr_t)fq;
5204 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
5205 DPNI_QUEUE_RX, 0, fq->flowid,
5206 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
5207 @@ -1958,7 +1960,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
5208 queue.destination.id = fq->channel->dpcon_id;
5209 queue.destination.type = DPNI_DEST_DPCON;
5210 queue.destination.priority = 0;
5211 - queue.user_context = (u64)fq;
5212 + queue.user_context = (u64)(uintptr_t)fq;
5213 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
5214 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
5215 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
5216 diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
5217 index 975dbbb3abd0..7da3eb4ca4be 100644
5218 --- a/drivers/staging/ks7010/ks_hostif.c
5219 +++ b/drivers/staging/ks7010/ks_hostif.c
5220 @@ -242,9 +242,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
5221 offset = 0;
5222
5223 while (bsize > offset) {
5224 - /* DPRINTK(4, "Element ID=%d\n",*bp); */
5225 - switch (*bp) {
5226 - case 0: /* ssid */
5227 + switch (*bp) { /* Information Element ID */
5228 + case WLAN_EID_SSID:
5229 if (*(bp + 1) <= SSID_MAX_SIZE) {
5230 ap->ssid.size = *(bp + 1);
5231 } else {
5232 @@ -254,8 +253,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
5233 }
5234 memcpy(ap->ssid.body, bp + 2, ap->ssid.size);
5235 break;
5236 - case 1: /* rate */
5237 - case 50: /* ext rate */
5238 + case WLAN_EID_SUPP_RATES:
5239 + case WLAN_EID_EXT_SUPP_RATES:
5240 if ((*(bp + 1) + ap->rate_set.size) <=
5241 RATE_SET_MAX_SIZE) {
5242 memcpy(&ap->rate_set.body[ap->rate_set.size],
5243 @@ -271,9 +270,9 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
5244 (RATE_SET_MAX_SIZE - ap->rate_set.size);
5245 }
5246 break;
5247 - case 3: /* DS parameter */
5248 + case WLAN_EID_DS_PARAMS:
5249 break;
5250 - case 48: /* RSN(WPA2) */
5251 + case WLAN_EID_RSN:
5252 ap->rsn_ie.id = *bp;
5253 if (*(bp + 1) <= RSN_IE_BODY_MAX) {
5254 ap->rsn_ie.size = *(bp + 1);
5255 @@ -284,8 +283,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
5256 }
5257 memcpy(ap->rsn_ie.body, bp + 2, ap->rsn_ie.size);
5258 break;
5259 - case 221: /* WPA */
5260 - if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */
5261 + case WLAN_EID_VENDOR_SPECIFIC: /* WPA */
5262 + if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */
5263 ap->wpa_ie.id = *bp;
5264 if (*(bp + 1) <= RSN_IE_BODY_MAX) {
5265 ap->wpa_ie.size = *(bp + 1);
5266 @@ -300,18 +299,18 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
5267 }
5268 break;
5269
5270 - case 2: /* FH parameter */
5271 - case 4: /* CF parameter */
5272 - case 5: /* TIM */
5273 - case 6: /* IBSS parameter */
5274 - case 7: /* Country */
5275 - case 42: /* ERP information */
5276 - case 47: /* Reserve ID 47 Broadcom AP */
5277 + case WLAN_EID_FH_PARAMS:
5278 + case WLAN_EID_CF_PARAMS:
5279 + case WLAN_EID_TIM:
5280 + case WLAN_EID_IBSS_PARAMS:
5281 + case WLAN_EID_COUNTRY:
5282 + case WLAN_EID_ERP_INFO:
5283 break;
5284 default:
5285 DPRINTK(4, "unknown Element ID=%d\n", *bp);
5286 break;
5287 }
5288 +
5289 offset += 2; /* id & size field */
5290 offset += *(bp + 1); /* +size offset */
5291 bp += (*(bp + 1) + 2); /* pointer update */
5292 diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
5293 index 5bae8d468e23..9ac317e4b507 100644
5294 --- a/drivers/staging/ks7010/ks_hostif.h
5295 +++ b/drivers/staging/ks7010/ks_hostif.h
5296 @@ -13,6 +13,7 @@
5297 #define _KS_HOSTIF_H_
5298
5299 #include <linux/compiler.h>
5300 +#include <linux/ieee80211.h>
5301
5302 /*
5303 * HOST-MAC I/F events
5304 diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
5305 index 4368f4e9f208..f1233ca7d337 100644
5306 --- a/drivers/staging/lustre/lustre/include/obd.h
5307 +++ b/drivers/staging/lustre/lustre/include/obd.h
5308 @@ -191,7 +191,7 @@ struct client_obd {
5309 struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
5310
5311 /* the grant values are protected by loi_list_lock below */
5312 - unsigned long cl_dirty_pages; /* all _dirty_ in pahges */
5313 + unsigned long cl_dirty_pages; /* all _dirty_ in pages */
5314 unsigned long cl_dirty_max_pages; /* allowed w/o rpc */
5315 unsigned long cl_dirty_transit; /* dirty synchronous */
5316 unsigned long cl_avail_grant; /* bytes of credit for ost */
5317 diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
5318 index c2c57f65431e..ff9c2f96bada 100644
5319 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
5320 +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
5321 @@ -2695,7 +2695,7 @@ static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
5322 if (lsm && !lmm) {
5323 int i;
5324
5325 - for (i = 1; i < lsm->lsm_md_stripe_count; i++) {
5326 + for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
5327 /*
5328 * For migrating inode, the master stripe and master
5329 * object will be the same, so do not need iput, see
5330 diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
5331 index 5767ac2a7d16..a907d956443f 100644
5332 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c
5333 +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
5334 @@ -1530,7 +1530,7 @@ static int osc_enter_cache_try(struct client_obd *cli,
5335 if (rc < 0)
5336 return 0;
5337
5338 - if (cli->cl_dirty_pages <= cli->cl_dirty_max_pages &&
5339 + if (cli->cl_dirty_pages < cli->cl_dirty_max_pages &&
5340 atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
5341 osc_consume_write_grant(cli, &oap->oap_brw_page);
5342 if (transient) {
5343 diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
5344 index 3c300f7b6a62..d607c59761cf 100644
5345 --- a/drivers/staging/rtl8192u/r8192U_core.c
5346 +++ b/drivers/staging/rtl8192u/r8192U_core.c
5347 @@ -1706,6 +1706,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
5348
5349 priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
5350 priv->oldaddr = kmalloc(16, GFP_KERNEL);
5351 + if (!priv->oldaddr)
5352 + return -ENOMEM;
5353 oldaddr = priv->oldaddr;
5354 align = ((long)oldaddr) & 3;
5355 if (align) {
5356 diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
5357 index 045d577fe4f8..0ed21dd08170 100644
5358 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
5359 +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
5360 @@ -25,6 +25,10 @@ MODULE_PARM_DESC(enable_compat_alsa,
5361 static void snd_devm_unregister_child(struct device *dev, void *res)
5362 {
5363 struct device *childdev = *(struct device **)res;
5364 + struct bcm2835_chip *chip = dev_get_drvdata(childdev);
5365 + struct snd_card *card = chip->card;
5366 +
5367 + snd_card_free(card);
5368
5369 device_unregister(childdev);
5370 }
5371 @@ -50,6 +54,13 @@ static int snd_devm_add_child(struct device *dev, struct device *child)
5372 return 0;
5373 }
5374
5375 +static void snd_bcm2835_release(struct device *dev)
5376 +{
5377 + struct bcm2835_chip *chip = dev_get_drvdata(dev);
5378 +
5379 + kfree(chip);
5380 +}
5381 +
5382 static struct device *
5383 snd_create_device(struct device *parent,
5384 struct device_driver *driver,
5385 @@ -65,6 +76,7 @@ snd_create_device(struct device *parent,
5386 device_initialize(device);
5387 device->parent = parent;
5388 device->driver = driver;
5389 + device->release = snd_bcm2835_release;
5390
5391 dev_set_name(device, "%s", name);
5392
5393 @@ -75,18 +87,19 @@ snd_create_device(struct device *parent,
5394 return device;
5395 }
5396
5397 -static int snd_bcm2835_free(struct bcm2835_chip *chip)
5398 -{
5399 - kfree(chip);
5400 - return 0;
5401 -}
5402 -
5403 /* component-destructor
5404 * (see "Management of Cards and Components")
5405 */
5406 static int snd_bcm2835_dev_free(struct snd_device *device)
5407 {
5408 - return snd_bcm2835_free(device->device_data);
5409 + struct bcm2835_chip *chip = device->device_data;
5410 + struct snd_card *card = chip->card;
5411 +
5412 + /* TODO: free pcm, ctl */
5413 +
5414 + snd_device_free(card, chip);
5415 +
5416 + return 0;
5417 }
5418
5419 /* chip-specific constructor
5420 @@ -111,7 +124,7 @@ static int snd_bcm2835_create(struct snd_card *card,
5421
5422 err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
5423 if (err) {
5424 - snd_bcm2835_free(chip);
5425 + kfree(chip);
5426 return err;
5427 }
5428
5429 @@ -119,31 +132,14 @@ static int snd_bcm2835_create(struct snd_card *card,
5430 return 0;
5431 }
5432
5433 -static void snd_devm_card_free(struct device *dev, void *res)
5434 +static struct snd_card *snd_bcm2835_card_new(struct device *dev)
5435 {
5436 - struct snd_card *snd_card = *(struct snd_card **)res;
5437 -
5438 - snd_card_free(snd_card);
5439 -}
5440 -
5441 -static struct snd_card *snd_devm_card_new(struct device *dev)
5442 -{
5443 - struct snd_card **dr;
5444 struct snd_card *card;
5445 int ret;
5446
5447 - dr = devres_alloc(snd_devm_card_free, sizeof(*dr), GFP_KERNEL);
5448 - if (!dr)
5449 - return ERR_PTR(-ENOMEM);
5450 -
5451 ret = snd_card_new(dev, -1, NULL, THIS_MODULE, 0, &card);
5452 - if (ret) {
5453 - devres_free(dr);
5454 + if (ret)
5455 return ERR_PTR(ret);
5456 - }
5457 -
5458 - *dr = card;
5459 - devres_add(dev, dr);
5460
5461 return card;
5462 }
5463 @@ -260,7 +256,7 @@ static int snd_add_child_device(struct device *device,
5464 return PTR_ERR(child);
5465 }
5466
5467 - card = snd_devm_card_new(child);
5468 + card = snd_bcm2835_card_new(child);
5469 if (IS_ERR(card)) {
5470 dev_err(child, "Failed to create card");
5471 return PTR_ERR(card);
5472 @@ -302,7 +298,7 @@ static int snd_add_child_device(struct device *device,
5473 return err;
5474 }
5475
5476 - dev_set_drvdata(child, card);
5477 + dev_set_drvdata(child, chip);
5478 dev_info(child, "card created with %d channels\n", numchans);
5479
5480 return 0;
5481 diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
5482 index 804c1af6fd33..95833cbc4338 100644
5483 --- a/drivers/tty/serial/8250/8250_port.c
5484 +++ b/drivers/tty/serial/8250/8250_port.c
5485 @@ -1867,7 +1867,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
5486
5487 status = serial_port_in(port, UART_LSR);
5488
5489 - if (status & (UART_LSR_DR | UART_LSR_BI)) {
5490 + if (status & (UART_LSR_DR | UART_LSR_BI) &&
5491 + iir & UART_IIR_RDI) {
5492 if (!up->dma || handle_rx_dma(up, iir))
5493 status = serial8250_rx_chars(up, status);
5494 }
5495 diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
5496 index b88b05f8e81e..ae30398fcf56 100644
5497 --- a/drivers/tty/serial/altera_uart.c
5498 +++ b/drivers/tty/serial/altera_uart.c
5499 @@ -327,7 +327,7 @@ static int altera_uart_startup(struct uart_port *port)
5500
5501 /* Enable RX interrupts now */
5502 pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
5503 - writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
5504 + altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
5505
5506 spin_unlock_irqrestore(&port->lock, flags);
5507
5508 @@ -343,7 +343,7 @@ static void altera_uart_shutdown(struct uart_port *port)
5509
5510 /* Disable all interrupts now */
5511 pp->imr = 0;
5512 - writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
5513 + altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
5514
5515 spin_unlock_irqrestore(&port->lock, flags);
5516
5517 @@ -432,7 +432,7 @@ static void altera_uart_console_putc(struct uart_port *port, int c)
5518 ALTERA_UART_STATUS_TRDY_MSK))
5519 cpu_relax();
5520
5521 - writel(c, port->membase + ALTERA_UART_TXDATA_REG);
5522 + altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG);
5523 }
5524
5525 static void altera_uart_console_write(struct console *co, const char *s,
5526 @@ -502,13 +502,13 @@ static int __init altera_uart_earlycon_setup(struct earlycon_device *dev,
5527 return -ENODEV;
5528
5529 /* Enable RX interrupts now */
5530 - writel(ALTERA_UART_CONTROL_RRDY_MSK,
5531 - port->membase + ALTERA_UART_CONTROL_REG);
5532 + altera_uart_writel(port, ALTERA_UART_CONTROL_RRDY_MSK,
5533 + ALTERA_UART_CONTROL_REG);
5534
5535 if (dev->baud) {
5536 unsigned int baudclk = port->uartclk / dev->baud;
5537
5538 - writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG);
5539 + altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
5540 }
5541
5542 dev->con->write = altera_uart_earlycon_write;
5543 diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
5544 index 2599f9ecccfe..d904a3a345e7 100644
5545 --- a/drivers/tty/serial/arc_uart.c
5546 +++ b/drivers/tty/serial/arc_uart.c
5547 @@ -593,6 +593,11 @@ static int arc_serial_probe(struct platform_device *pdev)
5548 if (dev_id < 0)
5549 dev_id = 0;
5550
5551 + if (dev_id >= ARRAY_SIZE(arc_uart_ports)) {
5552 + dev_err(&pdev->dev, "serial%d out of range\n", dev_id);
5553 + return -EINVAL;
5554 + }
5555 +
5556 uart = &arc_uart_ports[dev_id];
5557 port = &uart->port;
5558
5559 diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
5560 index 8cf112f2efc3..51e47a63d61a 100644
5561 --- a/drivers/tty/serial/fsl_lpuart.c
5562 +++ b/drivers/tty/serial/fsl_lpuart.c
5563 @@ -2145,6 +2145,10 @@ static int lpuart_probe(struct platform_device *pdev)
5564 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
5565 return ret;
5566 }
5567 + if (ret >= ARRAY_SIZE(lpuart_ports)) {
5568 + dev_err(&pdev->dev, "serial%d out of range\n", ret);
5569 + return -EINVAL;
5570 + }
5571 sport->port.line = ret;
5572 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5573 sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
5574 diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
5575 index a33c685af990..961ab7d2add5 100644
5576 --- a/drivers/tty/serial/imx.c
5577 +++ b/drivers/tty/serial/imx.c
5578 @@ -2042,6 +2042,12 @@ static int serial_imx_probe(struct platform_device *pdev)
5579 else if (ret < 0)
5580 return ret;
5581
5582 + if (sport->port.line >= ARRAY_SIZE(imx_ports)) {
5583 + dev_err(&pdev->dev, "serial%d out of range\n",
5584 + sport->port.line);
5585 + return -EINVAL;
5586 + }
5587 +
5588 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5589 base = devm_ioremap_resource(&pdev->dev, res);
5590 if (IS_ERR(base))
5591 diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
5592 index 03d26aabb0c4..2581461f92bf 100644
5593 --- a/drivers/tty/serial/mvebu-uart.c
5594 +++ b/drivers/tty/serial/mvebu-uart.c
5595 @@ -617,7 +617,7 @@ static void wait_for_xmitr(struct uart_port *port)
5596 u32 val;
5597
5598 readl_poll_timeout_atomic(port->membase + UART_STAT, val,
5599 - (val & STAT_TX_EMP), 1, 10000);
5600 + (val & STAT_TX_RDY(port)), 1, 10000);
5601 }
5602
5603 static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
5604 diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
5605 index 079dc47aa142..caa8a41b6e71 100644
5606 --- a/drivers/tty/serial/mxs-auart.c
5607 +++ b/drivers/tty/serial/mxs-auart.c
5608 @@ -1663,6 +1663,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
5609 s->port.line = pdev->id < 0 ? 0 : pdev->id;
5610 else if (ret < 0)
5611 return ret;
5612 + if (s->port.line >= ARRAY_SIZE(auart_port)) {
5613 + dev_err(&pdev->dev, "serial%d out of range\n", s->port.line);
5614 + return -EINVAL;
5615 + }
5616
5617 if (of_id) {
5618 pdev->id_entry = of_id->data;
5619 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
5620 index f9fecc5ed0ce..3f2f8c118ce0 100644
5621 --- a/drivers/tty/serial/samsung.c
5622 +++ b/drivers/tty/serial/samsung.c
5623 @@ -1818,6 +1818,10 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
5624
5625 dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
5626
5627 + if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
5628 + dev_err(&pdev->dev, "serial%d out of range\n", index);
5629 + return -EINVAL;
5630 + }
5631 ourport = &s3c24xx_serial_ports[index];
5632
5633 ourport->drv_data = s3c24xx_get_driver_data(pdev);
5634 diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
5635 index 44adf9db38f8..ab757546c6db 100644
5636 --- a/drivers/tty/serial/sh-sci.c
5637 +++ b/drivers/tty/serial/sh-sci.c
5638 @@ -3098,6 +3098,10 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
5639 dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
5640 return NULL;
5641 }
5642 + if (id >= ARRAY_SIZE(sci_ports)) {
5643 + dev_err(&pdev->dev, "serial%d out of range\n", id);
5644 + return NULL;
5645 + }
5646
5647 sp = &sci_ports[id];
5648 *dev_id = id;
5649 diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
5650 index b9b2bc76bcac..abcb4d09a2d8 100644
5651 --- a/drivers/tty/serial/xilinx_uartps.c
5652 +++ b/drivers/tty/serial/xilinx_uartps.c
5653 @@ -1110,7 +1110,7 @@ static struct uart_port *cdns_uart_get_port(int id)
5654 struct uart_port *port;
5655
5656 /* Try the given port id if failed use default method */
5657 - if (cdns_uart_port[id].mapbase != 0) {
5658 + if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) {
5659 /* Find the next unused port */
5660 for (id = 0; id < CDNS_UART_NR_PORTS; id++)
5661 if (cdns_uart_port[id].mapbase == 0)
5662 diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
5663 index cd77af3b1565..d939b24ae92a 100644
5664 --- a/drivers/usb/dwc2/core.h
5665 +++ b/drivers/usb/dwc2/core.h
5666 @@ -217,7 +217,7 @@ struct dwc2_hsotg_ep {
5667 unsigned char dir_in;
5668 unsigned char index;
5669 unsigned char mc;
5670 - unsigned char interval;
5671 + u16 interval;
5672
5673 unsigned int halted:1;
5674 unsigned int periodic:1;
5675 diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
5676 index a5d72fcd1603..7ee7320d3c24 100644
5677 --- a/drivers/usb/dwc2/hcd.c
5678 +++ b/drivers/usb/dwc2/hcd.c
5679 @@ -989,6 +989,24 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
5680
5681 if (dbg_hc(chan))
5682 dev_vdbg(hsotg->dev, "%s()\n", __func__);
5683 +
5684 + /*
5685 + * In buffer DMA or external DMA mode channel can't be halted
5686 + * for non-split periodic channels. At the end of the next
5687 + * uframe/frame (in the worst case), the core generates a channel
5688 + * halted and disables the channel automatically.
5689 + */
5690 + if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
5691 + hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
5692 + if (!chan->do_split &&
5693 + (chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
5694 + chan->ep_type == USB_ENDPOINT_XFER_INT)) {
5695 + dev_err(hsotg->dev, "%s() Channel can't be halted\n",
5696 + __func__);
5697 + return;
5698 + }
5699 + }
5700 +
5701 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
5702 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
5703
5704 @@ -2322,10 +2340,22 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
5705 */
5706 static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
5707 {
5708 - u32 hcfg, hfir, otgctl;
5709 + u32 hcfg, hfir, otgctl, usbcfg;
5710
5711 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
5712
5713 + /* Set HS/FS Timeout Calibration to 7 (max available value).
5714 + * The number of PHY clocks that the application programs in
5715 + * this field is added to the high/full speed interpacket timeout
5716 + * duration in the core to account for any additional delays
5717 + * introduced by the PHY. This can be required, because the delay
5718 + * introduced by the PHY in generating the linestate condition
5719 + * can vary from one PHY to another.
5720 + */
5721 + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
5722 + usbcfg |= GUSBCFG_TOUTCAL(7);
5723 + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
5724 +
5725 /* Restart the Phy Clock */
5726 dwc2_writel(0, hsotg->regs + PCGCTL);
5727
5728 diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
5729 index 7ac725038f8d..025bc68094fc 100644
5730 --- a/drivers/usb/dwc3/Makefile
5731 +++ b/drivers/usb/dwc3/Makefile
5732 @@ -6,7 +6,7 @@ obj-$(CONFIG_USB_DWC3) += dwc3.o
5733
5734 dwc3-y := core.o
5735
5736 -ifneq ($(CONFIG_FTRACE),)
5737 +ifneq ($(CONFIG_TRACING),)
5738 dwc3-y += trace.o
5739 endif
5740
5741 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
5742 index df4569df7eaf..ddef1ae0c708 100644
5743 --- a/drivers/usb/dwc3/core.c
5744 +++ b/drivers/usb/dwc3/core.c
5745 @@ -232,7 +232,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
5746 do {
5747 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
5748 if (!(reg & DWC3_DCTL_CSFTRST))
5749 - return 0;
5750 + goto done;
5751
5752 udelay(1);
5753 } while (--retries);
5754 @@ -241,6 +241,17 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
5755 phy_exit(dwc->usb2_generic_phy);
5756
5757 return -ETIMEDOUT;
5758 +
5759 +done:
5760 + /*
5761 + * For DWC_usb31 controller, once DWC3_DCTL_CSFTRST bit is cleared,
5762 + * we must wait at least 50ms before accessing the PHY domain
5763 + * (synchronization delay). DWC_usb31 programming guide section 1.3.2.
5764 + */
5765 + if (dwc3_is_usb31(dwc))
5766 + msleep(50);
5767 +
5768 + return 0;
5769 }
5770
5771 /*
5772 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
5773 index 860d2bc184d1..cdd609930443 100644
5774 --- a/drivers/usb/dwc3/core.h
5775 +++ b/drivers/usb/dwc3/core.h
5776 @@ -241,6 +241,8 @@
5777 #define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1)
5778
5779 /* Global TX Fifo Size Register */
5780 +#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */
5781 +#define DWC31_GTXFIFOSIZ_TXFDEF(n) ((n) & 0x7fff) /* DWC_usb31 only */
5782 #define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
5783 #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
5784
5785 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
5786 index 77c7ecca816a..b8b629c615d3 100644
5787 --- a/drivers/usb/gadget/composite.c
5788 +++ b/drivers/usb/gadget/composite.c
5789 @@ -1422,7 +1422,7 @@ static int count_ext_compat(struct usb_configuration *c)
5790 return res;
5791 }
5792
5793 -static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
5794 +static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
5795 {
5796 int i, count;
5797
5798 @@ -1449,10 +1449,12 @@ static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
5799 buf += 23;
5800 }
5801 count += 24;
5802 - if (count >= 4096)
5803 - return;
5804 + if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
5805 + return count;
5806 }
5807 }
5808 +
5809 + return count;
5810 }
5811
5812 static int count_ext_prop(struct usb_configuration *c, int interface)
5813 @@ -1497,25 +1499,20 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
5814 struct usb_os_desc *d;
5815 struct usb_os_desc_ext_prop *ext_prop;
5816 int j, count, n, ret;
5817 - u8 *start = buf;
5818
5819 f = c->interface[interface];
5820 + count = 10; /* header length */
5821 for (j = 0; j < f->os_desc_n; ++j) {
5822 if (interface != f->os_desc_table[j].if_id)
5823 continue;
5824 d = f->os_desc_table[j].os_desc;
5825 if (d)
5826 list_for_each_entry(ext_prop, &d->ext_prop, entry) {
5827 - /* 4kB minus header length */
5828 - n = buf - start;
5829 - if (n >= 4086)
5830 - return 0;
5831 -
5832 - count = ext_prop->data_len +
5833 + n = ext_prop->data_len +
5834 ext_prop->name_len + 14;
5835 - if (count > 4086 - n)
5836 - return -EINVAL;
5837 - usb_ext_prop_put_size(buf, count);
5838 + if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ)
5839 + return count;
5840 + usb_ext_prop_put_size(buf, n);
5841 usb_ext_prop_put_type(buf, ext_prop->type);
5842 ret = usb_ext_prop_put_name(buf, ext_prop->name,
5843 ext_prop->name_len);
5844 @@ -1541,11 +1538,12 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
5845 default:
5846 return -EINVAL;
5847 }
5848 - buf += count;
5849 + buf += n;
5850 + count += n;
5851 }
5852 }
5853
5854 - return 0;
5855 + return count;
5856 }
5857
5858 /*
5859 @@ -1827,6 +1825,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
5860 req->complete = composite_setup_complete;
5861 buf = req->buf;
5862 os_desc_cfg = cdev->os_desc_config;
5863 + w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ);
5864 memset(buf, 0, w_length);
5865 buf[5] = 0x01;
5866 switch (ctrl->bRequestType & USB_RECIP_MASK) {
5867 @@ -1850,8 +1849,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
5868 count += 16; /* header */
5869 put_unaligned_le32(count, buf);
5870 buf += 16;
5871 - fill_ext_compat(os_desc_cfg, buf);
5872 - value = w_length;
5873 + value = fill_ext_compat(os_desc_cfg, buf);
5874 + value = min_t(u16, w_length, value);
5875 }
5876 break;
5877 case USB_RECIP_INTERFACE:
5878 @@ -1880,8 +1879,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
5879 interface, buf);
5880 if (value < 0)
5881 return value;
5882 -
5883 - value = w_length;
5884 + value = min_t(u16, w_length, value);
5885 }
5886 break;
5887 }
5888 @@ -2156,8 +2154,8 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
5889 goto end;
5890 }
5891
5892 - /* OS feature descriptor length <= 4kB */
5893 - cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
5894 + cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ,
5895 + GFP_KERNEL);
5896 if (!cdev->os_desc_req->buf) {
5897 ret = -ENOMEM;
5898 usb_ep_free_request(ep0, cdev->os_desc_req);
5899 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
5900 index d2428a9e8900..0294e4f18873 100644
5901 --- a/drivers/usb/gadget/function/f_fs.c
5902 +++ b/drivers/usb/gadget/function/f_fs.c
5903 @@ -758,9 +758,13 @@ static void ffs_user_copy_worker(struct work_struct *work)
5904 bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
5905
5906 if (io_data->read && ret > 0) {
5907 + mm_segment_t oldfs = get_fs();
5908 +
5909 + set_fs(USER_DS);
5910 use_mm(io_data->mm);
5911 ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
5912 unuse_mm(io_data->mm);
5913 + set_fs(oldfs);
5914 }
5915
5916 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
5917 @@ -3238,7 +3242,7 @@ static int ffs_func_setup(struct usb_function *f,
5918 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
5919 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
5920
5921 - return 0;
5922 + return USB_GADGET_DELAYED_STATUS;
5923 }
5924
5925 static bool ffs_func_req_match(struct usb_function *f,
5926 diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
5927 index 26601bf4e7a9..70023d401079 100644
5928 --- a/drivers/usb/gadget/udc/goku_udc.h
5929 +++ b/drivers/usb/gadget/udc/goku_udc.h
5930 @@ -25,7 +25,7 @@ struct goku_udc_regs {
5931 # define INT_EP1DATASET 0x00040
5932 # define INT_EP2DATASET 0x00080
5933 # define INT_EP3DATASET 0x00100
5934 -#define INT_EPnNAK(n) (0x00100 < (n)) /* 0 < n < 4 */
5935 +#define INT_EPnNAK(n) (0x00100 << (n)) /* 0 < n < 4 */
5936 # define INT_EP1NAK 0x00200
5937 # define INT_EP2NAK 0x00400
5938 # define INT_EP3NAK 0x00800
5939 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
5940 index 332420d10be9..e5ace8995b3b 100644
5941 --- a/drivers/usb/host/xhci-mem.c
5942 +++ b/drivers/usb/host/xhci-mem.c
5943 @@ -913,6 +913,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
5944 if (dev->out_ctx)
5945 xhci_free_container_ctx(xhci, dev->out_ctx);
5946
5947 + if (dev->udev && dev->udev->slot_id)
5948 + dev->udev->slot_id = 0;
5949 kfree(xhci->devs[slot_id]);
5950 xhci->devs[slot_id] = NULL;
5951 }
5952 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5953 index b60a02c50b89..bd281a96485c 100644
5954 --- a/drivers/usb/host/xhci.c
5955 +++ b/drivers/usb/host/xhci.c
5956 @@ -4769,6 +4769,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5957 * quirks
5958 */
5959 struct device *dev = hcd->self.sysdev;
5960 + unsigned int minor_rev;
5961 int retval;
5962
5963 /* Accept arbitrarily long scatter-gather lists */
5964 @@ -4796,12 +4797,19 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5965 */
5966 hcd->has_tt = 1;
5967 } else {
5968 - /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
5969 - if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
5970 - xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
5971 + /*
5972 + * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
5973 + * minor revision instead of sbrn
5974 + */
5975 + minor_rev = xhci->usb3_rhub.min_rev;
5976 + if (minor_rev) {
5977 hcd->speed = HCD_USB31;
5978 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5979 }
5980 + xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n",
5981 + minor_rev,
5982 + minor_rev ? "Enhanced" : "");
5983 +
5984 /* xHCI private pointer was set in xhci_pci_probe for the second
5985 * registered roothub.
5986 */
5987 diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
5988 index eeefa29f8aa2..a20b65cb6678 100644
5989 --- a/drivers/usb/usbip/Kconfig
5990 +++ b/drivers/usb/usbip/Kconfig
5991 @@ -27,7 +27,7 @@ config USBIP_VHCI_HCD
5992
5993 config USBIP_VHCI_HC_PORTS
5994 int "Number of ports per USB/IP virtual host controller"
5995 - range 1 31
5996 + range 1 15
5997 default 8
5998 depends on USBIP_VHCI_HCD
5999 ---help---
6000 diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
6001 index 9b2ac55ac34f..8cf2aa973b50 100644
6002 --- a/fs/ext2/inode.c
6003 +++ b/fs/ext2/inode.c
6004 @@ -1261,21 +1261,11 @@ static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
6005
6006 static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
6007 {
6008 - /*
6009 - * XXX: it seems like a bug here that we don't allow
6010 - * IS_APPEND inode to have blocks-past-i_size trimmed off.
6011 - * review and fix this.
6012 - *
6013 - * Also would be nice to be able to handle IO errors and such,
6014 - * but that's probably too much to ask.
6015 - */
6016 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
6017 S_ISLNK(inode->i_mode)))
6018 return;
6019 if (ext2_inode_is_fast_symlink(inode))
6020 return;
6021 - if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
6022 - return;
6023
6024 dax_sem_down_write(EXT2_I(inode));
6025 __ext2_truncate_blocks(inode, offset);
6026 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
6027 index 513c357c734b..a6c0f54c48c3 100644
6028 --- a/fs/hfsplus/super.c
6029 +++ b/fs/hfsplus/super.c
6030 @@ -588,6 +588,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
6031 return 0;
6032
6033 out_put_hidden_dir:
6034 + cancel_delayed_work_sync(&sbi->sync_work);
6035 iput(sbi->hidden_dir);
6036 out_put_root:
6037 dput(sb->s_root);
6038 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
6039 index 1352b1b990a7..3ebb2f6ace79 100644
6040 --- a/include/linux/mlx5/driver.h
6041 +++ b/include/linux/mlx5/driver.h
6042 @@ -1271,17 +1271,7 @@ enum {
6043 static inline const struct cpumask *
6044 mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
6045 {
6046 - struct irq_desc *desc;
6047 - unsigned int irq;
6048 - int eqn;
6049 - int err;
6050 -
6051 - err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
6052 - if (err)
6053 - return NULL;
6054 -
6055 - desc = irq_to_desc(irq);
6056 - return desc->affinity_hint;
6057 + return dev->priv.irq_info[vector].mask;
6058 }
6059
6060 #endif /* MLX5_DRIVER_H */
6061 diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
6062 index cef0e44601f8..4b6b9283fa7b 100644
6063 --- a/include/linux/usb/composite.h
6064 +++ b/include/linux/usb/composite.h
6065 @@ -54,6 +54,9 @@
6066 /* big enough to hold our biggest descriptor */
6067 #define USB_COMP_EP0_BUFSIZ 1024
6068
6069 +/* OS feature descriptor length <= 4kB */
6070 +#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096
6071 +
6072 #define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1)
6073 struct usb_configuration;
6074
6075 diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
6076 index cb85eddb47ea..eb7853c1a23b 100644
6077 --- a/include/scsi/scsi.h
6078 +++ b/include/scsi/scsi.h
6079 @@ -47,6 +47,8 @@ static inline int scsi_status_is_good(int status)
6080 */
6081 status &= 0xfe;
6082 return ((status == SAM_STAT_GOOD) ||
6083 + (status == SAM_STAT_CONDITION_MET) ||
6084 + /* Next two "intermediate" statuses are obsolete in SAM-4 */
6085 (status == SAM_STAT_INTERMEDIATE) ||
6086 (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
6087 /* FIXME: this is obsolete in SAM-3 */
6088 diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
6089 index c587a61c32bf..2e08c6f3ac3e 100644
6090 --- a/include/uapi/linux/nl80211.h
6091 +++ b/include/uapi/linux/nl80211.h
6092 @@ -2618,6 +2618,8 @@ enum nl80211_attrs {
6093 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
6094 #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
6095
6096 +#define NL80211_WIPHY_NAME_MAXLEN 128
6097 +
6098 #define NL80211_MAX_SUPP_RATES 32
6099 #define NL80211_MAX_SUPP_HT_RATES 77
6100 #define NL80211_MAX_SUPP_REG_RULES 64
6101 diff --git a/net/core/dev.c b/net/core/dev.c
6102 index 3e550507e9f0..ace13bea3e50 100644
6103 --- a/net/core/dev.c
6104 +++ b/net/core/dev.c
6105 @@ -2097,7 +2097,7 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
6106 int i, j;
6107
6108 for (i = count, j = offset; i--; j++) {
6109 - if (!remove_xps_queue(dev_maps, cpu, j))
6110 + if (!remove_xps_queue(dev_maps, tci, j))
6111 break;
6112 }
6113
6114 diff --git a/net/core/sock.c b/net/core/sock.c
6115 index 85b0b64e7f9d..81c2df84f953 100644
6116 --- a/net/core/sock.c
6117 +++ b/net/core/sock.c
6118 @@ -1603,7 +1603,7 @@ static void __sk_free(struct sock *sk)
6119 if (likely(sk->sk_net_refcnt))
6120 sock_inuse_add(sock_net(sk), -1);
6121
6122 - if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
6123 + if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
6124 sock_diag_broadcast_destroy(sk);
6125 else
6126 sk_destruct(sk);
6127 diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
6128 index adf50fbc4c13..47725250b4ca 100644
6129 --- a/net/dsa/dsa2.c
6130 +++ b/net/dsa/dsa2.c
6131 @@ -258,11 +258,13 @@ static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
6132 static int dsa_port_setup(struct dsa_port *dp)
6133 {
6134 struct dsa_switch *ds = dp->ds;
6135 - int err;
6136 + int err = 0;
6137
6138 memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
6139
6140 - err = devlink_port_register(ds->devlink, &dp->devlink_port, dp->index);
6141 + if (dp->type != DSA_PORT_TYPE_UNUSED)
6142 + err = devlink_port_register(ds->devlink, &dp->devlink_port,
6143 + dp->index);
6144 if (err)
6145 return err;
6146
6147 @@ -293,7 +295,8 @@ static int dsa_port_setup(struct dsa_port *dp)
6148
6149 static void dsa_port_teardown(struct dsa_port *dp)
6150 {
6151 - devlink_port_unregister(&dp->devlink_port);
6152 + if (dp->type != DSA_PORT_TYPE_UNUSED)
6153 + devlink_port_unregister(&dp->devlink_port);
6154
6155 switch (dp->type) {
6156 case DSA_PORT_TYPE_UNUSED:
6157 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
6158 index 66340ab750e6..e7daec7c7421 100644
6159 --- a/net/ipv4/ip_output.c
6160 +++ b/net/ipv4/ip_output.c
6161 @@ -1040,7 +1040,8 @@ static int __ip_append_data(struct sock *sk,
6162 if (copy > length)
6163 copy = length;
6164
6165 - if (!(rt->dst.dev->features&NETIF_F_SG)) {
6166 + if (!(rt->dst.dev->features&NETIF_F_SG) &&
6167 + skb_tailroom(skb) >= copy) {
6168 unsigned int off;
6169
6170 off = skb->len;
6171 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
6172 index 6818042cd8a9..3a0211692c28 100644
6173 --- a/net/ipv4/tcp_output.c
6174 +++ b/net/ipv4/tcp_output.c
6175 @@ -2860,8 +2860,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
6176 return -EBUSY;
6177
6178 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
6179 - if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
6180 - BUG();
6181 + if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
6182 + WARN_ON_ONCE(1);
6183 + return -EINVAL;
6184 + }
6185 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
6186 return -ENOMEM;
6187 }
6188 @@ -3369,6 +3371,7 @@ static void tcp_connect_init(struct sock *sk)
6189 sock_reset_flag(sk, SOCK_DONE);
6190 tp->snd_wnd = 0;
6191 tcp_init_wl(tp, 0);
6192 + tcp_write_queue_purge(sk);
6193 tp->snd_una = tp->write_seq;
6194 tp->snd_sml = tp->write_seq;
6195 tp->snd_up = tp->write_seq;
6196 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
6197 index 197fcae855ca..9539bdb15edb 100644
6198 --- a/net/ipv6/ip6_gre.c
6199 +++ b/net/ipv6/ip6_gre.c
6200 @@ -71,6 +71,7 @@ struct ip6gre_net {
6201 struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
6202
6203 struct ip6_tnl __rcu *collect_md_tun;
6204 + struct ip6_tnl __rcu *collect_md_tun_erspan;
6205 struct net_device *fb_tunnel_dev;
6206 };
6207
6208 @@ -81,6 +82,7 @@ static int ip6gre_tunnel_init(struct net_device *dev);
6209 static void ip6gre_tunnel_setup(struct net_device *dev);
6210 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
6211 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
6212 +static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
6213
6214 /* Tunnel hash table */
6215
6216 @@ -232,7 +234,12 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
6217 if (cand)
6218 return cand;
6219
6220 - t = rcu_dereference(ign->collect_md_tun);
6221 + if (gre_proto == htons(ETH_P_ERSPAN) ||
6222 + gre_proto == htons(ETH_P_ERSPAN2))
6223 + t = rcu_dereference(ign->collect_md_tun_erspan);
6224 + else
6225 + t = rcu_dereference(ign->collect_md_tun);
6226 +
6227 if (t && t->dev->flags & IFF_UP)
6228 return t;
6229
6230 @@ -261,6 +268,31 @@ static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
6231 return &ign->tunnels[prio][h];
6232 }
6233
6234 +static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
6235 +{
6236 + if (t->parms.collect_md)
6237 + rcu_assign_pointer(ign->collect_md_tun, t);
6238 +}
6239 +
6240 +static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
6241 +{
6242 + if (t->parms.collect_md)
6243 + rcu_assign_pointer(ign->collect_md_tun_erspan, t);
6244 +}
6245 +
6246 +static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
6247 +{
6248 + if (t->parms.collect_md)
6249 + rcu_assign_pointer(ign->collect_md_tun, NULL);
6250 +}
6251 +
6252 +static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
6253 + struct ip6_tnl *t)
6254 +{
6255 + if (t->parms.collect_md)
6256 + rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
6257 +}
6258 +
6259 static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
6260 const struct ip6_tnl *t)
6261 {
6262 @@ -271,9 +303,6 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
6263 {
6264 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
6265
6266 - if (t->parms.collect_md)
6267 - rcu_assign_pointer(ign->collect_md_tun, t);
6268 -
6269 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
6270 rcu_assign_pointer(*tp, t);
6271 }
6272 @@ -283,9 +312,6 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
6273 struct ip6_tnl __rcu **tp;
6274 struct ip6_tnl *iter;
6275
6276 - if (t->parms.collect_md)
6277 - rcu_assign_pointer(ign->collect_md_tun, NULL);
6278 -
6279 for (tp = ip6gre_bucket(ign, t);
6280 (iter = rtnl_dereference(*tp)) != NULL;
6281 tp = &iter->next) {
6282 @@ -374,11 +400,23 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
6283 return NULL;
6284 }
6285
6286 +static void ip6erspan_tunnel_uninit(struct net_device *dev)
6287 +{
6288 + struct ip6_tnl *t = netdev_priv(dev);
6289 + struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
6290 +
6291 + ip6erspan_tunnel_unlink_md(ign, t);
6292 + ip6gre_tunnel_unlink(ign, t);
6293 + dst_cache_reset(&t->dst_cache);
6294 + dev_put(dev);
6295 +}
6296 +
6297 static void ip6gre_tunnel_uninit(struct net_device *dev)
6298 {
6299 struct ip6_tnl *t = netdev_priv(dev);
6300 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
6301
6302 + ip6gre_tunnel_unlink_md(ign, t);
6303 ip6gre_tunnel_unlink(ign, t);
6304 dst_cache_reset(&t->dst_cache);
6305 dev_put(dev);
6306 @@ -701,6 +739,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
6307 if (tunnel->parms.o_flags & TUNNEL_SEQ)
6308 tunnel->o_seqno++;
6309
6310 + if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
6311 + return -ENOMEM;
6312 +
6313 /* Push GRE header. */
6314 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
6315
6316 @@ -905,7 +946,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
6317 truncate = true;
6318 }
6319
6320 - if (skb_cow_head(skb, dev->needed_headroom))
6321 + if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
6322 goto tx_err;
6323
6324 t->parms.o_flags &= ~TUNNEL_KEY;
6325 @@ -1016,12 +1057,11 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
6326 return NETDEV_TX_OK;
6327 }
6328
6329 -static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
6330 +static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
6331 {
6332 struct net_device *dev = t->dev;
6333 struct __ip6_tnl_parm *p = &t->parms;
6334 struct flowi6 *fl6 = &t->fl.u.ip6;
6335 - int t_hlen;
6336
6337 if (dev->type != ARPHRD_ETHER) {
6338 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
6339 @@ -1048,12 +1088,13 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
6340 dev->flags |= IFF_POINTOPOINT;
6341 else
6342 dev->flags &= ~IFF_POINTOPOINT;
6343 +}
6344
6345 - t->tun_hlen = gre_calc_hlen(t->parms.o_flags);
6346 -
6347 - t->hlen = t->encap_hlen + t->tun_hlen;
6348 -
6349 - t_hlen = t->hlen + sizeof(struct ipv6hdr);
6350 +static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
6351 + int t_hlen)
6352 +{
6353 + const struct __ip6_tnl_parm *p = &t->parms;
6354 + struct net_device *dev = t->dev;
6355
6356 if (p->flags & IP6_TNL_F_CAP_XMIT) {
6357 int strict = (ipv6_addr_type(&p->raddr) &
6358 @@ -1085,8 +1126,26 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
6359 }
6360 }
6361
6362 -static int ip6gre_tnl_change(struct ip6_tnl *t,
6363 - const struct __ip6_tnl_parm *p, int set_mtu)
6364 +static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
6365 +{
6366 + int t_hlen;
6367 +
6368 + tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
6369 + tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
6370 +
6371 + t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
6372 + tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
6373 + return t_hlen;
6374 +}
6375 +
6376 +static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
6377 +{
6378 + ip6gre_tnl_link_config_common(t);
6379 + ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
6380 +}
6381 +
6382 +static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
6383 + const struct __ip6_tnl_parm *p)
6384 {
6385 t->parms.laddr = p->laddr;
6386 t->parms.raddr = p->raddr;
6387 @@ -1102,6 +1161,12 @@ static int ip6gre_tnl_change(struct ip6_tnl *t,
6388 t->parms.o_flags = p->o_flags;
6389 t->parms.fwmark = p->fwmark;
6390 dst_cache_reset(&t->dst_cache);
6391 +}
6392 +
6393 +static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
6394 + int set_mtu)
6395 +{
6396 + ip6gre_tnl_copy_tnl_parm(t, p);
6397 ip6gre_tnl_link_config(t, set_mtu);
6398 return 0;
6399 }
6400 @@ -1378,11 +1443,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
6401 return ret;
6402 }
6403
6404 - tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
6405 - tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
6406 - t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
6407 -
6408 - dev->hard_header_len = LL_MAX_HEADER + t_hlen;
6409 + t_hlen = ip6gre_calc_hlen(tunnel);
6410 dev->mtu = ETH_DATA_LEN - t_hlen;
6411 if (dev->type == ARPHRD_ETHER)
6412 dev->mtu -= ETH_HLEN;
6413 @@ -1723,6 +1784,19 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
6414 .ndo_get_iflink = ip6_tnl_get_iflink,
6415 };
6416
6417 +static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
6418 +{
6419 + int t_hlen;
6420 +
6421 + tunnel->tun_hlen = 8;
6422 + tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
6423 + erspan_hdr_len(tunnel->parms.erspan_ver);
6424 +
6425 + t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
6426 + tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
6427 + return t_hlen;
6428 +}
6429 +
6430 static int ip6erspan_tap_init(struct net_device *dev)
6431 {
6432 struct ip6_tnl *tunnel;
6433 @@ -1746,12 +1820,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
6434 return ret;
6435 }
6436
6437 - tunnel->tun_hlen = 8;
6438 - tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
6439 - erspan_hdr_len(tunnel->parms.erspan_ver);
6440 - t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
6441 -
6442 - dev->hard_header_len = LL_MAX_HEADER + t_hlen;
6443 + t_hlen = ip6erspan_calc_hlen(tunnel);
6444 dev->mtu = ETH_DATA_LEN - t_hlen;
6445 if (dev->type == ARPHRD_ETHER)
6446 dev->mtu -= ETH_HLEN;
6447 @@ -1760,14 +1829,14 @@ static int ip6erspan_tap_init(struct net_device *dev)
6448
6449 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
6450 tunnel = netdev_priv(dev);
6451 - ip6gre_tnl_link_config(tunnel, 1);
6452 + ip6erspan_tnl_link_config(tunnel, 1);
6453
6454 return 0;
6455 }
6456
6457 static const struct net_device_ops ip6erspan_netdev_ops = {
6458 .ndo_init = ip6erspan_tap_init,
6459 - .ndo_uninit = ip6gre_tunnel_uninit,
6460 + .ndo_uninit = ip6erspan_tunnel_uninit,
6461 .ndo_start_xmit = ip6erspan_tunnel_xmit,
6462 .ndo_set_mac_address = eth_mac_addr,
6463 .ndo_validate_addr = eth_validate_addr,
6464 @@ -1825,13 +1894,11 @@ static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
6465 return ret;
6466 }
6467
6468 -static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
6469 - struct nlattr *tb[], struct nlattr *data[],
6470 - struct netlink_ext_ack *extack)
6471 +static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
6472 + struct nlattr *tb[], struct nlattr *data[],
6473 + struct netlink_ext_ack *extack)
6474 {
6475 struct ip6_tnl *nt;
6476 - struct net *net = dev_net(dev);
6477 - struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
6478 struct ip_tunnel_encap ipencap;
6479 int err;
6480
6481 @@ -1844,16 +1911,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
6482 return err;
6483 }
6484
6485 - ip6gre_netlink_parms(data, &nt->parms);
6486 -
6487 - if (nt->parms.collect_md) {
6488 - if (rtnl_dereference(ign->collect_md_tun))
6489 - return -EEXIST;
6490 - } else {
6491 - if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
6492 - return -EEXIST;
6493 - }
6494 -
6495 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
6496 eth_hw_addr_random(dev);
6497
6498 @@ -1864,51 +1921,94 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
6499 if (err)
6500 goto out;
6501
6502 - ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
6503 -
6504 if (tb[IFLA_MTU])
6505 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
6506
6507 dev_hold(dev);
6508 - ip6gre_tunnel_link(ign, nt);
6509
6510 out:
6511 return err;
6512 }
6513
6514 -static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
6515 - struct nlattr *data[],
6516 - struct netlink_ext_ack *extack)
6517 +static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
6518 + struct nlattr *tb[], struct nlattr *data[],
6519 + struct netlink_ext_ack *extack)
6520 +{
6521 + struct ip6_tnl *nt = netdev_priv(dev);
6522 + struct net *net = dev_net(dev);
6523 + struct ip6gre_net *ign;
6524 + int err;
6525 +
6526 + ip6gre_netlink_parms(data, &nt->parms);
6527 + ign = net_generic(net, ip6gre_net_id);
6528 +
6529 + if (nt->parms.collect_md) {
6530 + if (rtnl_dereference(ign->collect_md_tun))
6531 + return -EEXIST;
6532 + } else {
6533 + if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
6534 + return -EEXIST;
6535 + }
6536 +
6537 + err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
6538 + if (!err) {
6539 + ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
6540 + ip6gre_tunnel_link_md(ign, nt);
6541 + ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
6542 + }
6543 + return err;
6544 +}
6545 +
6546 +static struct ip6_tnl *
6547 +ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
6548 + struct nlattr *data[], struct __ip6_tnl_parm *p_p,
6549 + struct netlink_ext_ack *extack)
6550 {
6551 struct ip6_tnl *t, *nt = netdev_priv(dev);
6552 struct net *net = nt->net;
6553 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
6554 - struct __ip6_tnl_parm p;
6555 struct ip_tunnel_encap ipencap;
6556
6557 if (dev == ign->fb_tunnel_dev)
6558 - return -EINVAL;
6559 + return ERR_PTR(-EINVAL);
6560
6561 if (ip6gre_netlink_encap_parms(data, &ipencap)) {
6562 int err = ip6_tnl_encap_setup(nt, &ipencap);
6563
6564 if (err < 0)
6565 - return err;
6566 + return ERR_PTR(err);
6567 }
6568
6569 - ip6gre_netlink_parms(data, &p);
6570 + ip6gre_netlink_parms(data, p_p);
6571
6572 - t = ip6gre_tunnel_locate(net, &p, 0);
6573 + t = ip6gre_tunnel_locate(net, p_p, 0);
6574
6575 if (t) {
6576 if (t->dev != dev)
6577 - return -EEXIST;
6578 + return ERR_PTR(-EEXIST);
6579 } else {
6580 t = nt;
6581 }
6582
6583 + return t;
6584 +}
6585 +
6586 +static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
6587 + struct nlattr *data[],
6588 + struct netlink_ext_ack *extack)
6589 +{
6590 + struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
6591 + struct __ip6_tnl_parm p;
6592 + struct ip6_tnl *t;
6593 +
6594 + t = ip6gre_changelink_common(dev, tb, data, &p, extack);
6595 + if (IS_ERR(t))
6596 + return PTR_ERR(t);
6597 +
6598 + ip6gre_tunnel_unlink_md(ign, t);
6599 ip6gre_tunnel_unlink(ign, t);
6600 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
6601 + ip6gre_tunnel_link_md(ign, t);
6602 ip6gre_tunnel_link(ign, t);
6603 return 0;
6604 }
6605 @@ -2058,6 +2158,69 @@ static void ip6erspan_tap_setup(struct net_device *dev)
6606 netif_keep_dst(dev);
6607 }
6608
6609 +static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
6610 + struct nlattr *tb[], struct nlattr *data[],
6611 + struct netlink_ext_ack *extack)
6612 +{
6613 + struct ip6_tnl *nt = netdev_priv(dev);
6614 + struct net *net = dev_net(dev);
6615 + struct ip6gre_net *ign;
6616 + int err;
6617 +
6618 + ip6gre_netlink_parms(data, &nt->parms);
6619 + ign = net_generic(net, ip6gre_net_id);
6620 +
6621 + if (nt->parms.collect_md) {
6622 + if (rtnl_dereference(ign->collect_md_tun_erspan))
6623 + return -EEXIST;
6624 + } else {
6625 + if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
6626 + return -EEXIST;
6627 + }
6628 +
6629 + err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
6630 + if (!err) {
6631 + ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
6632 + ip6erspan_tunnel_link_md(ign, nt);
6633 + ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
6634 + }
6635 + return err;
6636 +}
6637 +
6638 +static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
6639 +{
6640 + ip6gre_tnl_link_config_common(t);
6641 + ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
6642 +}
6643 +
6644 +static int ip6erspan_tnl_change(struct ip6_tnl *t,
6645 + const struct __ip6_tnl_parm *p, int set_mtu)
6646 +{
6647 + ip6gre_tnl_copy_tnl_parm(t, p);
6648 + ip6erspan_tnl_link_config(t, set_mtu);
6649 + return 0;
6650 +}
6651 +
6652 +static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
6653 + struct nlattr *data[],
6654 + struct netlink_ext_ack *extack)
6655 +{
6656 + struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
6657 + struct __ip6_tnl_parm p;
6658 + struct ip6_tnl *t;
6659 +
6660 + t = ip6gre_changelink_common(dev, tb, data, &p, extack);
6661 + if (IS_ERR(t))
6662 + return PTR_ERR(t);
6663 +
6664 + ip6gre_tunnel_unlink_md(ign, t);
6665 + ip6gre_tunnel_unlink(ign, t);
6666 + ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
6667 + ip6erspan_tunnel_link_md(ign, t);
6668 + ip6gre_tunnel_link(ign, t);
6669 + return 0;
6670 +}
6671 +
6672 static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
6673 .kind = "ip6gre",
6674 .maxtype = IFLA_GRE_MAX,
6675 @@ -2094,8 +2257,8 @@ static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
6676 .priv_size = sizeof(struct ip6_tnl),
6677 .setup = ip6erspan_tap_setup,
6678 .validate = ip6erspan_tap_validate,
6679 - .newlink = ip6gre_newlink,
6680 - .changelink = ip6gre_changelink,
6681 + .newlink = ip6erspan_newlink,
6682 + .changelink = ip6erspan_changelink,
6683 .get_size = ip6gre_get_size,
6684 .fill_info = ip6gre_fill_info,
6685 .get_link_net = ip6_tnl_get_link_net,
6686 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
6687 index 4065ae0c32a0..072333760a52 100644
6688 --- a/net/ipv6/ip6_output.c
6689 +++ b/net/ipv6/ip6_output.c
6690 @@ -1489,7 +1489,8 @@ static int __ip6_append_data(struct sock *sk,
6691 if (copy > length)
6692 copy = length;
6693
6694 - if (!(rt->dst.dev->features&NETIF_F_SG)) {
6695 + if (!(rt->dst.dev->features&NETIF_F_SG) &&
6696 + skb_tailroom(skb) >= copy) {
6697 unsigned int off;
6698
6699 off = skb->len;
6700 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
6701 index 3b43b1fcd618..c6a2dd890de3 100644
6702 --- a/net/packet/af_packet.c
6703 +++ b/net/packet/af_packet.c
6704 @@ -2903,13 +2903,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
6705 if (skb == NULL)
6706 goto out_unlock;
6707
6708 - skb_set_network_header(skb, reserve);
6709 + skb_reset_network_header(skb);
6710
6711 err = -EINVAL;
6712 if (sock->type == SOCK_DGRAM) {
6713 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
6714 if (unlikely(offset < 0))
6715 goto out_free;
6716 + } else if (reserve) {
6717 + skb_push(skb, reserve);
6718 }
6719
6720 /* Returns -EFAULT on error */
6721 diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
6722 index c49cb61adedf..64ca017f2e00 100644
6723 --- a/net/sched/act_vlan.c
6724 +++ b/net/sched/act_vlan.c
6725 @@ -161,6 +161,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
6726 case htons(ETH_P_8021AD):
6727 break;
6728 default:
6729 + if (exists)
6730 + tcf_idr_release(*a, bind);
6731 return -EPROTONOSUPPORT;
6732 }
6733 } else {
6734 diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
6735 index 16644b3d2362..56c181c3feeb 100644
6736 --- a/net/sched/sch_red.c
6737 +++ b/net/sched/sch_red.c
6738 @@ -222,10 +222,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
6739 extack);
6740 if (IS_ERR(child))
6741 return PTR_ERR(child);
6742 - }
6743
6744 - if (child != &noop_qdisc)
6745 + /* child is fifo, no need to check for noop_qdisc */
6746 qdisc_hash_add(child, true);
6747 + }
6748 +
6749 sch_tree_lock(sch);
6750 q->flags = ctl->flags;
6751 q->limit = ctl->limit;
6752 diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
6753 index 03225a8df973..6f74a426f159 100644
6754 --- a/net/sched/sch_tbf.c
6755 +++ b/net/sched/sch_tbf.c
6756 @@ -383,6 +383,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
6757 err = PTR_ERR(child);
6758 goto done;
6759 }
6760 +
6761 + /* child is fifo, no need to check for noop_qdisc */
6762 + qdisc_hash_add(child, true);
6763 }
6764
6765 sch_tree_lock(sch);
6766 @@ -391,8 +394,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
6767 q->qdisc->qstats.backlog);
6768 qdisc_destroy(q->qdisc);
6769 q->qdisc = child;
6770 - if (child != &noop_qdisc)
6771 - qdisc_hash_add(child, true);
6772 }
6773 q->limit = qopt->limit;
6774 if (tb[TCA_TBF_PBURST])
6775 diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
6776 index 74568cdbca70..d7b88b2d1b22 100644
6777 --- a/net/smc/smc_pnet.c
6778 +++ b/net/smc/smc_pnet.c
6779 @@ -245,40 +245,45 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
6780 static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem,
6781 struct nlattr *tb[])
6782 {
6783 - char *string, *ibname = NULL;
6784 - int rc = 0;
6785 + char *string, *ibname;
6786 + int rc;
6787
6788 memset(pnetelem, 0, sizeof(*pnetelem));
6789 INIT_LIST_HEAD(&pnetelem->list);
6790 - if (tb[SMC_PNETID_NAME]) {
6791 - string = (char *)nla_data(tb[SMC_PNETID_NAME]);
6792 - if (!smc_pnetid_valid(string, pnetelem->pnet_name)) {
6793 - rc = -EINVAL;
6794 - goto error;
6795 - }
6796 - }
6797 - if (tb[SMC_PNETID_ETHNAME]) {
6798 - string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
6799 - pnetelem->ndev = dev_get_by_name(net, string);
6800 - if (!pnetelem->ndev)
6801 - return -ENOENT;
6802 - }
6803 - if (tb[SMC_PNETID_IBNAME]) {
6804 - ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
6805 - ibname = strim(ibname);
6806 - pnetelem->smcibdev = smc_pnet_find_ib(ibname);
6807 - if (!pnetelem->smcibdev) {
6808 - rc = -ENOENT;
6809 - goto error;
6810 - }
6811 - }
6812 - if (tb[SMC_PNETID_IBPORT]) {
6813 - pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
6814 - if (pnetelem->ib_port > SMC_MAX_PORTS) {
6815 - rc = -EINVAL;
6816 - goto error;
6817 - }
6818 - }
6819 +
6820 + rc = -EINVAL;
6821 + if (!tb[SMC_PNETID_NAME])
6822 + goto error;
6823 + string = (char *)nla_data(tb[SMC_PNETID_NAME]);
6824 + if (!smc_pnetid_valid(string, pnetelem->pnet_name))
6825 + goto error;
6826 +
6827 + rc = -EINVAL;
6828 + if (!tb[SMC_PNETID_ETHNAME])
6829 + goto error;
6830 + rc = -ENOENT;
6831 + string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
6832 + pnetelem->ndev = dev_get_by_name(net, string);
6833 + if (!pnetelem->ndev)
6834 + goto error;
6835 +
6836 + rc = -EINVAL;
6837 + if (!tb[SMC_PNETID_IBNAME])
6838 + goto error;
6839 + rc = -ENOENT;
6840 + ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
6841 + ibname = strim(ibname);
6842 + pnetelem->smcibdev = smc_pnet_find_ib(ibname);
6843 + if (!pnetelem->smcibdev)
6844 + goto error;
6845 +
6846 + rc = -EINVAL;
6847 + if (!tb[SMC_PNETID_IBPORT])
6848 + goto error;
6849 + pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
6850 + if (pnetelem->ib_port < 1 || pnetelem->ib_port > SMC_MAX_PORTS)
6851 + goto error;
6852 +
6853 return 0;
6854
6855 error:
6856 @@ -307,6 +312,8 @@ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
6857 void *hdr;
6858 int rc;
6859
6860 + if (!info->attrs[SMC_PNETID_NAME])
6861 + return -EINVAL;
6862 pnetelem = smc_pnet_find_pnetid(
6863 (char *)nla_data(info->attrs[SMC_PNETID_NAME]));
6864 if (!pnetelem)
6865 @@ -359,6 +366,8 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
6866
6867 static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info)
6868 {
6869 + if (!info->attrs[SMC_PNETID_NAME])
6870 + return -EINVAL;
6871 return smc_pnet_remove_by_pnetid(
6872 (char *)nla_data(info->attrs[SMC_PNETID_NAME]));
6873 }
6874 diff --git a/net/wireless/core.c b/net/wireless/core.c
6875 index a6f3cac8c640..c0fd8a85e7f7 100644
6876 --- a/net/wireless/core.c
6877 +++ b/net/wireless/core.c
6878 @@ -95,6 +95,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev,
6879
6880 ASSERT_RTNL();
6881
6882 + if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN)
6883 + return -EINVAL;
6884 +
6885 /* prohibit calling the thing phy%d when %d is not its number */
6886 sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
6887 if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) {
6888 diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
6889 index b0825370d262..957046ac6c8c 100644
6890 --- a/sound/soc/rockchip/Kconfig
6891 +++ b/sound/soc/rockchip/Kconfig
6892 @@ -56,6 +56,9 @@ config SND_SOC_RK3288_HDMI_ANALOG
6893 depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP
6894 select SND_SOC_ROCKCHIP_I2S
6895 select SND_SOC_HDMI_CODEC
6896 + select SND_SOC_ES8328_I2C
6897 + select SND_SOC_ES8328_SPI if SPI_MASTER
6898 + select DRM_DW_HDMI_I2S_AUDIO if DRM_DW_HDMI
6899 help
6900 Say Y or M here if you want to add support for SoC audio on Rockchip
6901 RK3288 boards using an analog output and the built-in HDMI audio.
6902 diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
6903 index 233f1c9a4b6c..aeba0ae890ea 100644
6904 --- a/sound/soc/samsung/i2s.c
6905 +++ b/sound/soc/samsung/i2s.c
6906 @@ -656,8 +656,12 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
6907 tmp |= mod_slave;
6908 break;
6909 case SND_SOC_DAIFMT_CBS_CFS:
6910 - /* Set default source clock in Master mode */
6911 - if (i2s->rclk_srcrate == 0)
6912 + /*
6913 + * Set default source clock in Master mode, only when the
6914 + * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
6915 + * clock configuration assigned in DT is not overwritten.
6916 + */
6917 + if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
6918 i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
6919 0, SND_SOC_CLOCK_IN);
6920 break;
6921 @@ -881,6 +885,11 @@ static int config_setup(struct i2s_dai *i2s)
6922 return 0;
6923
6924 if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
6925 + struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
6926 +
6927 + if (i2s->rclk_srcrate == 0 && rclksrc && !IS_ERR(rclksrc))
6928 + i2s->rclk_srcrate = clk_get_rate(rclksrc);
6929 +
6930 psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
6931 writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
6932 dev_dbg(&i2s->pdev->dev,
6933 diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
6934 index 44b6de5a331a..06a31a9585a0 100644
6935 --- a/sound/soc/samsung/odroid.c
6936 +++ b/sound/soc/samsung/odroid.c
6937 @@ -36,23 +36,26 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
6938 {
6939 struct snd_soc_pcm_runtime *rtd = substream->private_data;
6940 struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card);
6941 - unsigned int pll_freq, rclk_freq;
6942 + unsigned int pll_freq, rclk_freq, rfs;
6943 int ret;
6944
6945 switch (params_rate(params)) {
6946 - case 32000:
6947 case 64000:
6948 - pll_freq = 131072006U;
6949 + pll_freq = 196608001U;
6950 + rfs = 384;
6951 break;
6952 case 44100:
6953 case 88200:
6954 case 176400:
6955 pll_freq = 180633609U;
6956 + rfs = 512;
6957 break;
6958 + case 32000:
6959 case 48000:
6960 case 96000:
6961 case 192000:
6962 pll_freq = 196608001U;
6963 + rfs = 512;
6964 break;
6965 default:
6966 return -EINVAL;
6967 @@ -67,7 +70,7 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
6968 * frequency values due to the EPLL output frequency not being exact
6969 * multiple of the audio sampling rate.
6970 */
6971 - rclk_freq = params_rate(params) * 256 + 1;
6972 + rclk_freq = params_rate(params) * rfs + 1;
6973
6974 ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
6975 if (ret < 0)
6976 diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
6977 index 782c580b7aa3..e5049fbfc4f1 100644
6978 --- a/sound/soc/soc-topology.c
6979 +++ b/sound/soc/soc-topology.c
6980 @@ -1276,6 +1276,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
6981 kfree(sm);
6982 continue;
6983 }
6984 +
6985 + /* create any TLV data */
6986 + soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr);
6987 }
6988 return kc;
6989
6990 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
6991 index 794224e1d6df..006da37ad0d9 100644
6992 --- a/sound/usb/quirks.c
6993 +++ b/sound/usb/quirks.c
6994 @@ -1149,24 +1149,27 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
6995 return false;
6996 }
6997
6998 -/* Marantz/Denon USB DACs need a vendor cmd to switch
6999 +/* ITF-USB DSD based DACs need a vendor cmd to switch
7000 * between PCM and native DSD mode
7001 + * (2 altsets version)
7002 */
7003 -static bool is_marantz_denon_dac(unsigned int id)
7004 +static bool is_itf_usb_dsd_2alts_dac(unsigned int id)
7005 {
7006 switch (id) {
7007 case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
7008 case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
7009 case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
7010 + case USB_ID(0x1852, 0x5065): /* Luxman DA-06 */
7011 return true;
7012 }
7013 return false;
7014 }
7015
7016 -/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
7017 - * between PCM/DOP and native DSD mode
7018 +/* ITF-USB DSD based DACs need a vendor cmd to switch
7019 + * between PCM and native DSD mode
7020 + * (3 altsets version)
7021 */
7022 -static bool is_teac_dsd_dac(unsigned int id)
7023 +static bool is_itf_usb_dsd_3alts_dac(unsigned int id)
7024 {
7025 switch (id) {
7026 case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
7027 @@ -1183,7 +1186,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
7028 struct usb_device *dev = subs->dev;
7029 int err;
7030
7031 - if (is_marantz_denon_dac(subs->stream->chip->usb_id)) {
7032 + if (is_itf_usb_dsd_2alts_dac(subs->stream->chip->usb_id)) {
7033 /* First switch to alt set 0, otherwise the mode switch cmd
7034 * will not be accepted by the DAC
7035 */
7036 @@ -1204,7 +1207,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
7037 break;
7038 }
7039 mdelay(20);
7040 - } else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
7041 + } else if (is_itf_usb_dsd_3alts_dac(subs->stream->chip->usb_id)) {
7042 /* Vendor mode switch cmd is required. */
7043 switch (fmt->altsetting) {
7044 case 3: /* DSD mode (DSD_U32) requested */
7045 @@ -1300,10 +1303,10 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
7046 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
7047 mdelay(20);
7048
7049 - /* Marantz/Denon devices with USB DAC functionality need a delay
7050 + /* ITF-USB DSD based DACs functionality need a delay
7051 * after each class compliant request
7052 */
7053 - if (is_marantz_denon_dac(chip->usb_id)
7054 + if (is_itf_usb_dsd_2alts_dac(chip->usb_id)
7055 && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
7056 mdelay(20);
7057
7058 @@ -1390,14 +1393,14 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
7059 break;
7060 }
7061
7062 - /* Denon/Marantz devices with USB DAC functionality */
7063 - if (is_marantz_denon_dac(chip->usb_id)) {
7064 + /* ITF-USB DSD based DACs (2 altsets version) */
7065 + if (is_itf_usb_dsd_2alts_dac(chip->usb_id)) {
7066 if (fp->altsetting == 2)
7067 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
7068 }
7069
7070 - /* TEAC devices with USB DAC functionality */
7071 - if (is_teac_dsd_dac(chip->usb_id)) {
7072 + /* ITF-USB DSD based DACs (3 altsets version) */
7073 + if (is_itf_usb_dsd_3alts_dac(chip->usb_id)) {
7074 if (fp->altsetting == 3)
7075 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
7076 }