Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0288-4.9.189-all-fixes.patch



Revision 3563
Thu Aug 13 10:21:07 2020 UTC by niro
File size: 48069 bytes
linux-189
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index 55a9bbbcf5e1..f4f0a1b9ba29 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -2484,6 +2484,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6 improves system performance, but it may also
7 expose users to several CPU vulnerabilities.
8 Equivalent to: nopti [X86]
9 + nospectre_v1 [X86]
10 nospectre_v2 [X86]
11 spectre_v2_user=off [X86]
12 spec_store_bypass_disable=off [X86]
13 @@ -2819,10 +2820,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
14
15 nohugeiomap [KNL,x86] Disable kernel huge I/O mappings.
16
17 - nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
18 - check bypass). With this option data leaks are possible
19 - in the system.
20 -
21 nosmt [KNL,S390] Disable symmetric multithreading (SMT).
22 Equivalent to smt=1.
23
24 @@ -2830,6 +2827,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
25 nosmt=force: Force disable SMT, cannot be undone
26 via the sysfs control file.
27
28 + nospectre_v1 [X86,PPC] Disable mitigations for Spectre Variant 1
29 + (bounds check bypass). With this option data leaks are
30 + possible in the system.
31 +
32 nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
33 (indirect branch prediction) vulnerability. System may
34 allow data leaks with this option, which is equivalent
35 diff --git a/Makefile b/Makefile
36 index b6b54e6f67e8..4fdc9d984f80 100644
37 --- a/Makefile
38 +++ b/Makefile
39 @@ -1,6 +1,6 @@
40 VERSION = 4
41 PATCHLEVEL = 9
42 -SUBLEVEL = 188
43 +SUBLEVEL = 189
44 EXTRAVERSION =
45 NAME = Roaring Lionus
46
47 diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
48 index 876ed5f2922c..f82f193b8856 100644
49 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
50 +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
51 @@ -108,16 +108,21 @@
52 twl_audio: audio {
53 compatible = "ti,twl4030-audio";
54 codec {
55 + ti,hs_extmute_gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
56 };
57 };
58 };
59 };
60
61 &i2c2 {
62 + pinctrl-names = "default";
63 + pinctrl-0 = <&i2c2_pins>;
64 clock-frequency = <400000>;
65 };
66
67 &i2c3 {
68 + pinctrl-names = "default";
69 + pinctrl-0 = <&i2c3_pins>;
70 clock-frequency = <400000>;
71 };
72
73 @@ -221,6 +226,7 @@
74 pinctrl-single,pins = <
75 OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
76 OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
77 + OMAP3_CORE1_IOPAD(0x20ba, PIN_OUTPUT | MUX_MODE4) /* gpmc_ncs6.gpio_57 */
78 >;
79 };
80 };
81 @@ -239,6 +245,18 @@
82 OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
83 >;
84 };
85 + i2c2_pins: pinmux_i2c2_pins {
86 + pinctrl-single,pins = <
87 + OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
88 + OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
89 + >;
90 + };
91 + i2c3_pins: pinmux_i2c3_pins {
92 + pinctrl-single,pins = <
93 + OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
94 + OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
95 + >;
96 + };
97 };
98
99 &omap3_pmx_core2 {
100 diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
101 index 08f0a35dc0d1..ceb49d15d243 100644
102 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
103 +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
104 @@ -117,10 +117,14 @@
105 };
106
107 &i2c2 {
108 + pinctrl-names = "default";
109 + pinctrl-0 = <&i2c2_pins>;
110 clock-frequency = <400000>;
111 };
112
113 &i2c3 {
114 + pinctrl-names = "default";
115 + pinctrl-0 = <&i2c3_pins>;
116 clock-frequency = <400000>;
117 at24@50 {
118 compatible = "atmel,24c64";
119 @@ -215,6 +219,18 @@
120 OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
121 >;
122 };
123 + i2c2_pins: pinmux_i2c2_pins {
124 + pinctrl-single,pins = <
125 + OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
126 + OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
127 + >;
128 + };
129 + i2c3_pins: pinmux_i2c3_pins {
130 + pinctrl-single,pins = <
131 + OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
132 + OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
133 + >;
134 + };
135 };
136
137 &uart2 {
138 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
139 index 15868eca58de..e7bef3d936d8 100644
140 --- a/arch/arm64/include/asm/cpufeature.h
141 +++ b/arch/arm64/include/asm/cpufeature.h
142 @@ -31,9 +31,10 @@
143
144 /* CPU feature register tracking */
145 enum ftr_type {
146 - FTR_EXACT, /* Use a predefined safe value */
147 - FTR_LOWER_SAFE, /* Smaller value is safe */
148 - FTR_HIGHER_SAFE,/* Bigger value is safe */
149 + FTR_EXACT, /* Use a predefined safe value */
150 + FTR_LOWER_SAFE, /* Smaller value is safe */
151 + FTR_HIGHER_SAFE, /* Bigger value is safe */
152 + FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
153 };
154
155 #define FTR_STRICT true /* SANITY check strict matching required */
156 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
157 index a3ab7dfad50a..9a8e45dc36bd 100644
158 --- a/arch/arm64/kernel/cpufeature.c
159 +++ b/arch/arm64/kernel/cpufeature.c
160 @@ -148,10 +148,12 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
161 };
162
163 static const struct arm64_ftr_bits ftr_ctr[] = {
164 - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
165 - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
166 - ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
167 - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
168 + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
169 + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 30, 1, 0),
170 + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
171 + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
172 + ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0), /* CWG */
173 + ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0), /* ERG */
174 ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
175 /*
176 * Linux can handle differing I-cache policies. Userspace JITs will
177 @@ -390,6 +392,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
178 case FTR_LOWER_SAFE:
179 ret = new < cur ? new : cur;
180 break;
181 + case FTR_HIGHER_OR_ZERO_SAFE:
182 + if (!cur || !new)
183 + break;
184 + /* Fallthrough */
185 case FTR_HIGHER_SAFE:
186 ret = new > cur ? new : cur;
187 break;
188 diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
189 index 9a9e5884066c..8af8c070f213 100644
190 --- a/arch/x86/entry/calling.h
191 +++ b/arch/x86/entry/calling.h
192 @@ -1,4 +1,5 @@
193 #include <linux/jump_label.h>
194 +#include <asm/cpufeatures.h>
195
196 /*
197
198 @@ -201,6 +202,23 @@ For 32-bit we have the following conventions - kernel is built with
199 .byte 0xf1
200 .endm
201
202 +/*
203 + * Mitigate Spectre v1 for conditional swapgs code paths.
204 + *
205 + * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
206 + * prevent a speculative swapgs when coming from kernel space.
207 + *
208 + * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
209 + * to prevent the swapgs from getting speculatively skipped when coming from
210 + * user space.
211 + */
212 +.macro FENCE_SWAPGS_USER_ENTRY
213 + ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
214 +.endm
215 +.macro FENCE_SWAPGS_KERNEL_ENTRY
216 + ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
217 +.endm
218 +
219 #endif /* CONFIG_X86_64 */
220
221 /*
222 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
223 index 8252d9dc48eb..10ecfba43dff 100644
224 --- a/arch/x86/entry/entry_64.S
225 +++ b/arch/x86/entry/entry_64.S
226 @@ -420,6 +420,7 @@ END(irq_entries_start)
227 * tracking that we're in kernel mode.
228 */
229 SWAPGS
230 + FENCE_SWAPGS_USER_ENTRY
231 SWITCH_KERNEL_CR3
232
233 /*
234 @@ -433,8 +434,10 @@ END(irq_entries_start)
235 TRACE_IRQS_OFF
236
237 CALL_enter_from_user_mode
238 -
239 + jmp 2f
240 1:
241 + FENCE_SWAPGS_KERNEL_ENTRY
242 +2:
243 /*
244 * Save previous stack pointer, optionally switch to interrupt stack.
245 * irq_count is used to check if a CPU is already on an interrupt stack
246 @@ -1004,6 +1007,13 @@ ENTRY(paranoid_entry)
247 movq %rax, %cr3
248 2:
249 #endif
250 + /*
251 + * The above doesn't do an unconditional CR3 write, even in the PTI
252 + * case. So do an lfence to prevent GS speculation, regardless of
253 + * whether PTI is enabled.
254 + */
255 + FENCE_SWAPGS_KERNEL_ENTRY
256 +
257 ret
258 END(paranoid_entry)
259
260 @@ -1065,6 +1075,7 @@ ENTRY(error_entry)
261 * from user mode due to an IRET fault.
262 */
263 SWAPGS
264 + FENCE_SWAPGS_USER_ENTRY
265
266 .Lerror_entry_from_usermode_after_swapgs:
267 /*
268 @@ -1076,6 +1087,8 @@ ENTRY(error_entry)
269 CALL_enter_from_user_mode
270 ret
271
272 +.Lerror_entry_done_lfence:
273 + FENCE_SWAPGS_KERNEL_ENTRY
274 .Lerror_entry_done:
275 TRACE_IRQS_OFF
276 ret
277 @@ -1094,7 +1107,7 @@ ENTRY(error_entry)
278 cmpq %rax, RIP+8(%rsp)
279 je .Lbstep_iret
280 cmpq $.Lgs_change, RIP+8(%rsp)
281 - jne .Lerror_entry_done
282 + jne .Lerror_entry_done_lfence
283
284 /*
285 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
286 @@ -1102,6 +1115,7 @@ ENTRY(error_entry)
287 * .Lgs_change's error handler with kernel gsbase.
288 */
289 SWAPGS
290 + FENCE_SWAPGS_USER_ENTRY
291 jmp .Lerror_entry_done
292
293 .Lbstep_iret:
294 @@ -1115,6 +1129,7 @@ ENTRY(error_entry)
295 * Switch to kernel gsbase:
296 */
297 SWAPGS
298 + FENCE_SWAPGS_USER_ENTRY
299
300 /*
301 * Pretend that the exception came from user mode: set up pt_regs
302 @@ -1211,6 +1226,7 @@ ENTRY(nmi)
303 * to switch CR3 here.
304 */
305 cld
306 + FENCE_SWAPGS_USER_ENTRY
307 movq %rsp, %rdx
308 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
309 pushq 5*8(%rdx) /* pt_regs->ss */
310 @@ -1499,6 +1515,7 @@ end_repeat_nmi:
311 movq %rax, %cr3
312 2:
313 #endif
314 + FENCE_SWAPGS_KERNEL_ENTRY
315
316 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
317 call do_nmi
318 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
319 index 06de338be0d8..3a972da155d6 100644
320 --- a/arch/x86/include/asm/cpufeatures.h
321 +++ b/arch/x86/include/asm/cpufeatures.h
322 @@ -192,7 +192,8 @@
323
324 #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
325 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
326 -
327 +#define X86_FEATURE_FENCE_SWAPGS_USER ( 7*32+10) /* "" LFENCE in user entry SWAPGS path */
328 +#define X86_FEATURE_FENCE_SWAPGS_KERNEL ( 7*32+11) /* "" LFENCE in kernel entry SWAPGS path */
329 #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
330 #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
331
332 @@ -201,9 +202,6 @@
333
334 #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
335
336 -/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
337 -#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
338 -
339 #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
340 #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
341 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
342 @@ -214,6 +212,7 @@
343 #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
344 #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
345 #define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
346 +#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
347
348 /* Virtualization flags: Linux defined, word 8 */
349 #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
350 @@ -357,5 +356,6 @@
351 #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
352 #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
353 #define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
354 +#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
355
356 #endif /* _ASM_X86_CPUFEATURES_H */
357 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
358 index a4f343ac042e..2a42fef275ad 100644
359 --- a/arch/x86/kernel/cpu/bugs.c
360 +++ b/arch/x86/kernel/cpu/bugs.c
361 @@ -31,6 +31,7 @@
362 #include <asm/intel-family.h>
363 #include <asm/e820.h>
364
365 +static void __init spectre_v1_select_mitigation(void);
366 static void __init spectre_v2_select_mitigation(void);
367 static void __init ssb_select_mitigation(void);
368 static void __init l1tf_select_mitigation(void);
369 @@ -95,17 +96,11 @@ void __init check_bugs(void)
370 if (boot_cpu_has(X86_FEATURE_STIBP))
371 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
372
373 - /* Select the proper spectre mitigation before patching alternatives */
374 + /* Select the proper CPU mitigations before patching alternatives: */
375 + spectre_v1_select_mitigation();
376 spectre_v2_select_mitigation();
377 -
378 - /*
379 - * Select proper mitigation for any exposure to the Speculative Store
380 - * Bypass vulnerability.
381 - */
382 ssb_select_mitigation();
383 -
384 l1tf_select_mitigation();
385 -
386 mds_select_mitigation();
387
388 arch_smt_update();
389 @@ -270,6 +265,98 @@ static int __init mds_cmdline(char *str)
390 }
391 early_param("mds", mds_cmdline);
392
393 +#undef pr_fmt
394 +#define pr_fmt(fmt) "Spectre V1 : " fmt
395 +
396 +enum spectre_v1_mitigation {
397 + SPECTRE_V1_MITIGATION_NONE,
398 + SPECTRE_V1_MITIGATION_AUTO,
399 +};
400 +
401 +static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
402 + SPECTRE_V1_MITIGATION_AUTO;
403 +
404 +static const char * const spectre_v1_strings[] = {
405 + [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
406 + [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
407 +};
408 +
409 +/*
410 + * Does SMAP provide full mitigation against speculative kernel access to
411 + * userspace?
412 + */
413 +static bool smap_works_speculatively(void)
414 +{
415 + if (!boot_cpu_has(X86_FEATURE_SMAP))
416 + return false;
417 +
418 + /*
419 + * On CPUs which are vulnerable to Meltdown, SMAP does not
420 + * prevent speculative access to user data in the L1 cache.
421 + * Consider SMAP to be non-functional as a mitigation on these
422 + * CPUs.
423 + */
424 + if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
425 + return false;
426 +
427 + return true;
428 +}
429 +
430 +static void __init spectre_v1_select_mitigation(void)
431 +{
432 + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
433 + spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
434 + return;
435 + }
436 +
437 + if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
438 + /*
439 + * With Spectre v1, a user can speculatively control either
440 + * path of a conditional swapgs with a user-controlled GS
441 + * value. The mitigation is to add lfences to both code paths.
442 + *
443 + * If FSGSBASE is enabled, the user can put a kernel address in
444 + * GS, in which case SMAP provides no protection.
445 + *
446 + * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
447 + * FSGSBASE enablement patches have been merged. ]
448 + *
449 + * If FSGSBASE is disabled, the user can only put a user space
450 + * address in GS. That makes an attack harder, but still
451 + * possible if there's no SMAP protection.
452 + */
453 + if (!smap_works_speculatively()) {
454 + /*
455 + * Mitigation can be provided from SWAPGS itself or
456 + * PTI as the CR3 write in the Meltdown mitigation
457 + * is serializing.
458 + *
459 + * If neither is there, mitigate with an LFENCE to
460 + * stop speculation through swapgs.
461 + */
462 + if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
463 + !boot_cpu_has(X86_FEATURE_KAISER))
464 + setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
465 +
466 + /*
467 + * Enable lfences in the kernel entry (non-swapgs)
468 + * paths, to prevent user entry from speculatively
469 + * skipping swapgs.
470 + */
471 + setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
472 + }
473 + }
474 +
475 + pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
476 +}
477 +
478 +static int __init nospectre_v1_cmdline(char *str)
479 +{
480 + spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
481 + return 0;
482 +}
483 +early_param("nospectre_v1", nospectre_v1_cmdline);
484 +
485 #undef pr_fmt
486 #define pr_fmt(fmt) "Spectre V2 : " fmt
487
488 @@ -1265,7 +1352,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
489 break;
490
491 case X86_BUG_SPECTRE_V1:
492 - return sprintf(buf, "Mitigation: __user pointer sanitization\n");
493 + return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
494
495 case X86_BUG_SPECTRE_V2:
496 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
497 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
498 index cda130dc56b9..12fa16051871 100644
499 --- a/arch/x86/kernel/cpu/common.c
500 +++ b/arch/x86/kernel/cpu/common.c
501 @@ -897,6 +897,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
502 #define NO_L1TF BIT(3)
503 #define NO_MDS BIT(4)
504 #define MSBDS_ONLY BIT(5)
505 +#define NO_SWAPGS BIT(6)
506
507 #define VULNWL(_vendor, _family, _model, _whitelist) \
508 { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
509 @@ -920,29 +921,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
510 VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
511 VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
512
513 - VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
514 - VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY),
515 - VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY),
516 - VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
517 - VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY),
518 - VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY),
519 + VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
520 + VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
521 + VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
522 + VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
523 + VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
524 + VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
525
526 VULNWL_INTEL(CORE_YONAH, NO_SSB),
527
528 - VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY),
529 + VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
530
531 - VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
532 - VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
533 - VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
534 + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS),
535 + VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS),
536 + VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS),
537 +
538 + /*
539 + * Technically, swapgs isn't serializing on AMD (despite it previously
540 + * being documented as such in the APM). But according to AMD, %gs is
541 + * updated non-speculatively, and the issuing of %gs-relative memory
542 + * operands will be blocked until the %gs update completes, which is
543 + * good enough for our purposes.
544 + */
545
546 /* AMD Family 0xf - 0x12 */
547 - VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
548 - VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
549 - VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
550 - VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
551 + VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
552 + VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
553 + VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
554 + VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
555
556 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
557 - VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
558 + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
559 {}
560 };
561
562 @@ -979,6 +988,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
563 setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
564 }
565
566 + if (!cpu_matches(NO_SWAPGS))
567 + setup_force_cpu_bug(X86_BUG_SWAPGS);
568 +
569 if (cpu_matches(NO_MELTDOWN))
570 return;
571
572 diff --git a/block/blk-core.c b/block/blk-core.c
573 index 77b99bf16c83..bdb906bbfe19 100644
574 --- a/block/blk-core.c
575 +++ b/block/blk-core.c
576 @@ -881,6 +881,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
577
578 fail:
579 blk_free_flush_queue(q->fq);
580 + q->fq = NULL;
581 return NULL;
582 }
583 EXPORT_SYMBOL(blk_init_allocated_queue);
584 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
585 index b2756765950e..fe47c924dc64 100644
586 --- a/drivers/atm/iphase.c
587 +++ b/drivers/atm/iphase.c
588 @@ -63,6 +63,7 @@
589 #include <asm/byteorder.h>
590 #include <linux/vmalloc.h>
591 #include <linux/jiffies.h>
592 +#include <linux/nospec.h>
593 #include "iphase.h"
594 #include "suni.h"
595 #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
596 @@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
597 }
598 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
599 board = ia_cmds.status;
600 - if ((board < 0) || (board > iadev_count))
601 - board = 0;
602 +
603 + if ((board < 0) || (board > iadev_count))
604 + board = 0;
605 + board = array_index_nospec(board, iadev_count + 1);
606 +
607 iadev = ia_dev[board];
608 switch (ia_cmds.cmd) {
609 case MEMDUMP:
610 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
611 index 6f4c84d824e6..25c006338100 100644
612 --- a/drivers/hid/hid-ids.h
613 +++ b/drivers/hid/hid-ids.h
614 @@ -509,6 +509,7 @@
615 #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
616 #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
617 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
618 +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
619
620 #define USB_VENDOR_ID_HUION 0x256c
621 #define USB_DEVICE_ID_HUION_TABLET 0x006e
622 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
623 index 617ae294a318..e851926be8b0 100644
624 --- a/drivers/hid/usbhid/hid-quirks.c
625 +++ b/drivers/hid/usbhid/hid-quirks.c
626 @@ -98,6 +98,7 @@ static const struct hid_blacklist {
627 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
628 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
629 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
630 + { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641, HID_QUIRK_ALWAYS_POLL },
631 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
632 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
633 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
634 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
635 index b1ad378cb2a6..6c3bf8846b52 100644
636 --- a/drivers/hid/wacom_wac.c
637 +++ b/drivers/hid/wacom_wac.c
638 @@ -529,14 +529,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
639 */
640 buttons = (data[4] << 1) | (data[3] & 0x01);
641 } else if (features->type == CINTIQ_COMPANION_2) {
642 - /* d-pad right -> data[4] & 0x10
643 - * d-pad up -> data[4] & 0x20
644 - * d-pad left -> data[4] & 0x40
645 - * d-pad down -> data[4] & 0x80
646 - * d-pad center -> data[3] & 0x01
647 + /* d-pad right -> data[2] & 0x10
648 + * d-pad up -> data[2] & 0x20
649 + * d-pad left -> data[2] & 0x40
650 + * d-pad down -> data[2] & 0x80
651 + * d-pad center -> data[1] & 0x01
652 */
653 buttons = ((data[2] >> 4) << 7) |
654 - ((data[1] & 0x04) << 6) |
655 + ((data[1] & 0x04) << 4) |
656 ((data[2] & 0x0F) << 2) |
657 (data[1] & 0x03);
658 } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
659 diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
660 index 978b8d94f9a4..1baa25e82bdd 100644
661 --- a/drivers/infiniband/core/addr.c
662 +++ b/drivers/infiniband/core/addr.c
663 @@ -735,14 +735,13 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
664 struct net_device *dev;
665
666 union {
667 - struct sockaddr _sockaddr;
668 struct sockaddr_in _sockaddr_in;
669 struct sockaddr_in6 _sockaddr_in6;
670 } sgid_addr, dgid_addr;
671
672
673 - rdma_gid2ip(&sgid_addr._sockaddr, sgid);
674 - rdma_gid2ip(&dgid_addr._sockaddr, dgid);
675 + rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
676 + rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);
677
678 memset(&dev_addr, 0, sizeof(dev_addr));
679 if (if_index)
680 @@ -751,8 +750,9 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
681
682 ctx.addr = &dev_addr;
683 init_completion(&ctx.comp);
684 - ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
685 - &dev_addr, 1000, resolve_cb, &ctx);
686 + ret = rdma_resolve_ip(&self, (struct sockaddr *)&sgid_addr,
687 + (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
688 + resolve_cb, &ctx);
689 if (ret)
690 return ret;
691
692 @@ -782,16 +782,15 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
693 int ret = 0;
694 struct rdma_dev_addr dev_addr;
695 union {
696 - struct sockaddr _sockaddr;
697 struct sockaddr_in _sockaddr_in;
698 struct sockaddr_in6 _sockaddr_in6;
699 } gid_addr;
700
701 - rdma_gid2ip(&gid_addr._sockaddr, sgid);
702 + rdma_gid2ip((struct sockaddr *)&gid_addr, sgid);
703
704 memset(&dev_addr, 0, sizeof(dev_addr));
705 dev_addr.net = &init_net;
706 - ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
707 + ret = rdma_translate_ip((struct sockaddr *)&gid_addr, &dev_addr, vlan_id);
708 if (ret)
709 return ret;
710
711 diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
712 index 4baf3b864a57..5879a06ada93 100644
713 --- a/drivers/infiniband/core/sa_query.c
714 +++ b/drivers/infiniband/core/sa_query.c
715 @@ -1109,7 +1109,6 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
716 .net = rec->net ? rec->net :
717 &init_net};
718 union {
719 - struct sockaddr _sockaddr;
720 struct sockaddr_in _sockaddr_in;
721 struct sockaddr_in6 _sockaddr_in6;
722 } sgid_addr, dgid_addr;
723 @@ -1117,12 +1116,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
724 if (!device->get_netdev)
725 return -EOPNOTSUPP;
726
727 - rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
728 - rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
729 + rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
730 + rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);
731
732 /* validate the route */
733 - ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
734 - &dgid_addr._sockaddr, &dev_addr);
735 + ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
736 + (struct sockaddr *)&dgid_addr,
737 + &dev_addr);
738 if (ret)
739 return ret;
740
741 diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
742 index 797362a297b2..35efd40ba47f 100644
743 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
744 +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
745 @@ -82,7 +82,6 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
746 u8 nxthdr = 0x11;
747 struct iphdr ipv4;
748 union {
749 - struct sockaddr _sockaddr;
750 struct sockaddr_in _sockaddr_in;
751 struct sockaddr_in6 _sockaddr_in6;
752 } sgid_addr, dgid_addr;
753 @@ -131,9 +130,9 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
754 ipv4.tot_len = htons(0);
755 ipv4.ttl = attr->grh.hop_limit;
756 ipv4.protocol = nxthdr;
757 - rdma_gid2ip(&sgid_addr._sockaddr, sgid);
758 + rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
759 ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
760 - rdma_gid2ip(&dgid_addr._sockaddr, &attr->grh.dgid);
761 + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
762 ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
763 memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
764 } else {
765 diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
766 index 67fc0b6857e1..edfa22847724 100644
767 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
768 +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
769 @@ -2505,7 +2505,6 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
770 u32 vlan_id = 0xFFFF;
771 u8 mac_addr[6], hdr_type;
772 union {
773 - struct sockaddr _sockaddr;
774 struct sockaddr_in _sockaddr_in;
775 struct sockaddr_in6 _sockaddr_in6;
776 } sgid_addr, dgid_addr;
777 @@ -2550,8 +2549,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
778
779 hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
780 if (hdr_type == RDMA_NETWORK_IPV4) {
781 - rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
782 - rdma_gid2ip(&dgid_addr._sockaddr, &ah_attr->grh.dgid);
783 + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
784 + rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
785 memcpy(&cmd->params.dgid[0],
786 &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
787 memcpy(&cmd->params.sgid[0],
788 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
789 index 6167bb0c71ed..53a71166e784 100644
790 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
791 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
792 @@ -1939,7 +1939,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
793 }
794
795 /* select a non-FCoE queue */
796 - return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
797 + return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp));
798 }
799
800 void bnx2x_set_num_queues(struct bnx2x *bp)
801 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
802 index 524fff2b3dc6..2e6d6dfdcc80 100644
803 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
804 +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
805 @@ -207,7 +207,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
806 struct mlx5_interface *intf;
807
808 mutex_lock(&mlx5_intf_mutex);
809 - list_for_each_entry(intf, &intf_list, list)
810 + list_for_each_entry_reverse(intf, &intf_list, list)
811 mlx5_remove_device(intf, priv);
812 list_del(&priv->dev_list);
813 mutex_unlock(&mlx5_intf_mutex);
814 diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
815 index 8c93ed5c9763..fa8f7c40a384 100644
816 --- a/drivers/net/ppp/pppoe.c
817 +++ b/drivers/net/ppp/pppoe.c
818 @@ -1134,6 +1134,9 @@ static const struct proto_ops pppoe_ops = {
819 .recvmsg = pppoe_recvmsg,
820 .mmap = sock_no_mmap,
821 .ioctl = pppox_ioctl,
822 +#ifdef CONFIG_COMPAT
823 + .compat_ioctl = pppox_compat_ioctl,
824 +#endif
825 };
826
827 static const struct pppox_proto pppoe_proto = {
828 diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
829 index b9c8be6283d3..50856f9fe08a 100644
830 --- a/drivers/net/ppp/pppox.c
831 +++ b/drivers/net/ppp/pppox.c
832 @@ -22,6 +22,7 @@
833 #include <linux/string.h>
834 #include <linux/module.h>
835 #include <linux/kernel.h>
836 +#include <linux/compat.h>
837 #include <linux/errno.h>
838 #include <linux/netdevice.h>
839 #include <linux/net.h>
840 @@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
841
842 EXPORT_SYMBOL(pppox_ioctl);
843
844 +#ifdef CONFIG_COMPAT
845 +int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
846 +{
847 + if (cmd == PPPOEIOCSFWD32)
848 + cmd = PPPOEIOCSFWD;
849 +
850 + return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
851 +}
852 +
853 +EXPORT_SYMBOL(pppox_compat_ioctl);
854 +#endif
855 +
856 static int pppox_create(struct net *net, struct socket *sock, int protocol,
857 int kern)
858 {
859 diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
860 index 5a8befdfa5e4..fa14a67fb09a 100644
861 --- a/drivers/net/ppp/pptp.c
862 +++ b/drivers/net/ppp/pptp.c
863 @@ -638,6 +638,9 @@ static const struct proto_ops pptp_ops = {
864 .recvmsg = sock_no_recvmsg,
865 .mmap = sock_no_mmap,
866 .ioctl = pppox_ioctl,
867 +#ifdef CONFIG_COMPAT
868 + .compat_ioctl = pppox_compat_ioctl,
869 +#endif
870 };
871
872 static const struct pppox_proto pppox_pptp_proto = {
873 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
874 index cc3994d4e7bc..3c2f34db937b 100644
875 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
876 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
877 @@ -1984,7 +1984,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
878 */
879 static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
880 {
881 - return (struct fcoe_rport *)(rdata + 1);
882 + return container_of(rdata, struct fcoe_rport, rdata);
883 }
884
885 /**
886 @@ -2244,7 +2244,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
887 */
888 static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
889 struct sk_buff *skb,
890 - struct fc_rport_priv *rdata)
891 + struct fcoe_rport *frport)
892 {
893 struct fip_header *fiph;
894 struct fip_desc *desc = NULL;
895 @@ -2252,16 +2252,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
896 struct fip_wwn_desc *wwn = NULL;
897 struct fip_vn_desc *vn = NULL;
898 struct fip_size_desc *size = NULL;
899 - struct fcoe_rport *frport;
900 size_t rlen;
901 size_t dlen;
902 u32 desc_mask = 0;
903 u32 dtype;
904 u8 sub;
905
906 - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
907 - frport = fcoe_ctlr_rport(rdata);
908 -
909 fiph = (struct fip_header *)skb->data;
910 frport->flags = ntohs(fiph->fip_flags);
911
912 @@ -2324,15 +2320,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
913 if (dlen != sizeof(struct fip_wwn_desc))
914 goto len_err;
915 wwn = (struct fip_wwn_desc *)desc;
916 - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
917 + frport->rdata.ids.node_name =
918 + get_unaligned_be64(&wwn->fd_wwn);
919 break;
920 case FIP_DT_VN_ID:
921 if (dlen != sizeof(struct fip_vn_desc))
922 goto len_err;
923 vn = (struct fip_vn_desc *)desc;
924 memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
925 - rdata->ids.port_id = ntoh24(vn->fd_fc_id);
926 - rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
927 + frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
928 + frport->rdata.ids.port_name =
929 + get_unaligned_be64(&vn->fd_wwpn);
930 break;
931 case FIP_DT_FC4F:
932 if (dlen != sizeof(struct fip_fc4_feat))
933 @@ -2670,16 +2668,13 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
934 {
935 struct fip_header *fiph;
936 enum fip_vn2vn_subcode sub;
937 - struct {
938 - struct fc_rport_priv rdata;
939 - struct fcoe_rport frport;
940 - } buf;
941 + struct fcoe_rport frport = { };
942 int rc;
943
944 fiph = (struct fip_header *)skb->data;
945 sub = fiph->fip_subcode;
946
947 - rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
948 + rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
949 if (rc) {
950 LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
951 goto drop;
952 @@ -2688,19 +2683,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
953 mutex_lock(&fip->ctlr_mutex);
954 switch (sub) {
955 case FIP_SC_VN_PROBE_REQ:
956 - fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
957 + fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
958 break;
959 case FIP_SC_VN_PROBE_REP:
960 - fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
961 + fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
962 break;
963 case FIP_SC_VN_CLAIM_NOTIFY:
964 - fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
965 + fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
966 break;
967 case FIP_SC_VN_CLAIM_REP:
968 - fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
969 + fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
970 break;
971 case FIP_SC_VN_BEACON:
972 - fcoe_ctlr_vn_beacon(fip, &buf.rdata);
973 + fcoe_ctlr_vn_beacon(fip, &frport.rdata);
974 break;
975 default:
976 LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
977 @@ -2724,22 +2719,18 @@ drop:
978 */
979 static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
980 struct sk_buff *skb,
981 - struct fc_rport_priv *rdata)
982 + struct fcoe_rport *frport)
983 {
984 struct fip_header *fiph;
985 struct fip_desc *desc = NULL;
986 struct fip_mac_desc *macd = NULL;
987 struct fip_wwn_desc *wwn = NULL;
988 - struct fcoe_rport *frport;
989 size_t rlen;
990 size_t dlen;
991 u32 desc_mask = 0;
992 u32 dtype;
993 u8 sub;
994
995 - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
996 - frport = fcoe_ctlr_rport(rdata);
997 -
998 fiph = (struct fip_header *)skb->data;
999 frport->flags = ntohs(fiph->fip_flags);
1000
1001 @@ -2793,7 +2784,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
1002 if (dlen != sizeof(struct fip_wwn_desc))
1003 goto len_err;
1004 wwn = (struct fip_wwn_desc *)desc;
1005 - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
1006 + frport->rdata.ids.node_name =
1007 + get_unaligned_be64(&wwn->fd_wwn);
1008 break;
1009 default:
1010 LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
1011 @@ -2904,22 +2896,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
1012 {
1013 struct fip_header *fiph;
1014 enum fip_vlan_subcode sub;
1015 - struct {
1016 - struct fc_rport_priv rdata;
1017 - struct fcoe_rport frport;
1018 - } buf;
1019 + struct fcoe_rport frport = { };
1020 int rc;
1021
1022 fiph = (struct fip_header *)skb->data;
1023 sub = fiph->fip_subcode;
1024 - rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
1025 + rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
1026 if (rc) {
1027 LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
1028 goto drop;
1029 }
1030 mutex_lock(&fip->ctlr_mutex);
1031 if (sub == FIP_SC_VL_REQ)
1032 - fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
1033 + fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata);
1034 mutex_unlock(&fip->ctlr_mutex);
1035
1036 drop:
1037 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
1038 index 97aeaddd600d..70e2958a69a0 100644
1039 --- a/drivers/scsi/libfc/fc_rport.c
1040 +++ b/drivers/scsi/libfc/fc_rport.c
1041 @@ -127,12 +127,15 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
1042 u32 port_id)
1043 {
1044 struct fc_rport_priv *rdata;
1045 + size_t rport_priv_size = sizeof(*rdata);
1046
1047 rdata = lport->tt.rport_lookup(lport, port_id);
1048 if (rdata)
1049 return rdata;
1050
1051 - rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
1052 + if (lport->rport_priv_size > 0)
1053 + rport_priv_size = lport->rport_priv_size;
1054 + rdata = kzalloc(rport_priv_size, GFP_KERNEL);
1055 if (!rdata)
1056 return NULL;
1057
1058 diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
1059 index 25abf2d1732a..eab27d41ba83 100644
1060 --- a/drivers/spi/spi-bcm2835.c
1061 +++ b/drivers/spi/spi-bcm2835.c
1062 @@ -554,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
1063 bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
1064
1065 /* handle all the 3-wire mode */
1066 - if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
1067 + if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
1068 + tfr->rx_buf != master->dummy_rx)
1069 cs |= BCM2835_SPI_CS_REN;
1070 else
1071 cs &= ~BCM2835_SPI_CS_REN;
1072 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
1073 index 93c8e4a4bbd3..4b7da4409c60 100644
1074 --- a/fs/compat_ioctl.c
1075 +++ b/fs/compat_ioctl.c
1076 @@ -1038,9 +1038,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
1077 COMPATIBLE_IOCTL(PPPIOCATTCHAN)
1078 COMPATIBLE_IOCTL(PPPIOCGCHAN)
1079 COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
1080 -/* PPPOX */
1081 -COMPATIBLE_IOCTL(PPPOEIOCSFWD)
1082 -COMPATIBLE_IOCTL(PPPOEIOCDFWD)
1083 /* Big A */
1084 /* sparc only */
1085 /* Big Q for sound/OSS */
1086 diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
1087 index aa2e19182d99..51c5bd64bd00 100644
1088 --- a/include/linux/ceph/ceph_debug.h
1089 +++ b/include/linux/ceph/ceph_debug.h
1090 @@ -3,6 +3,8 @@
1091
1092 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1093
1094 +#include <linux/string.h>
1095 +
1096 #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
1097
1098 /*
1099 @@ -12,12 +14,10 @@
1100 */
1101
1102 # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
1103 -extern const char *ceph_file_part(const char *s, int len);
1104 # define dout(fmt, ...) \
1105 pr_debug("%.*s %12.12s:%-4d : " fmt, \
1106 8 - (int)sizeof(KBUILD_MODNAME), " ", \
1107 - ceph_file_part(__FILE__, sizeof(__FILE__)), \
1108 - __LINE__, ##__VA_ARGS__)
1109 + kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
1110 # else
1111 /* faux printk call just to see any compiler warnings. */
1112 # define dout(fmt, ...) do { \
1113 diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
1114 index ba7a9b0c7c57..24e9b360da65 100644
1115 --- a/include/linux/if_pppox.h
1116 +++ b/include/linux/if_pppox.h
1117 @@ -84,6 +84,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
1118 extern void unregister_pppox_proto(int proto_num);
1119 extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
1120 extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
1121 +extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
1122 +
1123 +#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
1124
1125 /* PPPoX socket states */
1126 enum {
1127 diff --git a/include/net/tcp.h b/include/net/tcp.h
1128 index 1eda31f7f013..a474213ca015 100644
1129 --- a/include/net/tcp.h
1130 +++ b/include/net/tcp.h
1131 @@ -1595,6 +1595,23 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
1132 tcp_sk(sk)->highest_sack = NULL;
1133 }
1134
1135 +static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1136 +{
1137 + struct sk_buff *skb = tcp_write_queue_head(sk);
1138 +
1139 + if (skb == tcp_send_head(sk))
1140 + skb = NULL;
1141 +
1142 + return skb;
1143 +}
1144 +
1145 +static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1146 +{
1147 + struct sk_buff *skb = tcp_send_head(sk);
1148 +
1149 + return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
1150 +}
1151 +
1152 static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1153 {
1154 __skb_queue_tail(&sk->sk_write_queue, skb);
1155 diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
1156 index 722d3264d3bf..6be92eede5c0 100644
1157 --- a/include/scsi/libfcoe.h
1158 +++ b/include/scsi/libfcoe.h
1159 @@ -241,6 +241,7 @@ struct fcoe_fcf {
1160 * @vn_mac: VN_Node assigned MAC address for data
1161 */
1162 struct fcoe_rport {
1163 + struct fc_rport_priv rdata;
1164 unsigned long time;
1165 u16 fcoe_len;
1166 u16 flags;
1167 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
1168 index 3626174456b7..80c81c7e3cf9 100644
1169 --- a/net/bridge/br_multicast.c
1170 +++ b/net/bridge/br_multicast.c
1171 @@ -1489,6 +1489,9 @@ br_multicast_leave_group(struct net_bridge *br,
1172 if (p->port != port)
1173 continue;
1174
1175 + if (p->flags & MDB_PG_FLAGS_PERMANENT)
1176 + break;
1177 +
1178 rcu_assign_pointer(*pp, p->next);
1179 hlist_del_init(&p->mglist);
1180 del_timer(&p->timer);
1181 diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
1182 index b6de4f457161..5172caac645c 100644
1183 --- a/net/bridge/br_vlan.c
1184 +++ b/net/bridge/br_vlan.c
1185 @@ -622,6 +622,11 @@ void br_vlan_flush(struct net_bridge *br)
1186
1187 ASSERT_RTNL();
1188
1189 + /* delete auto-added default pvid local fdb before flushing vlans
1190 + * otherwise it will be leaked on bridge device init failure
1191 + */
1192 + br_fdb_delete_by_port(br, NULL, 0, 1);
1193 +
1194 vg = br_vlan_group(br);
1195 __vlan_flush(vg);
1196 RCU_INIT_POINTER(br->vlgrp, NULL);
1197 diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
1198 index bf0294cf4d22..18c4b34bd6e0 100644
1199 --- a/net/ceph/ceph_common.c
1200 +++ b/net/ceph/ceph_common.c
1201 @@ -45,19 +45,6 @@ bool libceph_compatible(void *data)
1202 }
1203 EXPORT_SYMBOL(libceph_compatible);
1204
1205 -/*
1206 - * find filename portion of a path (/foo/bar/baz -> baz)
1207 - */
1208 -const char *ceph_file_part(const char *s, int len)
1209 -{
1210 - const char *e = s + len;
1211 -
1212 - while (e != s && *(e-1) != '/')
1213 - e--;
1214 - return e;
1215 -}
1216 -EXPORT_SYMBOL(ceph_file_part);
1217 -
1218 const char *ceph_msg_type_name(int type)
1219 {
1220 switch (type) {
1221 diff --git a/net/core/dev.c b/net/core/dev.c
1222 index f693afe608d7..08bcbce16e12 100644
1223 --- a/net/core/dev.c
1224 +++ b/net/core/dev.c
1225 @@ -8296,6 +8296,8 @@ static void __net_exit default_device_exit(struct net *net)
1226
1227 /* Push remaining network devices to init_net */
1228 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
1229 + if (__dev_get_by_name(&init_net, fb_name))
1230 + snprintf(fb_name, IFNAMSIZ, "dev%%d");
1231 err = dev_change_net_namespace(dev, &init_net, fb_name);
1232 if (err) {
1233 pr_emerg("%s: failed to move %s to init_net: %d\n",
1234 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1235 index 0c195b0f4216..9ddb05b98312 100644
1236 --- a/net/ipv4/tcp_output.c
1237 +++ b/net/ipv4/tcp_output.c
1238 @@ -1175,6 +1175,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1239 struct tcp_sock *tp = tcp_sk(sk);
1240 struct sk_buff *buff;
1241 int nsize, old_factor;
1242 + long limit;
1243 int nlen;
1244 u8 flags;
1245
1246 @@ -1185,7 +1186,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1247 if (nsize < 0)
1248 nsize = 0;
1249
1250 - if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf + 0x20000)) {
1251 + /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1252 + * We need some allowance to not penalize applications setting small
1253 + * SO_SNDBUF values.
1254 + * Also allow first and last skb in retransmit queue to be split.
1255 + */
1256 + limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1257 + if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1258 + skb != tcp_rtx_queue_head(sk) &&
1259 + skb != tcp_rtx_queue_tail(sk))) {
1260 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1261 return -ENOMEM;
1262 }
1263 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1264 index 42f363661d25..cc28b8646986 100644
1265 --- a/net/ipv6/ip6_tunnel.c
1266 +++ b/net/ipv6/ip6_tunnel.c
1267 @@ -1275,11 +1275,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1268 fl6.flowi6_mark = skb->mark;
1269 }
1270
1271 + dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1272 +
1273 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1274 return -1;
1275
1276 - dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1277 -
1278 skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1279
1280 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1281 @@ -1362,11 +1362,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1282 fl6.flowi6_mark = skb->mark;
1283 }
1284
1285 + dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1286 +
1287 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1288 return -1;
1289
1290 - dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1291 -
1292 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1293
1294 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1295 diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
1296 index 9b214f313cc0..16b63e60396f 100644
1297 --- a/net/l2tp/l2tp_ppp.c
1298 +++ b/net/l2tp/l2tp_ppp.c
1299 @@ -1790,6 +1790,9 @@ static const struct proto_ops pppol2tp_ops = {
1300 .recvmsg = pppol2tp_recvmsg,
1301 .mmap = sock_no_mmap,
1302 .ioctl = pppox_ioctl,
1303 +#ifdef CONFIG_COMPAT
1304 + .compat_ioctl = pppox_compat_ioctl,
1305 +#endif
1306 };
1307
1308 static const struct pppox_proto pppol2tp_proto = {
1309 diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
1310 index d2932dc4c83d..36e4dcdac8dc 100644
1311 --- a/net/sched/act_ife.c
1312 +++ b/net/sched/act_ife.c
1313 @@ -477,6 +477,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
1314 int ret = 0;
1315 int err;
1316
1317 + if (!nla)
1318 + return -EINVAL;
1319 +
1320 err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
1321 if (err < 0)
1322 return err;
1323 diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
1324 index 5bfa79ee657c..17a0838f8074 100644
1325 --- a/net/sched/sch_codel.c
1326 +++ b/net/sched/sch_codel.c
1327 @@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
1328 struct Qdisc *sch = ctx;
1329 struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
1330
1331 - if (skb)
1332 + if (skb) {
1333 sch->qstats.backlog -= qdisc_pkt_len(skb);
1334 -
1335 - prefetch(&skb->end); /* we'll need skb_shinfo() */
1336 + prefetch(&skb->end); /* we'll need skb_shinfo() */
1337 + }
1338 return skb;
1339 }
1340
1341 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
1342 index b7c539a51da3..63a913b23873 100644
1343 --- a/net/tipc/netlink_compat.c
1344 +++ b/net/tipc/netlink_compat.c
1345 @@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
1346 int rep_type;
1347 int rep_size;
1348 int req_type;
1349 + int req_size;
1350 struct net *net;
1351 struct sk_buff *rep;
1352 struct tlv_desc *req;
1353 @@ -252,7 +253,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
1354 int err;
1355 struct sk_buff *arg;
1356
1357 - if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
1358 + if (msg->req_type && (!msg->req_size ||
1359 + !TLV_CHECK_TYPE(msg->req, msg->req_type)))
1360 return -EINVAL;
1361
1362 msg->rep = tipc_tlv_alloc(msg->rep_size);
1363 @@ -345,7 +347,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
1364 {
1365 int err;
1366
1367 - if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
1368 + if (msg->req_type && (!msg->req_size ||
1369 + !TLV_CHECK_TYPE(msg->req, msg->req_type)))
1370 return -EINVAL;
1371
1372 err = __tipc_nl_compat_doit(cmd, msg);
1373 @@ -1267,8 +1270,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
1374 goto send;
1375 }
1376
1377 - len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1378 - if (!len || !TLV_OK(msg.req, len)) {
1379 + msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1380 + if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
1381 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
1382 err = -EOPNOTSUPP;
1383 goto send;
1384 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
1385 index 95326c6a7a24..09782ff427d0 100644
1386 --- a/tools/objtool/check.c
1387 +++ b/tools/objtool/check.c
1388 @@ -165,6 +165,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
1389 "__reiserfs_panic",
1390 "lbug_with_loc",
1391 "fortify_panic",
1392 + "machine_real_restart",
1393 + "rewind_stack_do_exit",
1394 };
1395
1396 if (func->bind == STB_WEAK)