Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0235-4.9.136-all-fixes.patch



Revision 3245
Mon Nov 12 12:10:50 2018 UTC by niro
File size: 182625 bytes
-linux-4.9.136
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index f9f67be8d3c3..c708a50b060e 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -313,7 +313,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6 This facility can be used to prevent such uncontrolled
7 GPE floodings.
8 Format: <int>
9 - Support masking of GPEs numbered from 0x00 to 0x7f.
10
11 acpi_no_auto_serialize [HW,ACPI]
12 Disable auto-serialization of AML methods
13 diff --git a/Makefile b/Makefile
14 index 3678e4d19ebc..79b8f3a44f74 100644
15 --- a/Makefile
16 +++ b/Makefile
17 @@ -1,6 +1,6 @@
18 VERSION = 4
19 PATCHLEVEL = 9
20 -SUBLEVEL = 135
21 +SUBLEVEL = 136
22 EXTRAVERSION =
23 NAME = Roaring Lionus
24
25 diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
26 index 9d5dc4fda3c1..3f7d1b74c5e0 100644
27 --- a/arch/arm/boot/compressed/efi-header.S
28 +++ b/arch/arm/boot/compressed/efi-header.S
29 @@ -17,14 +17,12 @@
30 @ there.
31 .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
32 #else
33 - mov r0, r0
34 + W(mov) r0, r0
35 #endif
36 .endm
37
38 .macro __EFI_HEADER
39 #ifdef CONFIG_EFI_STUB
40 - b __efi_start
41 -
42 .set start_offset, __efi_start - start
43 .org start + 0x3c
44 @
45 diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
46 index fc6d541549a2..2d7f2bb0d66a 100644
47 --- a/arch/arm/boot/compressed/head.S
48 +++ b/arch/arm/boot/compressed/head.S
49 @@ -130,19 +130,22 @@ start:
50 .rept 7
51 __nop
52 .endr
53 - ARM( mov r0, r0 )
54 - ARM( b 1f )
55 - THUMB( badr r12, 1f )
56 - THUMB( bx r12 )
57 +#ifndef CONFIG_THUMB2_KERNEL
58 + mov r0, r0
59 +#else
60 + AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
61 + M_CLASS( nop.w ) @ M: already in Thumb2 mode
62 + .thumb
63 +#endif
64 + W(b) 1f
65
66 .word _magic_sig @ Magic numbers to help the loader
67 .word _magic_start @ absolute load/run zImage address
68 .word _magic_end @ zImage end address
69 .word 0x04030201 @ endianness flag
70
71 - THUMB( .thumb )
72 -1: __EFI_HEADER
73 -
74 + __EFI_HEADER
75 +1:
76 ARM_BE8( setend be ) @ go BE8 if compiled for BE8
77 AR_CLASS( mrs r9, cpsr )
78 #ifdef CONFIG_ARM_VIRT_EXT
79 diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
80 index c51b88ee3cec..31563007772c 100644
81 --- a/arch/arm/boot/dts/bcm283x.dtsi
82 +++ b/arch/arm/boot/dts/bcm283x.dtsi
83 @@ -3,6 +3,11 @@
84 #include <dt-bindings/clock/bcm2835-aux.h>
85 #include <dt-bindings/gpio/gpio.h>
86
87 +/* firmware-provided startup stubs live here, where the secondary CPUs are
88 + * spinning.
89 + */
90 +/memreserve/ 0x00000000 0x00001000;
91 +
92 /* This include file covers the common peripherals and configuration between
93 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
94 * bcm2835.dtsi and bcm2836.dtsi.
95 diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
96 index d0560e8cd6de..547369c69e96 100644
97 --- a/arch/arm/boot/dts/bcm63138.dtsi
98 +++ b/arch/arm/boot/dts/bcm63138.dtsi
99 @@ -105,21 +105,23 @@
100 global_timer: timer@1e200 {
101 compatible = "arm,cortex-a9-global-timer";
102 reg = <0x1e200 0x20>;
103 - interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
104 + interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
105 clocks = <&axi_clk>;
106 };
107
108 local_timer: local-timer@1e600 {
109 compatible = "arm,cortex-a9-twd-timer";
110 reg = <0x1e600 0x20>;
111 - interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
112 + interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
113 + IRQ_TYPE_EDGE_RISING)>;
114 clocks = <&axi_clk>;
115 };
116
117 twd_watchdog: watchdog@1e620 {
118 compatible = "arm,cortex-a9-twd-wdt";
119 reg = <0x1e620 0x20>;
120 - interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
121 + interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
122 + IRQ_TYPE_LEVEL_HIGH)>;
123 };
124
125 armpll: armpll {
126 @@ -157,7 +159,7 @@
127 serial0: serial@600 {
128 compatible = "brcm,bcm6345-uart";
129 reg = <0x600 0x1b>;
130 - interrupts = <GIC_SPI 32 0>;
131 + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
132 clocks = <&periph_clk>;
133 clock-names = "periph";
134 status = "disabled";
135 @@ -166,7 +168,7 @@
136 serial1: serial@620 {
137 compatible = "brcm,bcm6345-uart";
138 reg = <0x620 0x1b>;
139 - interrupts = <GIC_SPI 33 0>;
140 + interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
141 clocks = <&periph_clk>;
142 clock-names = "periph";
143 status = "disabled";
144 @@ -179,7 +181,7 @@
145 reg = <0x2000 0x600>, <0xf0 0x10>;
146 reg-names = "nand", "nand-int-base";
147 status = "disabled";
148 - interrupts = <GIC_SPI 38 0>;
149 + interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
150 interrupt-names = "nand";
151 };
152
153 diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
154 index c05e7cfd0cbc..c8a6a6868c46 100644
155 --- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
156 +++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
157 @@ -130,6 +130,17 @@
158 };
159 };
160
161 +&cpu0 {
162 + /* CPU rated to 1GHz, not 1.2GHz as per the default settings */
163 + operating-points = <
164 + /* kHz uV */
165 + 166666 850000
166 + 400000 900000
167 + 800000 1050000
168 + 1000000 1200000
169 + >;
170 +};
171 +
172 &esdhc1 {
173 pinctrl-names = "default";
174 pinctrl-0 = <&pinctrl_esdhc1>;
175 diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
176 index ff0eed23ddf1..66e5d8765601 100644
177 --- a/arch/arm/mm/ioremap.c
178 +++ b/arch/arm/mm/ioremap.c
179 @@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
180
181 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
182 {
183 - BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
184 + BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
185
186 return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
187 PCI_IO_VIRT_BASE + offset + SZ_64K,
188 diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
189 index 77429d1622b3..711d9b8465b8 100644
190 --- a/arch/mips/include/uapi/asm/inst.h
191 +++ b/arch/mips/include/uapi/asm/inst.h
192 @@ -964,7 +964,7 @@ struct mm16_r3_format { /* Load from global pointer format */
193 struct mm16_r5_format { /* Load/store from stack pointer format */
194 __BITFIELD_FIELD(unsigned int opcode : 6,
195 __BITFIELD_FIELD(unsigned int rt : 5,
196 - __BITFIELD_FIELD(signed int simmediate : 5,
197 + __BITFIELD_FIELD(unsigned int imm : 5,
198 __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
199 ;))))
200 };
201 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
202 index ba315e523b33..1cc133e7026f 100644
203 --- a/arch/mips/kernel/process.c
204 +++ b/arch/mips/kernel/process.c
205 @@ -212,7 +212,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
206 if (ip->mm16_r5_format.rt != 31)
207 return 0;
208
209 - *poff = ip->mm16_r5_format.simmediate;
210 + *poff = ip->mm16_r5_format.imm;
211 *poff = (*poff << 2) / sizeof(ulong);
212 return 1;
213
214 @@ -346,6 +346,7 @@ static int get_frame_info(struct mips_frame_info *info)
215 bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
216 union mips_instruction insn, *ip, *ip_end;
217 const unsigned int max_insns = 128;
218 + unsigned int last_insn_size = 0;
219 unsigned int i;
220
221 info->pc_offset = -1;
222 @@ -357,15 +358,19 @@ static int get_frame_info(struct mips_frame_info *info)
223
224 ip_end = (void *)ip + info->func_size;
225
226 - for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
227 + for (i = 0; i < max_insns && ip < ip_end; i++) {
228 + ip = (void *)ip + last_insn_size;
229 if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
230 insn.halfword[0] = 0;
231 insn.halfword[1] = ip->halfword[0];
232 + last_insn_size = 2;
233 } else if (is_mmips) {
234 insn.halfword[0] = ip->halfword[1];
235 insn.halfword[1] = ip->halfword[0];
236 + last_insn_size = 4;
237 } else {
238 insn.word = ip->word;
239 + last_insn_size = 4;
240 }
241
242 if (is_jump_ins(&insn))
243 @@ -387,8 +392,6 @@ static int get_frame_info(struct mips_frame_info *info)
244 tmp = (ip->halfword[0] >> 1);
245 info->frame_size = -(signed short)(tmp & 0xf);
246 }
247 - ip = (void *) &ip->halfword[1];
248 - ip--;
249 } else
250 #endif
251 info->frame_size = - ip->i_format.simmediate;
252 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
253 index 8b4152f3a764..cef42d4be292 100644
254 --- a/arch/sparc/Kconfig
255 +++ b/arch/sparc/Kconfig
256 @@ -290,9 +290,13 @@ config NUMA
257 depends on SPARC64 && SMP
258
259 config NODES_SHIFT
260 - int
261 - default "4"
262 + int "Maximum NUMA Nodes (as a power of 2)"
263 + range 4 5 if SPARC64
264 + default "5"
265 depends on NEED_MULTIPLE_NODES
266 + help
267 + Specify the maximum number of NUMA Nodes available on the target
268 + system. Increases memory reserved to accommodate various tables.
269
270 # Some NUMA nodes have memory ranges that span
271 # other nodes. Even though a pfn is valid and
272 diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
273 index b2722ed31053..349cb83f7b5f 100644
274 --- a/arch/sparc/mm/tlb.c
275 +++ b/arch/sparc/mm/tlb.c
276 @@ -163,13 +163,10 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
277 pte_unmap(pte);
278 }
279
280 -void set_pmd_at(struct mm_struct *mm, unsigned long addr,
281 - pmd_t *pmdp, pmd_t pmd)
282 -{
283 - pmd_t orig = *pmdp;
284 -
285 - *pmdp = pmd;
286
287 +static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
288 + pmd_t orig, pmd_t pmd)
289 +{
290 if (mm == &init_mm)
291 return;
292
293 @@ -219,6 +216,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
294 }
295 }
296
297 +void set_pmd_at(struct mm_struct *mm, unsigned long addr,
298 + pmd_t *pmdp, pmd_t pmd)
299 +{
300 + pmd_t orig = *pmdp;
301 +
302 + *pmdp = pmd;
303 + __set_pmd_acct(mm, addr, orig, pmd);
304 +}
305 +
306 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
307 unsigned long address, pmd_t *pmdp, pmd_t pmd)
308 {
309 @@ -227,6 +233,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
310 do {
311 old = *pmdp;
312 } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
313 + __set_pmd_acct(vma->vm_mm, address, old, pmd);
314
315 return old;
316 }
317 diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
318 index 6bc36944a8c1..8c2a9fa0caf3 100644
319 --- a/arch/x86/events/intel/uncore_snbep.c
320 +++ b/arch/x86/events/intel/uncore_snbep.c
321 @@ -3767,16 +3767,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
322 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
323 },
324 { /* M3UPI0 Link 0 */
325 - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
326 - .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
327 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
328 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
329 },
330 { /* M3UPI0 Link 1 */
331 - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
332 - .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
333 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
334 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
335 },
336 { /* M3UPI1 Link 2 */
337 - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
338 - .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
339 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
340 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
341 },
342 { /* end: all zeroes */ }
343 };
344 diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
345 index 25152843dd1f..8554f960e21b 100644
346 --- a/arch/x86/include/asm/fixmap.h
347 +++ b/arch/x86/include/asm/fixmap.h
348 @@ -14,16 +14,6 @@
349 #ifndef _ASM_X86_FIXMAP_H
350 #define _ASM_X86_FIXMAP_H
351
352 -/*
353 - * Exposed to assembly code for setting up initial page tables. Cannot be
354 - * calculated in assembly code (fixmap entries are an enum), but is sanity
355 - * checked in the actual fixmap C code to make sure that the fixmap is
356 - * covered fully.
357 - */
358 -#define FIXMAP_PMD_NUM 2
359 -/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
360 -#define FIXMAP_PMD_TOP 507
361 -
362 #ifndef __ASSEMBLY__
363 #include <linux/kernel.h>
364 #include <asm/acpi.h>
365 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
366 index 84f58de08c2b..2cb5d0f13641 100644
367 --- a/arch/x86/include/asm/percpu.h
368 +++ b/arch/x86/include/asm/percpu.h
369 @@ -184,22 +184,22 @@ do { \
370 typeof(var) pfo_ret__; \
371 switch (sizeof(var)) { \
372 case 1: \
373 - asm(op "b "__percpu_arg(1)",%0" \
374 + asm volatile(op "b "__percpu_arg(1)",%0"\
375 : "=q" (pfo_ret__) \
376 : "m" (var)); \
377 break; \
378 case 2: \
379 - asm(op "w "__percpu_arg(1)",%0" \
380 + asm volatile(op "w "__percpu_arg(1)",%0"\
381 : "=r" (pfo_ret__) \
382 : "m" (var)); \
383 break; \
384 case 4: \
385 - asm(op "l "__percpu_arg(1)",%0" \
386 + asm volatile(op "l "__percpu_arg(1)",%0"\
387 : "=r" (pfo_ret__) \
388 : "m" (var)); \
389 break; \
390 case 8: \
391 - asm(op "q "__percpu_arg(1)",%0" \
392 + asm volatile(op "q "__percpu_arg(1)",%0"\
393 : "=r" (pfo_ret__) \
394 : "m" (var)); \
395 break; \
396 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
397 index d5c4df98aac3..221a32ed1372 100644
398 --- a/arch/x86/include/asm/pgtable_64.h
399 +++ b/arch/x86/include/asm/pgtable_64.h
400 @@ -13,14 +13,13 @@
401 #include <asm/processor.h>
402 #include <linux/bitops.h>
403 #include <linux/threads.h>
404 -#include <asm/fixmap.h>
405
406 extern pud_t level3_kernel_pgt[512];
407 extern pud_t level3_ident_pgt[512];
408 extern pmd_t level2_kernel_pgt[512];
409 extern pmd_t level2_fixmap_pgt[512];
410 extern pmd_t level2_ident_pgt[512];
411 -extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
412 +extern pte_t level1_fixmap_pgt[512];
413 extern pgd_t init_level4_pgt[];
414
415 #define swapper_pg_dir init_level4_pgt
416 diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
417 index 455d8ada9b9a..d39cfb2c6b63 100644
418 --- a/arch/x86/kernel/cpu/cyrix.c
419 +++ b/arch/x86/kernel/cpu/cyrix.c
420 @@ -253,6 +253,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
421 break;
422
423 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
424 + case 11: /* GX1 with inverted Device ID */
425 #ifdef CONFIG_PCI
426 {
427 u32 vendor, device;
428 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
429 index b0d6697ab153..9d72cf547c88 100644
430 --- a/arch/x86/kernel/head_64.S
431 +++ b/arch/x86/kernel/head_64.S
432 @@ -23,7 +23,6 @@
433 #include "../entry/calling.h"
434 #include <asm/export.h>
435 #include <asm/nospec-branch.h>
436 -#include <asm/fixmap.h>
437
438 #ifdef CONFIG_PARAVIRT
439 #include <asm/asm-offsets.h>
440 @@ -494,20 +493,13 @@ NEXT_PAGE(level2_kernel_pgt)
441 KERNEL_IMAGE_SIZE/PMD_SIZE)
442
443 NEXT_PAGE(level2_fixmap_pgt)
444 - .fill (512 - 4 - FIXMAP_PMD_NUM),8,0
445 - pgtno = 0
446 - .rept (FIXMAP_PMD_NUM)
447 - .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
448 - + _PAGE_TABLE;
449 - pgtno = pgtno + 1
450 - .endr
451 - /* 6 MB reserved space + a 2MB hole */
452 - .fill 4,8,0
453 + .fill 506,8,0
454 + .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
455 + /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
456 + .fill 5,8,0
457
458 NEXT_PAGE(level1_fixmap_pgt)
459 - .rept (FIXMAP_PMD_NUM)
460 .fill 512,8,0
461 - .endr
462
463 #undef PMDS
464
465 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
466 index 29d465627919..bf9552bebb3c 100644
467 --- a/arch/x86/kernel/paravirt.c
468 +++ b/arch/x86/kernel/paravirt.c
469 @@ -90,7 +90,7 @@ unsigned paravirt_patch_call(void *insnbuf,
470
471 if (len < 5) {
472 #ifdef CONFIG_RETPOLINE
473 - WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
474 + WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
475 #endif
476 return len; /* call too long for patch site */
477 }
478 @@ -110,7 +110,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
479
480 if (len < 5) {
481 #ifdef CONFIG_RETPOLINE
482 - WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
483 + WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
484 #endif
485 return len; /* call too long for patch site */
486 }
487 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
488 index f8a0518d2810..89d1190b9d94 100644
489 --- a/arch/x86/kernel/time.c
490 +++ b/arch/x86/kernel/time.c
491 @@ -24,7 +24,7 @@
492 #include <asm/time.h>
493
494 #ifdef CONFIG_X86_64
495 -__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
496 +__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
497 #endif
498
499 unsigned long profile_pc(struct pt_regs *regs)
500 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
501 index 8cbed30feb67..e30baa8ad94f 100644
502 --- a/arch/x86/mm/pgtable.c
503 +++ b/arch/x86/mm/pgtable.c
504 @@ -536,15 +536,6 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
505 {
506 unsigned long address = __fix_to_virt(idx);
507
508 -#ifdef CONFIG_X86_64
509 - /*
510 - * Ensure that the static initial page tables are covering the
511 - * fixmap completely.
512 - */
513 - BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
514 - (FIXMAP_PMD_NUM * PTRS_PER_PTE));
515 -#endif
516 -
517 if (idx >= __end_of_fixed_addresses) {
518 BUG();
519 return;
520 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
521 index ebceaba20ad1..c92f75f7ae33 100644
522 --- a/arch/x86/xen/mmu.c
523 +++ b/arch/x86/xen/mmu.c
524 @@ -1936,7 +1936,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
525 * L3_k[511] -> level2_fixmap_pgt */
526 convert_pfn_mfn(level3_kernel_pgt);
527
528 - /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
529 + /* L3_k[511][506] -> level1_fixmap_pgt */
530 convert_pfn_mfn(level2_fixmap_pgt);
531 }
532 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
533 @@ -1970,11 +1970,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
534 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
535 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
536 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
537 -
538 - for (i = 0; i < FIXMAP_PMD_NUM; i++) {
539 - set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
540 - PAGE_KERNEL_RO);
541 - }
542 + set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
543
544 /* Pin down new L4 */
545 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
546 diff --git a/crypto/shash.c b/crypto/shash.c
547 index d5bd2f05d036..4f047c7eeca7 100644
548 --- a/crypto/shash.c
549 +++ b/crypto/shash.c
550 @@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
551 int err;
552
553 absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
554 - buffer = kmalloc(absize, GFP_KERNEL);
555 + buffer = kmalloc(absize, GFP_ATOMIC);
556 if (!buffer)
557 return -ENOMEM;
558
559 diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
560 index cf05ae973381..a36d0739dbfe 100644
561 --- a/drivers/acpi/sysfs.c
562 +++ b/drivers/acpi/sysfs.c
563 @@ -724,14 +724,8 @@ end:
564 * interface:
565 * echo unmask > /sys/firmware/acpi/interrupts/gpe00
566 */
567 -
568 -/*
569 - * Currently, the GPE flooding prevention only supports to mask the GPEs
570 - * numbered from 00 to 7f.
571 - */
572 -#define ACPI_MASKABLE_GPE_MAX 0x80
573 -
574 -static u64 __initdata acpi_masked_gpes;
575 +#define ACPI_MASKABLE_GPE_MAX 0xFF
576 +static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
577
578 static int __init acpi_gpe_set_masked_gpes(char *val)
579 {
580 @@ -739,7 +733,7 @@ static int __init acpi_gpe_set_masked_gpes(char *val)
581
582 if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
583 return -EINVAL;
584 - acpi_masked_gpes |= ((u64)1<<gpe);
585 + set_bit(gpe, acpi_masked_gpes_map);
586
587 return 1;
588 }
589 @@ -751,15 +745,11 @@ void __init acpi_gpe_apply_masked_gpes(void)
590 acpi_status status;
591 u8 gpe;
592
593 - for (gpe = 0;
594 - gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
595 - gpe++) {
596 - if (acpi_masked_gpes & ((u64)1<<gpe)) {
597 - status = acpi_get_gpe_device(gpe, &handle);
598 - if (ACPI_SUCCESS(status)) {
599 - pr_info("Masking GPE 0x%x.\n", gpe);
600 - (void)acpi_mask_gpe(handle, gpe, TRUE);
601 - }
602 + for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
603 + status = acpi_get_gpe_device(gpe, &handle);
604 + if (ACPI_SUCCESS(status)) {
605 + pr_info("Masking GPE 0x%x.\n", gpe);
606 + (void)acpi_mask_gpe(handle, gpe, TRUE);
607 }
608 }
609 }
610 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
611 index faa91f8a17a5..5408a292078b 100644
612 --- a/drivers/ata/ahci.c
613 +++ b/drivers/ata/ahci.c
614 @@ -624,8 +624,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
615 static int ahci_pci_reset_controller(struct ata_host *host)
616 {
617 struct pci_dev *pdev = to_pci_dev(host->dev);
618 + int rc;
619
620 - ahci_reset_controller(host);
621 + rc = ahci_reset_controller(host);
622 + if (rc)
623 + return rc;
624
625 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
626 struct ahci_host_priv *hpriv = host->private_data;
627 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
628 index 73d636d35961..a166359ad5d4 100644
629 --- a/drivers/ata/libata-core.c
630 +++ b/drivers/ata/libata-core.c
631 @@ -6781,7 +6781,7 @@ static int __init ata_parse_force_one(char **cur,
632 }
633
634 force_ent->port = simple_strtoul(id, &endp, 10);
635 - if (p == endp || *endp != '\0') {
636 + if (id == endp || *endp != '\0') {
637 *reason = "invalid port/link";
638 return -EINVAL;
639 }
640 diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
641 index f72d601e300a..e83a3d3421b9 100644
642 --- a/drivers/ata/sata_rcar.c
643 +++ b/drivers/ata/sata_rcar.c
644 @@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
645 dev_err(&pdev->dev, "failed to get access to sata clock\n");
646 return PTR_ERR(priv->clk);
647 }
648 - clk_prepare_enable(priv->clk);
649 +
650 + ret = clk_prepare_enable(priv->clk);
651 + if (ret)
652 + return ret;
653
654 host = ata_host_alloc(&pdev->dev, 1);
655 if (!host) {
656 @@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev)
657 struct ata_host *host = dev_get_drvdata(dev);
658 struct sata_rcar_priv *priv = host->private_data;
659 void __iomem *base = priv->base;
660 + int ret;
661
662 - clk_prepare_enable(priv->clk);
663 + ret = clk_prepare_enable(priv->clk);
664 + if (ret)
665 + return ret;
666
667 /* ack and mask */
668 iowrite32(0, base + SATAINTSTAT_REG);
669 @@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev)
670 {
671 struct ata_host *host = dev_get_drvdata(dev);
672 struct sata_rcar_priv *priv = host->private_data;
673 + int ret;
674
675 - clk_prepare_enable(priv->clk);
676 + ret = clk_prepare_enable(priv->clk);
677 + if (ret)
678 + return ret;
679
680 sata_rcar_setup_port(host);
681
682 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
683 index 4d30da269060..42a53956aefe 100644
684 --- a/drivers/block/nbd.c
685 +++ b/drivers/block/nbd.c
686 @@ -269,7 +269,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
687 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
688 {
689 struct request *req = blk_mq_rq_from_pdu(cmd);
690 - int result, flags;
691 + int result;
692 struct nbd_request request;
693 unsigned long size = blk_rq_bytes(req);
694 struct bio *bio;
695 @@ -309,7 +309,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
696 if (type != NBD_CMD_WRITE)
697 return 0;
698
699 - flags = 0;
700 bio = req->bio;
701 while (bio) {
702 struct bio *next = bio->bi_next;
703 @@ -318,9 +317,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
704
705 bio_for_each_segment(bvec, bio, iter) {
706 bool is_last = !next && bio_iter_last(bvec, iter);
707 + int flags = is_last ? 0 : MSG_MORE;
708
709 - if (is_last)
710 - flags = MSG_MORE;
711 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
712 cmd, bvec.bv_len);
713 result = sock_send_bvec(nbd, &bvec, flags);
714 diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
715 index cdc092a1d9ef..07fb667e258f 100644
716 --- a/drivers/clk/samsung/clk-exynos5420.c
717 +++ b/drivers/clk/samsung/clk-exynos5420.c
718 @@ -987,7 +987,7 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
719 GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
720 GATE_BUS_TOP, 16, 0, 0),
721 GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
722 - GATE_BUS_TOP, 17, 0, 0),
723 + GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0),
724 GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
725 GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
726 GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
727 diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
728 index ee1724806f46..ab8dcfea0680 100644
729 --- a/drivers/gpio/gpio-mxs.c
730 +++ b/drivers/gpio/gpio-mxs.c
731 @@ -32,8 +32,6 @@
732 #include <linux/platform_device.h>
733 #include <linux/slab.h>
734 #include <linux/gpio/driver.h>
735 -/* FIXME: for gpio_get_value(), replace this by direct register read */
736 -#include <linux/gpio.h>
737 #include <linux/module.h>
738
739 #define MXS_SET 0x4
740 @@ -94,7 +92,7 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
741 port->both_edges &= ~pin_mask;
742 switch (type) {
743 case IRQ_TYPE_EDGE_BOTH:
744 - val = gpio_get_value(port->gc.base + d->hwirq);
745 + val = port->gc.get(&port->gc, d->hwirq);
746 if (val)
747 edge = GPIO_INT_FALL_EDGE;
748 else
749 diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
750 index e1ec498a6b6e..35f40255644d 100644
751 --- a/drivers/gpu/drm/bochs/bochs_fbdev.c
752 +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
753 @@ -138,6 +138,7 @@ static int bochsfb_create(struct drm_fb_helper *helper,
754 info->fix.smem_start = 0;
755 info->fix.smem_len = size;
756
757 + bochs->fb.initialized = true;
758 return 0;
759 }
760
761 @@ -155,7 +156,6 @@ static int bochs_fbdev_destroy(struct bochs_device *bochs)
762 gfb->obj = NULL;
763 }
764
765 - drm_fb_helper_fini(&bochs->fb.helper);
766 drm_framebuffer_unregister_private(&gfb->base);
767 drm_framebuffer_cleanup(&gfb->base);
768
769 @@ -188,7 +188,6 @@ int bochs_fbdev_init(struct bochs_device *bochs)
770 if (ret)
771 goto fini;
772
773 - bochs->fb.initialized = true;
774 return 0;
775
776 fini:
777 @@ -198,9 +197,9 @@ fini:
778
779 void bochs_fbdev_fini(struct bochs_device *bochs)
780 {
781 - if (!bochs->fb.initialized)
782 - return;
783 + if (bochs->fb.initialized)
784 + bochs_fbdev_destroy(bochs);
785
786 - bochs_fbdev_destroy(bochs);
787 + drm_fb_helper_fini(&bochs->fb.helper);
788 bochs->fb.initialized = false;
789 }
790 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
791 index 37ba5f51378e..83d2f43b5a2f 100644
792 --- a/drivers/gpu/drm/drm_edid.c
793 +++ b/drivers/gpu/drm/drm_edid.c
794 @@ -107,6 +107,9 @@ static const struct edid_quirk {
795 /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
796 { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
797
798 + /* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
799 + { "BOE", 0x78b, EDID_QUIRK_FORCE_6BPC },
800 +
801 /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
802 { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
803
804 diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
805 index 7145127513c4..795660e29b2c 100644
806 --- a/drivers/gpu/drm/msm/msm_gem.c
807 +++ b/drivers/gpu/drm/msm/msm_gem.c
808 @@ -118,17 +118,19 @@ static void put_pages(struct drm_gem_object *obj)
809 struct msm_gem_object *msm_obj = to_msm_bo(obj);
810
811 if (msm_obj->pages) {
812 - /* For non-cached buffers, ensure the new pages are clean
813 - * because display controller, GPU, etc. are not coherent:
814 - */
815 - if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
816 - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
817 - msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
818 + if (msm_obj->sgt) {
819 + /* For non-cached buffers, ensure the new
820 + * pages are clean because display controller,
821 + * GPU, etc. are not coherent:
822 + */
823 + if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
824 + dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
825 + msm_obj->sgt->nents,
826 + DMA_BIDIRECTIONAL);
827
828 - if (msm_obj->sgt)
829 sg_free_table(msm_obj->sgt);
830 -
831 - kfree(msm_obj->sgt);
832 + kfree(msm_obj->sgt);
833 + }
834
835 if (use_pages(obj))
836 drm_gem_put_pages(obj, msm_obj->pages, true, false);
837 diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
838 index b9539f7c5e9a..99c813a4ec1f 100644
839 --- a/drivers/gpu/ipu-v3/ipu-common.c
840 +++ b/drivers/gpu/ipu-v3/ipu-common.c
841 @@ -715,15 +715,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
842 spin_lock_irqsave(&ipu->lock, flags);
843
844 val = ipu_cm_read(ipu, IPU_CONF);
845 - if (vdi) {
846 + if (vdi)
847 val |= IPU_CONF_IC_INPUT;
848 - } else {
849 + else
850 val &= ~IPU_CONF_IC_INPUT;
851 - if (csi_id == 1)
852 - val |= IPU_CONF_CSI_SEL;
853 - else
854 - val &= ~IPU_CONF_CSI_SEL;
855 - }
856 +
857 + if (csi_id == 1)
858 + val |= IPU_CONF_CSI_SEL;
859 + else
860 + val &= ~IPU_CONF_CSI_SEL;
861 +
862 ipu_cm_write(ipu, val, IPU_CONF);
863
864 spin_unlock_irqrestore(&ipu->lock, flags);
865 diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
866 index f283b714aa79..7ed09865cb4b 100644
867 --- a/drivers/i2c/busses/i2c-bcm2835.c
868 +++ b/drivers/i2c/busses/i2c-bcm2835.c
869 @@ -128,7 +128,9 @@ static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
870 }
871
872 if (val & BCM2835_I2C_S_DONE) {
873 - if (i2c_dev->curr_msg->flags & I2C_M_RD) {
874 + if (!i2c_dev->curr_msg) {
875 + dev_err(i2c_dev->dev, "Got unexpected interrupt (from firmware?)\n");
876 + } else if (i2c_dev->curr_msg->flags & I2C_M_RD) {
877 bcm2835_drain_rxfifo(i2c_dev);
878 val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
879 }
880 diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
881 index 64799ad7ebad..7fd24949c0c1 100644
882 --- a/drivers/iio/adc/axp288_adc.c
883 +++ b/drivers/iio/adc/axp288_adc.c
884 @@ -28,6 +28,8 @@
885 #include <linux/iio/driver.h>
886
887 #define AXP288_ADC_EN_MASK 0xF1
888 +#define AXP288_ADC_TS_PIN_GPADC 0xF2
889 +#define AXP288_ADC_TS_PIN_ON 0xF3
890
891 enum axp288_adc_id {
892 AXP288_ADC_TS,
893 @@ -121,6 +123,16 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
894 return IIO_VAL_INT;
895 }
896
897 +static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
898 + unsigned long address)
899 +{
900 + /* channels other than GPADC do not need to switch TS pin */
901 + if (address != AXP288_GP_ADC_H)
902 + return 0;
903 +
904 + return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
905 +}
906 +
907 static int axp288_adc_read_raw(struct iio_dev *indio_dev,
908 struct iio_chan_spec const *chan,
909 int *val, int *val2, long mask)
910 @@ -131,7 +143,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
911 mutex_lock(&indio_dev->mlock);
912 switch (mask) {
913 case IIO_CHAN_INFO_RAW:
914 + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
915 + chan->address)) {
916 + dev_err(&indio_dev->dev, "GPADC mode\n");
917 + ret = -EINVAL;
918 + break;
919 + }
920 ret = axp288_adc_read_channel(val, chan->address, info->regmap);
921 + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
922 + chan->address))
923 + dev_err(&indio_dev->dev, "TS pin restore\n");
924 break;
925 default:
926 ret = -EINVAL;
927 @@ -141,6 +162,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
928 return ret;
929 }
930
931 +static int axp288_adc_set_state(struct regmap *regmap)
932 +{
933 + /* ADC should be always enabled for internal FG to function */
934 + if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
935 + return -EIO;
936 +
937 + return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
938 +}
939 +
940 static const struct iio_info axp288_adc_iio_info = {
941 .read_raw = &axp288_adc_read_raw,
942 .driver_module = THIS_MODULE,
943 @@ -169,7 +199,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
944 * Set ADC to enabled state at all time, including system suspend.
945 * otherwise internal fuel gauge functionality may be affected.
946 */
947 - ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
948 + ret = axp288_adc_set_state(axp20x->regmap);
949 if (ret) {
950 dev_err(&pdev->dev, "unable to enable ADC device\n");
951 return ret;
952 diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
953 index 2a4a62ebfd8d..cc002b958f7e 100644
954 --- a/drivers/iio/pressure/zpa2326.c
955 +++ b/drivers/iio/pressure/zpa2326.c
956 @@ -869,7 +869,6 @@ complete:
957 static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
958 struct zpa2326_private *private)
959 {
960 - int ret;
961 unsigned int val;
962 long timeout;
963
964 @@ -891,14 +890,11 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
965 /* Timed out. */
966 zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)",
967 timeout);
968 - ret = -ETIME;
969 - } else if (timeout < 0) {
970 - zpa2326_warn(indio_dev,
971 - "wait for one shot interrupt cancelled");
972 - ret = -ERESTARTSYS;
973 + return -ETIME;
974 }
975
976 - return ret;
977 + zpa2326_warn(indio_dev, "wait for one shot interrupt cancelled");
978 + return -ERESTARTSYS;
979 }
980
981 static int zpa2326_init_managed_irq(struct device *parent,
982 diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
983 index 7713ef089c3c..85be45e75710 100644
984 --- a/drivers/infiniband/core/ucm.c
985 +++ b/drivers/infiniband/core/ucm.c
986 @@ -46,6 +46,8 @@
987 #include <linux/mutex.h>
988 #include <linux/slab.h>
989
990 +#include <linux/nospec.h>
991 +
992 #include <asm/uaccess.h>
993
994 #include <rdma/ib.h>
995 @@ -1115,6 +1117,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
996
997 if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
998 return -EINVAL;
999 + hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucm_cmd_table));
1000
1001 if (hdr.in + sizeof(hdr) > len)
1002 return -EINVAL;
1003 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
1004 index fa9ef8ed5712..a4f4cd493265 100644
1005 --- a/drivers/infiniband/core/ucma.c
1006 +++ b/drivers/infiniband/core/ucma.c
1007 @@ -44,6 +44,8 @@
1008 #include <linux/module.h>
1009 #include <linux/nsproxy.h>
1010
1011 +#include <linux/nospec.h>
1012 +
1013 #include <rdma/rdma_user_cm.h>
1014 #include <rdma/ib_marshall.h>
1015 #include <rdma/rdma_cm.h>
1016 @@ -1637,6 +1639,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
1017
1018 if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
1019 return -EINVAL;
1020 + hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));
1021
1022 if (hdr.in + sizeof(hdr) > len)
1023 return -EINVAL;
1024 diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
1025 index 0e64b52af5b2..d28c4cf7c1ee 100644
1026 --- a/drivers/infiniband/core/verbs.c
1027 +++ b/drivers/infiniband/core/verbs.c
1028 @@ -1510,6 +1510,44 @@ EXPORT_SYMBOL(ib_dealloc_fmr);
1029
1030 /* Multicast groups */
1031
1032 +static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
1033 +{
1034 + struct ib_qp_init_attr init_attr = {};
1035 + struct ib_qp_attr attr = {};
1036 + int num_eth_ports = 0;
1037 + int port;
1038 +
1039 + /* If QP state >= init, it is assigned to a port and we can check this
1040 + * port only.
1041 + */
1042 + if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
1043 + if (attr.qp_state >= IB_QPS_INIT) {
1044 + if (qp->device->get_link_layer(qp->device, attr.port_num) !=
1045 + IB_LINK_LAYER_INFINIBAND)
1046 + return true;
1047 + goto lid_check;
1048 + }
1049 + }
1050 +
1051 + /* Can't get a quick answer, iterate over all ports */
1052 + for (port = 0; port < qp->device->phys_port_cnt; port++)
1053 + if (qp->device->get_link_layer(qp->device, port) !=
1054 + IB_LINK_LAYER_INFINIBAND)
1055 + num_eth_ports++;
1056 +
1057 + /* If we have at lease one Ethernet port, RoCE annex declares that
1058 + * multicast LID should be ignored. We can't tell at this step if the
1059 + * QP belongs to an IB or Ethernet port.
1060 + */
1061 + if (num_eth_ports)
1062 + return true;
1063 +
1064 + /* If all the ports are IB, we can check according to IB spec. */
1065 +lid_check:
1066 + return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1067 + lid == be16_to_cpu(IB_LID_PERMISSIVE));
1068 +}
1069 +
1070 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1071 {
1072 int ret;
1073 @@ -1517,8 +1555,7 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1074 if (!qp->device->attach_mcast)
1075 return -ENOSYS;
1076 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
1077 - lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1078 - lid == be16_to_cpu(IB_LID_PERMISSIVE))
1079 + !is_valid_mcast_lid(qp, lid))
1080 return -EINVAL;
1081
1082 ret = qp->device->attach_mcast(qp, gid, lid);
1083 @@ -1535,8 +1572,7 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1084 if (!qp->device->detach_mcast)
1085 return -ENOSYS;
1086 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
1087 - lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1088 - lid == be16_to_cpu(IB_LID_PERMISSIVE))
1089 + !is_valid_mcast_lid(qp, lid))
1090 return -EINVAL;
1091
1092 ret = qp->device->detach_mcast(qp, gid, lid);
1093 diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
1094 index 0d4878efd643..ddd3182138ac 100644
1095 --- a/drivers/infiniband/hw/mlx4/mr.c
1096 +++ b/drivers/infiniband/hw/mlx4/mr.c
1097 @@ -247,8 +247,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
1098 }
1099
1100 if (flags & IB_MR_REREG_ACCESS) {
1101 - if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
1102 - return -EPERM;
1103 + if (ib_access_writable(mr_access_flags) &&
1104 + !mmr->umem->writable) {
1105 + err = -EPERM;
1106 + goto release_mpt_entry;
1107 + }
1108
1109 err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
1110 convert_access(mr_access_flags));
1111 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
1112 index abb47e780070..f8f7a2191b98 100644
1113 --- a/drivers/infiniband/hw/mlx5/qp.c
1114 +++ b/drivers/infiniband/hw/mlx5/qp.c
1115 @@ -1523,6 +1523,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1116 u32 uidx = MLX5_IB_DEFAULT_UIDX;
1117 struct mlx5_ib_create_qp ucmd;
1118 struct mlx5_ib_qp_base *base;
1119 + int mlx5_st;
1120 void *qpc;
1121 u32 *in;
1122 int err;
1123 @@ -1538,6 +1539,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1124 spin_lock_init(&qp->sq.lock);
1125 spin_lock_init(&qp->rq.lock);
1126
1127 + mlx5_st = to_mlx5_st(init_attr->qp_type);
1128 + if (mlx5_st < 0)
1129 + return -EINVAL;
1130 +
1131 if (init_attr->rwq_ind_tbl) {
1132 if (!udata)
1133 return -ENOSYS;
1134 @@ -1665,7 +1670,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1135
1136 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
1137
1138 - MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
1139 + MLX5_SET(qpc, qpc, st, mlx5_st);
1140 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
1141
1142 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
1143 diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
1144 index 1c4e5b2e6835..527ca662da69 100644
1145 --- a/drivers/infiniband/sw/rxe/rxe_pool.c
1146 +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
1147 @@ -402,23 +402,25 @@ void *rxe_alloc(struct rxe_pool *pool)
1148
1149 kref_get(&pool->rxe->ref_cnt);
1150
1151 - if (atomic_inc_return(&pool->num_elem) > pool->max_elem) {
1152 - atomic_dec(&pool->num_elem);
1153 - rxe_dev_put(pool->rxe);
1154 - rxe_pool_put(pool);
1155 - return NULL;
1156 - }
1157 + if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
1158 + goto out_put_pool;
1159
1160 elem = kmem_cache_zalloc(pool_cache(pool),
1161 (pool->flags & RXE_POOL_ATOMIC) ?
1162 GFP_ATOMIC : GFP_KERNEL);
1163 if (!elem)
1164 - return NULL;
1165 + goto out_put_pool;
1166
1167 elem->pool = pool;
1168 kref_init(&elem->ref_cnt);
1169
1170 return elem;
1171 +
1172 +out_put_pool:
1173 + atomic_dec(&pool->num_elem);
1174 + rxe_dev_put(pool->rxe);
1175 + rxe_pool_put(pool);
1176 + return NULL;
1177 }
1178
1179 void rxe_elem_release(struct kref *kref)
1180 diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
1181 index ced416f5dffb..ef13082d6ca1 100644
1182 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
1183 +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
1184 @@ -729,13 +729,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
1185
1186 sge = ibwr->sg_list;
1187 for (i = 0; i < num_sge; i++, sge++) {
1188 - if (qp->is_user && copy_from_user(p, (__user void *)
1189 - (uintptr_t)sge->addr, sge->length))
1190 - return -EFAULT;
1191 -
1192 - else if (!qp->is_user)
1193 - memcpy(p, (void *)(uintptr_t)sge->addr,
1194 - sge->length);
1195 + memcpy(p, (void *)(uintptr_t)sge->addr,
1196 + sge->length);
1197
1198 p += sge->length;
1199 }
1200 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
1201 index 09396bd7b02d..63be3bcdc0e3 100644
1202 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
1203 +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
1204 @@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev)
1205 {
1206 struct ipoib_dev_priv *priv = netdev_priv(dev);
1207
1208 - WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
1209 - WARN_ONCE(!priv->path_dentry, "null path debug file\n");
1210 debugfs_remove(priv->mcg_dentry);
1211 debugfs_remove(priv->path_dentry);
1212 priv->mcg_dentry = priv->path_dentry = NULL;
1213 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1214 index 34122c96522b..ad3089c23e18 100644
1215 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1216 +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1217 @@ -974,19 +974,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
1218 */
1219 priv->dev->broadcast[8] = priv->pkey >> 8;
1220 priv->dev->broadcast[9] = priv->pkey & 0xff;
1221 -
1222 - /*
1223 - * Update the broadcast address in the priv->broadcast object,
1224 - * in case it already exists, otherwise no one will do that.
1225 - */
1226 - if (priv->broadcast) {
1227 - spin_lock_irq(&priv->lock);
1228 - memcpy(priv->broadcast->mcmember.mgid.raw,
1229 - priv->dev->broadcast + 4,
1230 - sizeof(union ib_gid));
1231 - spin_unlock_irq(&priv->lock);
1232 - }
1233 -
1234 return 0;
1235 }
1236
1237 @@ -1190,13 +1177,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
1238 ipoib_ib_dev_down(dev);
1239
1240 if (level == IPOIB_FLUSH_HEAVY) {
1241 - rtnl_lock();
1242 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
1243 ipoib_ib_dev_stop(dev);
1244
1245 - result = ipoib_ib_dev_open(dev);
1246 - rtnl_unlock();
1247 - if (result)
1248 + if (ipoib_ib_dev_open(dev))
1249 return;
1250
1251 if (netif_queue_stopped(dev))
1252 @@ -1236,7 +1220,9 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
1253 struct ipoib_dev_priv *priv =
1254 container_of(work, struct ipoib_dev_priv, flush_heavy);
1255
1256 + rtnl_lock();
1257 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
1258 + rtnl_unlock();
1259 }
1260
1261 void ipoib_ib_dev_cleanup(struct net_device *dev)
1262 diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1263 index a716482774db..b3119589a444 100644
1264 --- a/drivers/input/mouse/elan_i2c_core.c
1265 +++ b/drivers/input/mouse/elan_i2c_core.c
1266 @@ -1251,6 +1251,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1267 { "ELAN0611", 0 },
1268 { "ELAN0612", 0 },
1269 { "ELAN0618", 0 },
1270 + { "ELAN061C", 0 },
1271 { "ELAN061D", 0 },
1272 { "ELAN0622", 0 },
1273 { "ELAN1000", 0 },
1274 diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
1275 index 21dde5249085..c42523b7d5ed 100644
1276 --- a/drivers/mtd/spi-nor/spi-nor.c
1277 +++ b/drivers/mtd/spi-nor/spi-nor.c
1278 @@ -858,6 +858,12 @@ static const struct flash_info spi_nor_ids[] = {
1279
1280 /* ISSI */
1281 { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
1282 + { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
1283 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1284 + { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
1285 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1286 + { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
1287 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1288
1289 /* Macronix */
1290 { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
1291 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1292 index b1ea29d8ad1a..389d1db69a32 100644
1293 --- a/drivers/net/bonding/bond_main.c
1294 +++ b/drivers/net/bonding/bond_main.c
1295 @@ -2132,9 +2132,10 @@ static void bond_miimon_commit(struct bonding *bond)
1296 if (bond_update_speed_duplex(slave) &&
1297 bond_needs_speed_duplex(bond)) {
1298 slave->link = BOND_LINK_DOWN;
1299 - netdev_warn(bond->dev,
1300 - "failed to get link speed/duplex for %s\n",
1301 - slave->dev->name);
1302 + if (net_ratelimit())
1303 + netdev_warn(bond->dev,
1304 + "failed to get link speed/duplex for %s\n",
1305 + slave->dev->name);
1306 continue;
1307 }
1308 bond_set_slave_link_state(slave, BOND_LINK_UP,
1309 diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
1310 index b8df0f5e8c25..3f320f470345 100644
1311 --- a/drivers/net/bonding/bond_netlink.c
1312 +++ b/drivers/net/bonding/bond_netlink.c
1313 @@ -628,8 +628,7 @@ static int bond_fill_info(struct sk_buff *skb,
1314 goto nla_put_failure;
1315
1316 if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
1317 - sizeof(bond->params.ad_actor_system),
1318 - &bond->params.ad_actor_system))
1319 + ETH_ALEN, &bond->params.ad_actor_system))
1320 goto nla_put_failure;
1321 }
1322 if (!bond_3ad_get_active_agg_info(bond, &info)) {
1323 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1324 index 1d92e034febc..0c298878bf46 100644
1325 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1326 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1327 @@ -1482,8 +1482,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
1328 if (rc)
1329 return rc;
1330
1331 - ena_init_napi(adapter);
1332 -
1333 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
1334
1335 ena_refill_all_rx_bufs(adapter);
1336 @@ -1643,6 +1641,13 @@ static int ena_up(struct ena_adapter *adapter)
1337
1338 ena_setup_io_intr(adapter);
1339
1340 + /* napi poll functions should be initialized before running
1341 + * request_irq(), to handle a rare condition where there is a pending
1342 + * interrupt, causing the ISR to fire immediately while the poll
1343 + * function wasn't set yet, causing a null dereference
1344 + */
1345 + ena_init_napi(adapter);
1346 +
1347 rc = ena_request_io_irq(adapter);
1348 if (rc)
1349 goto err_req_irq;
1350 diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
1351 index b799c7ac899b..9e80a76c3dfe 100644
1352 --- a/drivers/net/ethernet/amd/declance.c
1353 +++ b/drivers/net/ethernet/amd/declance.c
1354 @@ -1030,6 +1030,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
1355 int i, ret;
1356 unsigned long esar_base;
1357 unsigned char *esar;
1358 + const char *desc;
1359
1360 if (dec_lance_debug && version_printed++ == 0)
1361 printk(version);
1362 @@ -1215,19 +1216,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
1363 */
1364 switch (type) {
1365 case ASIC_LANCE:
1366 - printk("%s: IOASIC onboard LANCE", name);
1367 + desc = "IOASIC onboard LANCE";
1368 break;
1369 case PMAD_LANCE:
1370 - printk("%s: PMAD-AA", name);
1371 + desc = "PMAD-AA";
1372 break;
1373 case PMAX_LANCE:
1374 - printk("%s: PMAX onboard LANCE", name);
1375 + desc = "PMAX onboard LANCE";
1376 break;
1377 }
1378 for (i = 0; i < 6; i++)
1379 dev->dev_addr[i] = esar[i * 4];
1380
1381 - printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
1382 + printk("%s: %s, addr = %pM, irq = %d\n",
1383 + name, desc, dev->dev_addr, dev->irq);
1384
1385 dev->netdev_ops = &lance_netdev_ops;
1386 dev->watchdog_timeo = 5*HZ;
1387 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1388 index 208e9dacfd34..a036f7039d76 100644
1389 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1390 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1391 @@ -5580,7 +5580,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
1392 }
1393
1394 if (link_re_init) {
1395 + mutex_lock(&bp->link_lock);
1396 rc = bnxt_update_phy_setting(bp);
1397 + mutex_unlock(&bp->link_lock);
1398 if (rc)
1399 netdev_warn(bp->dev, "failed to update phy settings\n");
1400 }
1401 @@ -6230,30 +6232,28 @@ static void bnxt_sp_task(struct work_struct *work)
1402 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
1403 bnxt_hwrm_port_qstats(bp);
1404
1405 - /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
1406 - * must be the last functions to be called before exiting.
1407 - */
1408 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
1409 - int rc = 0;
1410 + int rc;
1411
1412 + mutex_lock(&bp->link_lock);
1413 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
1414 &bp->sp_event))
1415 bnxt_hwrm_phy_qcaps(bp);
1416
1417 - bnxt_rtnl_lock_sp(bp);
1418 - if (test_bit(BNXT_STATE_OPEN, &bp->state))
1419 - rc = bnxt_update_link(bp, true);
1420 - bnxt_rtnl_unlock_sp(bp);
1421 + rc = bnxt_update_link(bp, true);
1422 + mutex_unlock(&bp->link_lock);
1423 if (rc)
1424 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
1425 rc);
1426 }
1427 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
1428 - bnxt_rtnl_lock_sp(bp);
1429 - if (test_bit(BNXT_STATE_OPEN, &bp->state))
1430 - bnxt_get_port_module_status(bp);
1431 - bnxt_rtnl_unlock_sp(bp);
1432 + mutex_lock(&bp->link_lock);
1433 + bnxt_get_port_module_status(bp);
1434 + mutex_unlock(&bp->link_lock);
1435 }
1436 + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
1437 + * must be the last functions to be called before exiting.
1438 + */
1439 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
1440 bnxt_reset(bp, false);
1441
1442 @@ -6788,6 +6788,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
1443 rc);
1444 return rc;
1445 }
1446 + mutex_init(&bp->link_lock);
1447
1448 rc = bnxt_update_link(bp, false);
1449 if (rc) {
1450 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1451 index 666bc0608ed7..017c10c53715 100644
1452 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1453 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1454 @@ -1109,6 +1109,10 @@ struct bnxt {
1455 unsigned long *ntp_fltr_bmap;
1456 int ntp_fltr_count;
1457
1458 + /* To protect link related settings during link changes and
1459 + * ethtool settings changes.
1460 + */
1461 + struct mutex link_lock;
1462 struct bnxt_link_info link_info;
1463 struct ethtool_eee eee;
1464 u32 lpi_tmr_lo;
1465 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1466 index cde4b96f3153..3a352f76e633 100644
1467 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1468 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1469 @@ -793,6 +793,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1470 u32 ethtool_speed;
1471
1472 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1473 + mutex_lock(&bp->link_lock);
1474 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1475
1476 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
1477 @@ -840,6 +841,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1478 base->port = PORT_FIBRE;
1479 }
1480 base->phy_address = link_info->phy_addr;
1481 + mutex_unlock(&bp->link_lock);
1482
1483 return 0;
1484 }
1485 @@ -926,6 +928,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1486 if (!BNXT_SINGLE_PF(bp))
1487 return -EOPNOTSUPP;
1488
1489 + mutex_lock(&bp->link_lock);
1490 if (base->autoneg == AUTONEG_ENABLE) {
1491 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
1492 advertising);
1493 @@ -970,6 +973,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1494 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
1495
1496 set_setting_exit:
1497 + mutex_unlock(&bp->link_lock);
1498 return rc;
1499 }
1500
1501 diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
1502 index 8f55c23e9821..a0d640243df2 100644
1503 --- a/drivers/net/ethernet/cadence/macb.c
1504 +++ b/drivers/net/ethernet/cadence/macb.c
1505 @@ -1737,6 +1737,7 @@ static void macb_configure_dma(struct macb *bp)
1506 else
1507 dmacfg &= ~GEM_BIT(TXCOEN);
1508
1509 + dmacfg &= ~GEM_BIT(ADDR64);
1510 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1511 dmacfg |= GEM_BIT(ADDR64);
1512 #endif
1513 diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1514 index dc0efbd91c32..ddd1ec8f7bd0 100644
1515 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1516 +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1517 @@ -2150,6 +2150,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1518 return -EPERM;
1519 if (copy_from_user(&t, useraddr, sizeof(t)))
1520 return -EFAULT;
1521 + if (t.cmd != CHELSIO_SET_QSET_PARAMS)
1522 + return -EINVAL;
1523 if (t.qset_idx >= SGE_QSETS)
1524 return -EINVAL;
1525 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1526 @@ -2249,6 +2251,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1527 if (copy_from_user(&t, useraddr, sizeof(t)))
1528 return -EFAULT;
1529
1530 + if (t.cmd != CHELSIO_GET_QSET_PARAMS)
1531 + return -EINVAL;
1532 +
1533 /* Display qsets for all ports when offload enabled */
1534 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1535 q1 = 0;
1536 @@ -2294,6 +2299,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1537 return -EBUSY;
1538 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1539 return -EFAULT;
1540 + if (edata.cmd != CHELSIO_SET_QSET_NUM)
1541 + return -EINVAL;
1542 if (edata.val < 1 ||
1543 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1544 return -EINVAL;
1545 @@ -2334,6 +2341,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1546 return -EPERM;
1547 if (copy_from_user(&t, useraddr, sizeof(t)))
1548 return -EFAULT;
1549 + if (t.cmd != CHELSIO_LOAD_FW)
1550 + return -EINVAL;
1551 /* Check t.len sanity ? */
1552 fw_data = memdup_user(useraddr + sizeof(t), t.len);
1553 if (IS_ERR(fw_data))
1554 @@ -2357,6 +2366,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1555 return -EBUSY;
1556 if (copy_from_user(&m, useraddr, sizeof(m)))
1557 return -EFAULT;
1558 + if (m.cmd != CHELSIO_SETMTUTAB)
1559 + return -EINVAL;
1560 if (m.nmtus != NMTUS)
1561 return -EINVAL;
1562 if (m.mtus[0] < 81) /* accommodate SACK */
1563 @@ -2398,6 +2409,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1564 return -EBUSY;
1565 if (copy_from_user(&m, useraddr, sizeof(m)))
1566 return -EFAULT;
1567 + if (m.cmd != CHELSIO_SET_PM)
1568 + return -EINVAL;
1569 if (!is_power_of_2(m.rx_pg_sz) ||
1570 !is_power_of_2(m.tx_pg_sz))
1571 return -EINVAL; /* not power of 2 */
1572 @@ -2431,6 +2444,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1573 return -EIO; /* need the memory controllers */
1574 if (copy_from_user(&t, useraddr, sizeof(t)))
1575 return -EFAULT;
1576 + if (t.cmd != CHELSIO_GET_MEM)
1577 + return -EINVAL;
1578 if ((t.addr & 7) || (t.len & 7))
1579 return -EINVAL;
1580 if (t.mem_id == MEM_CM)
1581 @@ -2483,6 +2498,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1582 return -EAGAIN;
1583 if (copy_from_user(&t, useraddr, sizeof(t)))
1584 return -EFAULT;
1585 + if (t.cmd != CHELSIO_SET_TRACE_FILTER)
1586 + return -EINVAL;
1587
1588 tp = (const struct trace_params *)&t.sip;
1589 if (t.config_tx)
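Each cxgb3 hunk adds the same guard: the request structure copied from userspace carries its own cmd field, and the handler now verifies that this field matches the command case being executed before trusting the rest of the payload. A compact sketch of that re-validation idea, with all names and sizes made up for illustration:

#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

enum { SET_QSET_PARAMS = 1, LOAD_FW = 2 };

struct qset_req {
        uint32_t cmd;                              /* sub-command echoed inside the payload */
        uint32_t qset_idx;
};

static int handle_ioctl(uint32_t dispatched_cmd, const void *user, size_t len)
{
        switch (dispatched_cmd) {
        case SET_QSET_PARAMS: {
                struct qset_req t;

                if (len < sizeof(t))
                        return -EFAULT;
                memcpy(&t, user, sizeof(t));       /* stands in for copy_from_user() */
                if (t.cmd != SET_QSET_PARAMS)      /* reject a mismatched payload */
                        return -EINVAL;
                if (t.qset_idx >= 8)
                        return -EINVAL;
                return 0;
        }
        default:
                return -EOPNOTSUPP;
        }
}

int main(void)
{
        struct qset_req bad = { .cmd = LOAD_FW, .qset_idx = 0 };

        printf("%d\n", handle_ioctl(SET_QSET_PARAMS, &bad, sizeof(bad))); /* -EINVAL (-22 on Linux) */
        return 0;
}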
1590 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
1591 index f314be07ec58..07282eb76867 100644
1592 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
1593 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
1594 @@ -1708,7 +1708,7 @@ static int enic_open(struct net_device *netdev)
1595 {
1596 struct enic *enic = netdev_priv(netdev);
1597 unsigned int i;
1598 - int err;
1599 + int err, ret;
1600
1601 err = enic_request_intr(enic);
1602 if (err) {
1603 @@ -1766,10 +1766,9 @@ static int enic_open(struct net_device *netdev)
1604
1605 err_out_free_rq:
1606 for (i = 0; i < enic->rq_count; i++) {
1607 - err = vnic_rq_disable(&enic->rq[i]);
1608 - if (err)
1609 - return err;
1610 - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1611 + ret = vnic_rq_disable(&enic->rq[i]);
1612 + if (!ret)
1613 + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1614 }
1615 enic_dev_notify_unset(enic);
1616 err_out_free_intr:
1617 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
1618 index fe00f71bc6b4..051ecc76a7ef 100644
1619 --- a/drivers/net/ethernet/freescale/fec_main.c
1620 +++ b/drivers/net/ethernet/freescale/fec_main.c
1621 @@ -1152,7 +1152,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
1622 napi_disable(&fep->napi);
1623 netif_tx_lock_bh(ndev);
1624 fec_restart(ndev);
1625 - netif_wake_queue(ndev);
1626 + netif_tx_wake_all_queues(ndev);
1627 netif_tx_unlock_bh(ndev);
1628 napi_enable(&fep->napi);
1629 }
1630 @@ -1267,7 +1267,7 @@ skb_done:
1631
1632 /* Since we have freed up a buffer, the ring is no longer full
1633 */
1634 - if (netif_queue_stopped(ndev)) {
1635 + if (netif_tx_queue_stopped(nq)) {
1636 entries_free = fec_enet_get_free_txdesc_num(txq);
1637 if (entries_free >= txq->tx_wake_threshold)
1638 netif_tx_wake_queue(nq);
1639 @@ -1744,7 +1744,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1640 napi_disable(&fep->napi);
1641 netif_tx_lock_bh(ndev);
1642 fec_restart(ndev);
1643 - netif_wake_queue(ndev);
1644 + netif_tx_wake_all_queues(ndev);
1645 netif_tx_unlock_bh(ndev);
1646 napi_enable(&fep->napi);
1647 }
1648 @@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1649 napi_disable(&fep->napi);
1650 netif_tx_lock_bh(ndev);
1651 fec_restart(ndev);
1652 - netif_wake_queue(ndev);
1653 + netif_tx_wake_all_queues(ndev);
1654 netif_tx_unlock_bh(ndev);
1655 napi_enable(&fep->napi);
1656 }
1657 diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
1658 index 4b86260584a0..8b66551511f5 100644
1659 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
1660 +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
1661 @@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1662 return NETDEV_TX_OK;
1663 }
1664
1665 -static void fs_timeout(struct net_device *dev)
1666 +static void fs_timeout_work(struct work_struct *work)
1667 {
1668 - struct fs_enet_private *fep = netdev_priv(dev);
1669 + struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
1670 + timeout_work);
1671 + struct net_device *dev = fep->ndev;
1672 unsigned long flags;
1673 int wake = 0;
1674
1675 @@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
1676 phy_stop(dev->phydev);
1677 (*fep->ops->stop)(dev);
1678 (*fep->ops->restart)(dev);
1679 - phy_start(dev->phydev);
1680 }
1681
1682 phy_start(dev->phydev);
1683 @@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
1684 netif_wake_queue(dev);
1685 }
1686
1687 +static void fs_timeout(struct net_device *dev)
1688 +{
1689 + struct fs_enet_private *fep = netdev_priv(dev);
1690 +
1691 + schedule_work(&fep->timeout_work);
1692 +}
1693 +
1694 /*-----------------------------------------------------------------------------
1695 * generic link-change handler - should be sufficient for most cases
1696 *-----------------------------------------------------------------------------*/
1697 @@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
1698 netif_stop_queue(dev);
1699 netif_carrier_off(dev);
1700 napi_disable(&fep->napi);
1701 + cancel_work_sync(&fep->timeout_work);
1702 phy_stop(dev->phydev);
1703
1704 spin_lock_irqsave(&fep->lock, flags);
1705 @@ -1033,6 +1042,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
1706
1707 ndev->netdev_ops = &fs_enet_netdev_ops;
1708 ndev->watchdog_timeo = 2 * HZ;
1709 + INIT_WORK(&fep->timeout_work, fs_timeout_work);
1710 netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
1711
1712 ndev->ethtool_ops = &fs_ethtool_ops;
1713 diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
1714 index fee24c822fad..0e4e024449ec 100644
1715 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
1716 +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
1717 @@ -124,6 +124,7 @@ struct fs_enet_private {
1718 spinlock_t lock; /* during all ops except TX pckt processing */
1719 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
1720 struct fs_platform_info *fpi;
1721 + struct work_struct timeout_work;
1722 const struct fs_ops *ops;
1723 int rx_ring, tx_ring;
1724 dma_addr_t ring_mem_addr;
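The fs_enet change moves the recovery out of the watchdog callback into a work item, so that operations that may sleep (phy_stop()/phy_start(), stop/restart) are not run from the timeout context; the work is set up with INIT_WORK() at probe, queued by the slim fs_timeout() handler, and cancelled synchronously on close, while the duplicated phy_start() inside the restart branch is dropped. A toy single-threaded model of that split, with every name illustrative rather than fs_enet's own:

#include <stdbool.h>
#include <stdio.h>

struct fs_priv {
        bool timeout_pending;               /* stands in for struct work_struct */
        int restarts;
};

static void fs_timeout_work(struct fs_priv *fep)   /* may sleep in the kernel */
{
        fep->restarts++;
        printf("restarted MAC (%d)\n", fep->restarts);
}

static void fs_timeout(struct fs_priv *fep)        /* watchdog context: only queue */
{
        fep->timeout_pending = true;
}

static void run_pending(struct fs_priv *fep)       /* plays the workqueue's role */
{
        if (fep->timeout_pending) {
                fep->timeout_pending = false;
                fs_timeout_work(fep);
        }
}

static void fs_close(struct fs_priv *fep)
{
        fep->timeout_pending = false;              /* cancel_work_sync() analogue */
}

int main(void)
{
        struct fs_priv fep = { 0 };

        fs_timeout(&fep);
        run_pending(&fep);
        fs_close(&fep);
        return 0;
}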
1725 diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
1726 index abe290bfc638..8408682efd86 100644
1727 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
1728 +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
1729 @@ -266,7 +266,7 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
1730 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
1731 * @data: word read from the Shadow RAM
1732 *
1733 - * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
1734 + * Reads one 16 bit word from the Shadow RAM using the AdminQ
1735 **/
1736 static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
1737 u16 *data)
1738 @@ -280,27 +280,49 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
1739 }
1740
1741 /**
1742 - * i40e_read_nvm_word - Reads Shadow RAM
1743 + * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
1744 * @hw: pointer to the HW structure
1745 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
1746 * @data: word read from the Shadow RAM
1747 *
1748 - * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
1749 + * Reads one 16 bit word from the Shadow RAM.
1750 + *
1751 + * Do not use this function except in cases where the nvm lock is already
1752 + * taken via i40e_acquire_nvm().
1753 + **/
1754 +static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
1755 + u16 offset, u16 *data)
1756 +{
1757 + i40e_status ret_code = 0;
1758 +
1759 + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
1760 + ret_code = i40e_read_nvm_word_aq(hw, offset, data);
1761 + else
1762 + ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
1763 + return ret_code;
1764 +}
1765 +
1766 +/**
1767 + * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
1768 + * @hw: pointer to the HW structure
1769 + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
1770 + * @data: word read from the Shadow RAM
1771 + *
1772 + * Reads one 16 bit word from the Shadow RAM.
1773 **/
1774 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
1775 u16 *data)
1776 {
1777 - enum i40e_status_code ret_code = 0;
1778 + i40e_status ret_code = 0;
1779
1780 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
1781 - if (!ret_code) {
1782 - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
1783 - ret_code = i40e_read_nvm_word_aq(hw, offset, data);
1784 - } else {
1785 - ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
1786 - }
1787 - i40e_release_nvm(hw);
1788 - }
1789 + if (ret_code)
1790 + return ret_code;
1791 +
1792 + ret_code = __i40e_read_nvm_word(hw, offset, data);
1793 +
1794 + i40e_release_nvm(hw);
1795 +
1796 return ret_code;
1797 }
1798
1799 @@ -393,31 +415,25 @@ read_nvm_buffer_aq_exit:
1800 }
1801
1802 /**
1803 - * i40e_read_nvm_buffer - Reads Shadow RAM buffer
1804 + * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
1805 * @hw: pointer to the HW structure
1806 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
1807 * @words: (in) number of words to read; (out) number of words actually read
1808 * @data: words read from the Shadow RAM
1809 *
1810 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
1811 - * method. The buffer read is preceded by the NVM ownership take
1812 - * and followed by the release.
1813 + * method.
1814 **/
1815 -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
1816 - u16 *words, u16 *data)
1817 +static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
1818 + u16 offset, u16 *words,
1819 + u16 *data)
1820 {
1821 - enum i40e_status_code ret_code = 0;
1822 + i40e_status ret_code = 0;
1823
1824 - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
1825 - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
1826 - if (!ret_code) {
1827 - ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
1828 - data);
1829 - i40e_release_nvm(hw);
1830 - }
1831 - } else {
1832 + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
1833 + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
1834 + else
1835 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
1836 - }
1837 return ret_code;
1838 }
1839
1840 @@ -499,15 +515,15 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
1841 data = (u16 *)vmem.va;
1842
1843 /* read pointer to VPD area */
1844 - ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
1845 + ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
1846 if (ret_code) {
1847 ret_code = I40E_ERR_NVM_CHECKSUM;
1848 goto i40e_calc_nvm_checksum_exit;
1849 }
1850
1851 /* read pointer to PCIe Alt Auto-load module */
1852 - ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
1853 - &pcie_alt_module);
1854 + ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
1855 + &pcie_alt_module);
1856 if (ret_code) {
1857 ret_code = I40E_ERR_NVM_CHECKSUM;
1858 goto i40e_calc_nvm_checksum_exit;
1859 @@ -521,7 +537,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
1860 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
1861 u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
1862
1863 - ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
1864 + ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
1865 if (ret_code) {
1866 ret_code = I40E_ERR_NVM_CHECKSUM;
1867 goto i40e_calc_nvm_checksum_exit;
1868 @@ -593,14 +609,19 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
1869 u16 checksum_sr = 0;
1870 u16 checksum_local = 0;
1871
1872 + /* We must acquire the NVM lock in order to correctly synchronize the
1873 + * NVM accesses across multiple PFs. Without doing so it is possible
1874 + * for one of the PFs to read invalid data potentially indicating that
1875 + * the checksum is invalid.
1876 + */
1877 + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
1878 + if (ret_code)
1879 + return ret_code;
1880 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
1881 + __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
1882 + i40e_release_nvm(hw);
1883 if (ret_code)
1884 - goto i40e_validate_nvm_checksum_exit;
1885 -
1886 - /* Do not use i40e_read_nvm_word() because we do not want to take
1887 - * the synchronization semaphores twice here.
1888 - */
1889 - i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
1890 + return ret_code;
1891
1892 /* Verify read checksum from EEPROM is the same as
1893 * calculated checksum
1894 @@ -612,7 +633,6 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
1895 if (checksum)
1896 *checksum = checksum_local;
1897
1898 -i40e_validate_nvm_checksum_exit:
1899 return ret_code;
1900 }
1901
1902 @@ -986,6 +1006,7 @@ retry:
1903 break;
1904
1905 case I40E_NVMUPD_CSUM_CON:
1906 + /* Assumes the caller has acquired the nvm */
1907 status = i40e_update_nvm_checksum(hw);
1908 if (status) {
1909 *perrno = hw->aq.asq_last_status ?
1910 @@ -1000,6 +1021,7 @@ retry:
1911 break;
1912
1913 case I40E_NVMUPD_CSUM_LCB:
1914 + /* Assumes the caller has acquired the nvm */
1915 status = i40e_update_nvm_checksum(hw);
1916 if (status) {
1917 *perrno = hw->aq.asq_last_status ?
1918 diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
1919 index 4660c5abc855..6b364118badd 100644
1920 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
1921 +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
1922 @@ -311,8 +311,6 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
1923 void i40e_release_nvm(struct i40e_hw *hw);
1924 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
1925 u16 *data);
1926 -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
1927 - u16 *words, u16 *data);
1928 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
1929 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
1930 u16 *checksum);
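The i40e rework uses the common locked/unlocked split: __i40e_read_nvm_word() and __i40e_read_nvm_buffer() assume the caller already holds the NVM resource, i40e_read_nvm_word() wraps one call in acquire/release, and multi-step operations such as checksum calculation and validation take the resource once and then call only the __ variants, avoiding nested acquisition. A compact sketch of that pattern with a pthread mutex and hypothetical names:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t nvm_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t shadow_ram[16] = { 0x1234, 0x5678 };

static int __read_word(unsigned int off, uint16_t *data)   /* caller holds nvm_lock */
{
        if (off >= 16)
                return -1;
        *data = shadow_ram[off];
        return 0;
}

static int read_word(unsigned int off, uint16_t *data)     /* takes and drops the lock */
{
        int ret;

        pthread_mutex_lock(&nvm_lock);
        ret = __read_word(off, data);
        pthread_mutex_unlock(&nvm_lock);
        return ret;
}

static int validate_checksum(uint16_t *sum)                /* one acquire for the whole pass */
{
        uint16_t w;
        int ret = 0;

        pthread_mutex_lock(&nvm_lock);
        *sum = 0;
        for (unsigned int i = 0; i < 16; i++) {
                ret = __read_word(i, &w);                  /* never read_word(): no nesting */
                if (ret)
                        break;
                *sum = (uint16_t)(*sum + w);
        }
        pthread_mutex_unlock(&nvm_lock);
        return ret;
}

int main(void)
{
        uint16_t w = 0, sum = 0;

        read_word(1, &w);
        validate_checksum(&sum);
        printf("0x%04x 0x%04x\n", w, sum);
        return 0;
}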
1931 diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
1932 index 4a50870e0fa7..a61447fd778e 100644
1933 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c
1934 +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
1935 @@ -245,19 +245,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
1936 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
1937 E1000_STATUS_FUNC_SHIFT;
1938
1939 - /* Make sure the PHY is in a good state. Several people have reported
1940 - * firmware leaving the PHY's page select register set to something
1941 - * other than the default of zero, which causes the PHY ID read to
1942 - * access something other than the intended register.
1943 - */
1944 - ret_val = hw->phy.ops.reset(hw);
1945 - if (ret_val) {
1946 - hw_dbg("Error resetting the PHY.\n");
1947 - goto out;
1948 - }
1949 -
1950 /* Set phy->phy_addr and phy->id. */
1951 - igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
1952 ret_val = igb_get_phy_id_82575(hw);
1953 if (ret_val)
1954 return ret_val;
1955 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1956 index 9680c8805178..1d5263c46eee 100644
1957 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1958 +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1959 @@ -965,7 +965,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
1960
1961 err = wait_func(dev, ent);
1962 if (err == -ETIMEDOUT)
1963 - goto out_free;
1964 + goto out;
1965
1966 ds = ent->ts2 - ent->ts1;
1967 op = MLX5_GET(mbox_in, in->first.data, opcode);
1968 @@ -1428,6 +1428,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1969 mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1970 ent->idx);
1971 free_ent(cmd, ent->idx);
1972 + free_cmd(ent);
1973 }
1974 continue;
1975 }
1976 @@ -1486,7 +1487,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1977 free_msg(dev, ent->in);
1978
1979 err = err ? err : ent->status;
1980 - free_cmd(ent);
1981 + if (!forced)
1982 + free_cmd(ent);
1983 callback(err, context);
1984 } else {
1985 complete(&ent->done);
1986 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
1987 index 23ccec4cb7f5..a1f3556307c7 100644
1988 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
1989 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
1990 @@ -197,9 +197,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
1991 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
1992 MLX5E_AM_STATS_WORSE;
1993
1994 + if (!prev->ppms)
1995 + return curr->ppms ? MLX5E_AM_STATS_BETTER :
1996 + MLX5E_AM_STATS_SAME;
1997 +
1998 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
1999 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
2000 MLX5E_AM_STATS_WORSE;
2001 + if (!prev->epms)
2002 + return MLX5E_AM_STATS_SAME;
2003
2004 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
2005 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
2006 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
2007 index 448e71e07668..264f51b3409d 100644
2008 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
2009 +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
2010 @@ -369,10 +369,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
2011 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
2012 {
2013 struct mlx5_core_health *health = &dev->priv.health;
2014 + unsigned long flags;
2015
2016 - spin_lock(&health->wq_lock);
2017 + spin_lock_irqsave(&health->wq_lock, flags);
2018 set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
2019 - spin_unlock(&health->wq_lock);
2020 + spin_unlock_irqrestore(&health->wq_lock, flags);
2021 cancel_delayed_work_sync(&dev->priv.health.recover_work);
2022 }
2023
2024 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2025 index 6698a3a07406..d676088512cf 100644
2026 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2027 +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2028 @@ -957,7 +957,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
2029 if (err) {
2030 dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
2031 FW_PRE_INIT_TIMEOUT_MILI);
2032 - goto out;
2033 + goto out_err;
2034 }
2035
2036 err = mlx5_cmd_init(dev);
2037 diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
2038 index 6e4fae9b1430..944749cfe092 100644
2039 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
2040 +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
2041 @@ -34,6 +34,7 @@ struct qed_ptt {
2042 struct list_head list_entry;
2043 unsigned int idx;
2044 struct pxp_ptt_entry pxp;
2045 + u8 hwfn_id;
2046 };
2047
2048 struct qed_ptt_pool {
2049 @@ -55,6 +56,7 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
2050 p_pool->ptts[i].idx = i;
2051 p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
2052 p_pool->ptts[i].pxp.pretend.control = 0;
2053 + p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
2054 if (i >= RESERVED_PTT_MAX)
2055 list_add(&p_pool->ptts[i].list_entry,
2056 &p_pool->free_list);
2057 @@ -169,6 +171,11 @@ static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
2058
2059 offset = hw_addr - win_hw_addr;
2060
2061 + if (p_ptt->hwfn_id != p_hwfn->my_id)
2062 + DP_NOTICE(p_hwfn,
2063 + "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
2064 + p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
2065 +
2066 /* Verify the address is within the window */
2067 if (hw_addr < win_hw_addr ||
2068 offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
2069 diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
2070 index d9dcb0d1714c..07783d13df71 100644
2071 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
2072 +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
2073 @@ -1059,23 +1059,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
2074
2075 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
2076 {
2077 - enum roce_flavor flavor;
2078 -
2079 switch (roce_mode) {
2080 case ROCE_V1:
2081 - flavor = PLAIN_ROCE;
2082 - break;
2083 + return PLAIN_ROCE;
2084 case ROCE_V2_IPV4:
2085 - flavor = RROCE_IPV4;
2086 - break;
2087 + return RROCE_IPV4;
2088 case ROCE_V2_IPV6:
2089 - flavor = ROCE_V2_IPV6;
2090 - break;
2091 + return RROCE_IPV6;
2092 default:
2093 - flavor = MAX_ROCE_MODE;
2094 - break;
2095 + return MAX_ROCE_FLAVOR;
2096 }
2097 - return flavor;
2098 }
2099
2100 static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
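Besides flattening the switch to return directly, the qed_roce hunk fixes the value returned for ROCE_V2_IPV6: the old code assigned ROCE_V2_IPV6, a constant from the input enum, where a member of the flavor enum (RROCE_IPV6) was expected, and used MAX_ROCE_MODE instead of MAX_ROCE_FLAVOR as the sentinel. The corrected mapping, reduced to a standalone sketch whose enum values are made up for illustration:

#include <stdio.h>

enum roce_mode   { ROCE_V1, ROCE_V2_IPV4, ROCE_V2_IPV6, MAX_ROCE_MODE };
enum roce_flavor { PLAIN_ROCE, RROCE_IPV4, RROCE_IPV6, MAX_ROCE_FLAVOR };

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
        switch (roce_mode) {
        case ROCE_V1:
                return PLAIN_ROCE;
        case ROCE_V2_IPV4:
                return RROCE_IPV4;
        case ROCE_V2_IPV6:
                return RROCE_IPV6;      /* not ROCE_V2_IPV6, which belongs to enum roce_mode */
        default:
                return MAX_ROCE_FLAVOR; /* sentinel from the output enum */
        }
}

int main(void)
{
        printf("%d\n", qed_roce_mode_to_flavor(ROCE_V2_IPV6)); /* 2 with these toy values */
        return 0;
}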
2101 diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2102 index faf8215872de..9cc02b94328a 100644
2103 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
2104 +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2105 @@ -295,7 +295,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
2106 }
2107
2108 if (!p_iov->b_pre_fp_hsi &&
2109 - ETH_HSI_VER_MINOR &&
2110 (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
2111 DP_INFO(p_hwfn,
2112 "PF is using older fastpath HSI; %02x.%02x is configured\n",
2113 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
2114 index 5ddadcd0c8db..f1242ab32ca6 100644
2115 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
2116 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
2117 @@ -1825,22 +1825,44 @@ struct qlcnic_hardware_ops {
2118 u32 (*get_cap_size)(void *, int);
2119 void (*set_sys_info)(void *, int, u32);
2120 void (*store_cap_mask)(void *, u32);
2121 + bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
2122 + bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
2123 };
2124
2125 extern struct qlcnic_nic_template qlcnic_vf_ops;
2126
2127 -static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
2128 +static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
2129 {
2130 return adapter->ahw->extra_capability[0] &
2131 QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
2132 }
2133
2134 -static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
2135 +static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
2136 {
2137 return adapter->ahw->extra_capability[0] &
2138 QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
2139 }
2140
2141 +static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
2142 +{
2143 + return false;
2144 +}
2145 +
2146 +static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
2147 +{
2148 + return false;
2149 +}
2150 +
2151 +static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
2152 +{
2153 + return adapter->ahw->hw_ops->encap_rx_offload(adapter);
2154 +}
2155 +
2156 +static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
2157 +{
2158 + return adapter->ahw->hw_ops->encap_tx_offload(adapter);
2159 +}
2160 +
2161 static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
2162 {
2163 return adapter->nic_ops->start_firmware(adapter);
2164 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
2165 index 05d32e86bcf7..35c5ac41c0a1 100644
2166 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
2167 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
2168 @@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
2169 .get_cap_size = qlcnic_83xx_get_cap_size,
2170 .set_sys_info = qlcnic_83xx_set_sys_info,
2171 .store_cap_mask = qlcnic_83xx_store_cap_mask,
2172 + .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
2173 + .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
2174 };
2175
2176 static struct qlcnic_nic_template qlcnic_83xx_ops = {
2177 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
2178 index 3ae3968b0edf..ebf5ead16939 100644
2179 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
2180 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
2181 @@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
2182 .get_cap_size = qlcnic_82xx_get_cap_size,
2183 .set_sys_info = qlcnic_82xx_set_sys_info,
2184 .store_cap_mask = qlcnic_82xx_store_cap_mask,
2185 + .encap_rx_offload = qlcnic_82xx_encap_rx_offload,
2186 + .encap_tx_offload = qlcnic_82xx_encap_tx_offload,
2187 };
2188
2189 static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
2190 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
2191 index 2f656f395f39..c58180f40844 100644
2192 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
2193 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
2194 @@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
2195 .free_mac_list = qlcnic_sriov_vf_free_mac_list,
2196 .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
2197 .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
2198 + .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
2199 + .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
2200 };
2201
2202 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
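The qlcnic change turns the encap-offload predicates into per-generation hw_ops callbacks: the 83xx and SR-IOV ops tables point at the capability-bit helpers, the 82xx table gets stubs that always return false, and common code only ever goes through the ops pointer. The general ops-table shape as a small standalone sketch, all names hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct adapter;

struct hw_ops {
        bool (*encap_rx_offload)(struct adapter *a);
};

struct adapter {
        const struct hw_ops *ops;
        unsigned int extra_capability;
};

#define FW_CAPAB_ENCAP_RX 0x1u

static bool gen83xx_encap_rx(struct adapter *a)
{
        return a->extra_capability & FW_CAPAB_ENCAP_RX; /* capability bit is meaningful here */
}

static bool gen82xx_encap_rx(struct adapter *a)
{
        (void)a;
        return false;                                   /* older generation: never offloads */
}

static const struct hw_ops ops_83xx = { .encap_rx_offload = gen83xx_encap_rx };
static const struct hw_ops ops_82xx = { .encap_rx_offload = gen82xx_encap_rx };

static bool encap_rx_offload(struct adapter *a)         /* common code calls only this */
{
        return a->ops->encap_rx_offload(a);
}

int main(void)
{
        struct adapter a83 = { &ops_83xx, FW_CAPAB_ENCAP_RX };
        struct adapter a82 = { &ops_82xx, FW_CAPAB_ENCAP_RX };

        printf("%d %d\n", encap_rx_offload(&a83), encap_rx_offload(&a82)); /* 1 0 */
        return 0;
}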
2203 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2204 index 20f5c0cabc89..24754d3fb0ac 100644
2205 --- a/drivers/net/ethernet/realtek/r8169.c
2206 +++ b/drivers/net/ethernet/realtek/r8169.c
2207 @@ -7559,17 +7559,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
2208 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
2209 struct net_device *dev = tp->dev;
2210 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
2211 - int work_done= 0;
2212 + int work_done;
2213 u16 status;
2214
2215 status = rtl_get_events(tp);
2216 rtl_ack_events(tp, status & ~tp->event_slow);
2217
2218 - if (status & RTL_EVENT_NAPI_RX)
2219 - work_done = rtl_rx(dev, tp, (u32) budget);
2220 + work_done = rtl_rx(dev, tp, (u32) budget);
2221
2222 - if (status & RTL_EVENT_NAPI_TX)
2223 - rtl_tx(dev, tp);
2224 + rtl_tx(dev, tp);
2225
2226 if (status & tp->event_slow) {
2227 enable_mask &= ~tp->event_slow;
2228 diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
2229 index 489ef146201e..6a9c954492f2 100644
2230 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
2231 +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
2232 @@ -37,6 +37,7 @@
2233 #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
2234 #define TSE_PCS_CONTROL_REG 0x00
2235 #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
2236 +#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140
2237 #define TSE_PCS_IF_MODE_REG 0x28
2238 #define TSE_PCS_LINK_TIMER_0_REG 0x24
2239 #define TSE_PCS_LINK_TIMER_1_REG 0x26
2240 @@ -65,6 +66,7 @@
2241 #define TSE_PCS_SW_RESET_TIMEOUT 100
2242 #define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
2243 #define TSE_PCS_USE_SGMII_ENA BIT(0)
2244 +#define TSE_PCS_IF_USE_SGMII 0x03
2245
2246 #define SGMII_ADAPTER_CTRL_REG 0x00
2247 #define SGMII_ADAPTER_DISABLE 0x0001
2248 @@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
2249 {
2250 int ret = 0;
2251
2252 - writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
2253 + writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
2254 +
2255 + writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
2256
2257 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
2258 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
2259 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2260 index 0df71865fab1..65ed02bc3ea3 100644
2261 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2262 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2263 @@ -2199,7 +2199,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2264 unsigned int nopaged_len = skb_headlen(skb);
2265 int i, csum_insertion = 0, is_jumbo = 0;
2266 int nfrags = skb_shinfo(skb)->nr_frags;
2267 - unsigned int entry, first_entry;
2268 + int entry;
2269 + unsigned int first_entry;
2270 struct dma_desc *desc, *first;
2271 unsigned int enh_desc;
2272 unsigned int des;
2273 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
2274 index ec295851812b..2abeba41c0af 100644
2275 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
2276 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
2277 @@ -216,7 +216,7 @@ static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
2278 */
2279 int stmmac_mdio_reset(struct mii_bus *bus)
2280 {
2281 -#if defined(CONFIG_STMMAC_PLATFORM)
2282 +#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
2283 struct net_device *ndev = bus->priv;
2284 struct stmmac_priv *priv = netdev_priv(ndev);
2285 unsigned int mii_address = priv->hw->mii.addr;
2286 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
2287 index 365a48cfcbbf..653f0b185a68 100644
2288 --- a/drivers/net/macsec.c
2289 +++ b/drivers/net/macsec.c
2290 @@ -744,6 +744,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
2291 sg_init_table(sg, ret);
2292 ret = skb_to_sgvec(skb, sg, 0, skb->len);
2293 if (unlikely(ret < 0)) {
2294 + aead_request_free(req);
2295 macsec_txsa_put(tx_sa);
2296 kfree_skb(skb);
2297 return ERR_PTR(ret);
2298 @@ -956,6 +957,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
2299 sg_init_table(sg, ret);
2300 ret = skb_to_sgvec(skb, sg, 0, skb->len);
2301 if (unlikely(ret < 0)) {
2302 + aead_request_free(req);
2303 kfree_skb(skb);
2304 return ERR_PTR(ret);
2305 }
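Both macsec hunks plug the same kind of leak: when skb_to_sgvec() fails, the early-return path was freeing the skb but not the aead_request allocated just before it. The general rule is that an error exit must unwind everything acquired earlier in the function, sketched here with plain malloc()/free() and hypothetical names:

#include <stdlib.h>
#include <errno.h>

struct request { char payload[64]; };

static int build_sg(int len)                 /* stands in for skb_to_sgvec() */
{
        return len > 0 ? 0 : -EMSGSIZE;
}

static int encrypt_frame(int len)
{
        struct request *req = malloc(sizeof(*req));
        int ret;

        if (!req)
                return -ENOMEM;

        ret = build_sg(len);
        if (ret < 0) {
                free(req);                   /* the line the fix adds, in spirit */
                return ret;
        }

        /* ... submit req ... */
        free(req);
        return 0;
}

int main(void)
{
        return encrypt_frame(-1) ? 0 : 1;    /* exercise the error path; expect nonzero */
}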
2306 diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
2307 index c60c147708c4..520352327104 100644
2308 --- a/drivers/net/phy/marvell.c
2309 +++ b/drivers/net/phy/marvell.c
2310 @@ -1610,7 +1610,7 @@ static struct phy_driver marvell_drivers[] = {
2311 .flags = PHY_HAS_INTERRUPT,
2312 .probe = marvell_probe,
2313 .config_init = &m88e1145_config_init,
2314 - .config_aneg = &marvell_config_aneg,
2315 + .config_aneg = &m88e1101_config_aneg,
2316 .read_status = &genphy_read_status,
2317 .ack_interrupt = &marvell_ack_interrupt,
2318 .config_intr = &marvell_config_intr,
2319 diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
2320 index 125cff57c759..3dbb0646b024 100644
2321 --- a/drivers/net/usb/asix_common.c
2322 +++ b/drivers/net/usb/asix_common.c
2323 @@ -575,6 +575,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
2324 struct usbnet *dev = netdev_priv(net);
2325 u8 opt = 0;
2326
2327 + if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
2328 + return -EINVAL;
2329 +
2330 if (wolinfo->wolopts & WAKE_PHY)
2331 opt |= AX_MONITOR_LINK;
2332 if (wolinfo->wolopts & WAKE_MAGIC)
2333 diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
2334 index 8a6675d92b98..559af8e6ad90 100644
2335 --- a/drivers/net/usb/ax88179_178a.c
2336 +++ b/drivers/net/usb/ax88179_178a.c
2337 @@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
2338 struct usbnet *dev = netdev_priv(net);
2339 u8 opt = 0;
2340
2341 + if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
2342 + return -EINVAL;
2343 +
2344 if (wolinfo->wolopts & WAKE_PHY)
2345 opt |= AX_MONITOR_MODE_RWLC;
2346 if (wolinfo->wolopts & WAKE_MAGIC)
2347 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2348 index 3086cae62fdc..7b158674ceed 100644
2349 --- a/drivers/net/usb/cdc_ncm.c
2350 +++ b/drivers/net/usb/cdc_ncm.c
2351 @@ -772,7 +772,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
2352 int err;
2353 u8 iface_no;
2354 struct usb_cdc_parsed_header hdr;
2355 - u16 curr_ntb_format;
2356 + __le16 curr_ntb_format;
2357
2358 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2359 if (!ctx)
2360 @@ -890,7 +890,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
2361 goto error2;
2362 }
2363
2364 - if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
2365 + if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) {
2366 dev_info(&intf->dev, "resetting NTB format to 16-bit");
2367 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
2368 USB_TYPE_CLASS | USB_DIR_OUT
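The cdc_ncm fix is an endianness one: the NTB format value read back from the device is little-endian on the wire, so the variable is declared __le16 and compared against cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT); comparing the raw field with a CPU-order constant only happens to work on little-endian hosts. The equivalent idea in plain C, converting the wire bytes to host order before comparing (helper names are made up; the kernel uses le16_to_cpu()/cpu_to_le16()):

#include <stdint.h>
#include <stdio.h>

#define NTB32_FORMAT 0x0001u            /* illustrative constant, CPU order */

/* Assemble a 16-bit value from little-endian wire bytes into host order. */
static uint16_t le16_to_cpu_sketch(const uint8_t b[2])
{
        return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
        uint8_t wire[2] = { 0x01, 0x00 };               /* device sent 0x0001, little-endian */
        uint16_t curr_ntb_format = le16_to_cpu_sketch(wire);

        if (curr_ntb_format == NTB32_FORMAT)            /* both sides now in CPU order */
                printf("32-bit NTB advertised, reset to 16-bit\n");
        return 0;
}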
2369 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
2370 index c5e04d1ad73a..0cbcd3f77341 100644
2371 --- a/drivers/net/usb/lan78xx.c
2372 +++ b/drivers/net/usb/lan78xx.c
2373 @@ -1311,19 +1311,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
2374 if (ret < 0)
2375 return ret;
2376
2377 - pdata->wol = 0;
2378 - if (wol->wolopts & WAKE_UCAST)
2379 - pdata->wol |= WAKE_UCAST;
2380 - if (wol->wolopts & WAKE_MCAST)
2381 - pdata->wol |= WAKE_MCAST;
2382 - if (wol->wolopts & WAKE_BCAST)
2383 - pdata->wol |= WAKE_BCAST;
2384 - if (wol->wolopts & WAKE_MAGIC)
2385 - pdata->wol |= WAKE_MAGIC;
2386 - if (wol->wolopts & WAKE_PHY)
2387 - pdata->wol |= WAKE_PHY;
2388 - if (wol->wolopts & WAKE_ARP)
2389 - pdata->wol |= WAKE_ARP;
2390 + if (wol->wolopts & ~WAKE_ALL)
2391 + return -EINVAL;
2392 +
2393 + pdata->wol = wol->wolopts;
2394
2395 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
2396
2397 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
2398 index 5988674818ed..02e29562d254 100644
2399 --- a/drivers/net/usb/r8152.c
2400 +++ b/drivers/net/usb/r8152.c
2401 @@ -3776,6 +3776,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2402 if (!rtl_can_wakeup(tp))
2403 return -EOPNOTSUPP;
2404
2405 + if (wol->wolopts & ~WAKE_ANY)
2406 + return -EINVAL;
2407 +
2408 ret = usb_autopm_get_interface(tp->intf);
2409 if (ret < 0)
2410 goto out_set_wol;
2411 diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
2412 index 8d3f938c6a51..977d9c772554 100644
2413 --- a/drivers/net/usb/smsc75xx.c
2414 +++ b/drivers/net/usb/smsc75xx.c
2415 @@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
2416 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
2417 int ret;
2418
2419 + if (wolinfo->wolopts & ~SUPPORTED_WAKE)
2420 + return -EINVAL;
2421 +
2422 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
2423
2424 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
2425 diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
2426 index 831aa33d078a..a167116ceeee 100644
2427 --- a/drivers/net/usb/smsc95xx.c
2428 +++ b/drivers/net/usb/smsc95xx.c
2429 @@ -775,6 +775,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
2430 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
2431 int ret;
2432
2433 + if (wolinfo->wolopts & ~SUPPORTED_WAKE)
2434 + return -EINVAL;
2435 +
2436 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
2437
2438 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
2439 diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
2440 index a50df0d8fb9a..004c955c1fd1 100644
2441 --- a/drivers/net/usb/sr9800.c
2442 +++ b/drivers/net/usb/sr9800.c
2443 @@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
2444 struct usbnet *dev = netdev_priv(net);
2445 u8 opt = 0;
2446
2447 + if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
2448 + return -EINVAL;
2449 +
2450 if (wolinfo->wolopts & WAKE_PHY)
2451 opt |= SR_MONITOR_LINK;
2452 if (wolinfo->wolopts & WAKE_MAGIC)
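The run of USB network driver hunks above (asix, ax88179, lan78xx, r8152, smsc75xx, smsc95xx, sr9800) all apply the same ethtool rule: if userspace requests a wake-on-LAN option the hardware cannot honour, fail with -EINVAL instead of silently ignoring the bit. The check is a single mask test, sketched here with locally defined WAKE_* stand-ins that mirror linux/ethtool.h:

#include <errno.h>
#include <stdio.h>

#define WAKE_PHY   (1u << 0)    /* values mirror linux/ethtool.h */
#define WAKE_ARP   (1u << 4)
#define WAKE_MAGIC (1u << 5)

#define DRIVER_SUPPORTED_WAKE (WAKE_PHY | WAKE_MAGIC)

static int set_wol(unsigned int wolopts)
{
        if (wolopts & ~DRIVER_SUPPORTED_WAKE)   /* any requested bit we can't honour? */
                return -EINVAL;
        /* ... program WAKE_PHY / WAKE_MAGIC into the device ... */
        return 0;
}

int main(void)
{
        printf("%d\n", set_wol(WAKE_MAGIC));            /* 0 */
        printf("%d\n", set_wol(WAKE_MAGIC | WAKE_ARP)); /* -EINVAL (-22 on Linux) */
        return 0;
}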
2453 diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
2454 index 45226dbee5ce..da770af83036 100644
2455 --- a/drivers/net/wireless/ath/ath10k/ahb.c
2456 +++ b/drivers/net/wireless/ath/ath10k/ahb.c
2457 @@ -640,6 +640,7 @@ static int ath10k_ahb_hif_start(struct ath10k *ar)
2458 {
2459 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
2460
2461 + napi_enable(&ar->napi);
2462 ath10k_ce_enable_interrupts(ar);
2463 ath10k_pci_enable_legacy_irq(ar);
2464
2465 @@ -692,7 +693,6 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
2466 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2467 goto err_ce_deinit;
2468 }
2469 - napi_enable(&ar->napi);
2470
2471 return 0;
2472
2473 diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
2474 index 65ad7a130ca1..1e41d6c6de36 100644
2475 --- a/drivers/net/wireless/ath/ath10k/core.c
2476 +++ b/drivers/net/wireless/ath/ath10k/core.c
2477 @@ -698,7 +698,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
2478
2479 if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
2480 (board_id == 0)) {
2481 - ath10k_warn(ar, "board id is not exist in otp, ignore it\n");
2482 + ath10k_dbg(ar, ATH10K_DBG_BOOT,
2483 + "board id does not exist in otp, ignore it\n");
2484 return -EOPNOTSUPP;
2485 }
2486
2487 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2488 index 4bb36dc73433..cbb3e902e347 100644
2489 --- a/drivers/net/wireless/mac80211_hwsim.c
2490 +++ b/drivers/net/wireless/mac80211_hwsim.c
2491 @@ -2665,8 +2665,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2492 list_add_tail(&data->list, &hwsim_radios);
2493 spin_unlock_bh(&hwsim_radio_lock);
2494
2495 - if (idx > 0)
2496 - hwsim_mcast_new_radio(idx, info, param);
2497 + hwsim_mcast_new_radio(idx, info, param);
2498
2499 return idx;
2500
2501 diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
2502 index a0ae8d8763bb..06a57c708992 100644
2503 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c
2504 +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
2505 @@ -1368,6 +1368,10 @@ static int if_sdio_suspend(struct device *dev)
2506 if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
2507 dev_info(dev, "Suspend without wake params -- powering down card\n");
2508 if (priv->fw_ready) {
2509 + ret = lbs_suspend(priv);
2510 + if (ret)
2511 + return ret;
2512 +
2513 priv->power_up_on_resume = true;
2514 if_sdio_power_off(card);
2515 }
2516 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2517 index 3c1adb38412b..aceae791baf3 100644
2518 --- a/drivers/net/xen-netfront.c
2519 +++ b/drivers/net/xen-netfront.c
2520 @@ -1848,7 +1848,7 @@ static int talk_to_netback(struct xenbus_device *dev,
2521 err = xen_net_read_mac(dev, info->netdev->dev_addr);
2522 if (err) {
2523 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2524 - goto out;
2525 + goto out_unlocked;
2526 }
2527
2528 rtnl_lock();
2529 @@ -1963,6 +1963,7 @@ abort_transaction_no_dev_fatal:
2530 xennet_destroy_queues(info);
2531 out:
2532 rtnl_unlock();
2533 +out_unlocked:
2534 device_unregister(&dev->dev);
2535 return err;
2536 }
2537 @@ -1994,10 +1995,6 @@ static int xennet_connect(struct net_device *dev)
2538 /* talk_to_netback() sets the correct number of queues */
2539 num_queues = dev->real_num_tx_queues;
2540
2541 - rtnl_lock();
2542 - netdev_update_features(dev);
2543 - rtnl_unlock();
2544 -
2545 if (dev->reg_state == NETREG_UNINITIALIZED) {
2546 err = register_netdev(dev);
2547 if (err) {
2548 @@ -2007,6 +2004,10 @@ static int xennet_connect(struct net_device *dev)
2549 }
2550 }
2551
2552 + rtnl_lock();
2553 + netdev_update_features(dev);
2554 + rtnl_unlock();
2555 +
2556 /*
2557 * All public and private state should now be sane. Get
2558 * ready to start sending and receiving packets and give the driver
2559 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2560 index fadf151ce830..1ac4cec5f4f7 100644
2561 --- a/drivers/nvme/host/pci.c
2562 +++ b/drivers/nvme/host/pci.c
2563 @@ -1393,11 +1393,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
2564 if (dev->cmb) {
2565 iounmap(dev->cmb);
2566 dev->cmb = NULL;
2567 - if (dev->cmbsz) {
2568 - sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
2569 - &dev_attr_cmb.attr, NULL);
2570 - dev->cmbsz = 0;
2571 - }
2572 + sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
2573 + &dev_attr_cmb.attr, NULL);
2574 + dev->cmbsz = 0;
2575 }
2576 }
2577
2578 @@ -1632,16 +1630,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
2579
2580 /*
2581 * CMBs can currently only exist on >=1.2 PCIe devices. We only
2582 - * populate sysfs if a CMB is implemented. Note that we add the
2583 - * CMB attribute to the nvme_ctrl kobj which removes the need to remove
2584 - * it on exit. Since nvme_dev_attrs_group has no name we can pass
2585 - * NULL as final argument to sysfs_add_file_to_group.
2586 + * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
2587 + * has no name we can pass NULL as final argument to
2588 + * sysfs_add_file_to_group.
2589 */
2590
2591 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
2592 dev->cmb = nvme_map_cmb(dev);
2593 -
2594 - if (dev->cmbsz) {
2595 + if (dev->cmb) {
2596 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
2597 &dev_attr_cmb.attr, NULL))
2598 dev_warn(dev->dev,
2599 diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
2600 index 2caed285fd7b..cdb7752dcbb7 100644
2601 --- a/drivers/nvme/target/admin-cmd.c
2602 +++ b/drivers/nvme/target/admin-cmd.c
2603 @@ -192,6 +192,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
2604 id->vid = 0;
2605 id->ssvid = 0;
2606
2607 + memset(id->sn, ' ', sizeof(id->sn));
2608 bin2hex(id->sn, &ctrl->subsys->serial,
2609 min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
2610 copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
2611 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2612 index c7a695c2303a..2250f0d33481 100644
2613 --- a/drivers/pci/quirks.c
2614 +++ b/drivers/pci/quirks.c
2615 @@ -1634,8 +1634,8 @@ static void quirk_pcie_mch(struct pci_dev *pdev)
2616 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
2617 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
2618 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
2619 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch);
2620
2621 +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
2622
2623 /*
2624 * It's possible for the MSI to get corrupted if shpc and acpi
2625 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
2626 index c29b9b611ab2..1515c9480f89 100644
2627 --- a/drivers/platform/x86/acer-wmi.c
2628 +++ b/drivers/platform/x86/acer-wmi.c
2629 @@ -1856,7 +1856,7 @@ static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
2630 if (!strcmp(ctx, "SENR")) {
2631 if (acpi_bus_get_device(ah, &dev))
2632 return AE_OK;
2633 - if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
2634 + if (strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
2635 return AE_OK;
2636 } else
2637 return AE_OK;
2638 @@ -1877,8 +1877,7 @@ static int __init acer_wmi_get_handle(const char *name, const char *prop,
2639 handle = NULL;
2640 status = acpi_get_devices(prop, acer_wmi_get_handle_cb,
2641 (void *)name, &handle);
2642 -
2643 - if (ACPI_SUCCESS(status)) {
2644 + if (ACPI_SUCCESS(status) && handle) {
2645 *ah = handle;
2646 return 0;
2647 } else {
2648 @@ -2247,8 +2246,8 @@ static int __init acer_wmi_init(void)
2649 if (err)
2650 return err;
2651 err = acer_wmi_accel_setup();
2652 - if (err)
2653 - return err;
2654 + if (err && err != -ENODEV)
2655 + pr_warn("Cannot enable accelerometer\n");
2656 }
2657
2658 err = platform_driver_register(&acer_platform_driver);
2659 diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
2660 index 51364621f77c..a421d6c551b6 100644
2661 --- a/drivers/ptp/ptp_chardev.c
2662 +++ b/drivers/ptp/ptp_chardev.c
2663 @@ -24,6 +24,8 @@
2664 #include <linux/slab.h>
2665 #include <linux/timekeeping.h>
2666
2667 +#include <linux/nospec.h>
2668 +
2669 #include "ptp_private.h"
2670
2671 static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
2672 @@ -248,6 +250,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
2673 err = -EINVAL;
2674 break;
2675 }
2676 + pin_index = array_index_nospec(pin_index, ops->n_pins);
2677 if (mutex_lock_interruptible(&ptp->pincfg_mux))
2678 return -ERESTARTSYS;
2679 pd = ops->pin_config[pin_index];
2680 @@ -266,6 +269,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
2681 err = -EINVAL;
2682 break;
2683 }
2684 + pin_index = array_index_nospec(pin_index, ops->n_pins);
2685 if (mutex_lock_interruptible(&ptp->pincfg_mux))
2686 return -ERESTARTSYS;
2687 err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
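The ptp hunks (and the f_mass_storage and vhost hunks further down) follow the same Spectre-v1 hardening recipe: after the architectural bounds check, the user-controlled index is passed through array_index_nospec() so that even a mispredicted branch cannot use an out-of-range value. A user-space model of the clamp, patterned on the kernel's generic mask-based fallback; the in-kernel version is architecture-tuned, so this is only the idea:

#include <stdint.h>
#include <stdio.h>

/*
 * All-ones when index < size, zero otherwise, computed without a branch
 * (patterned on the generic array_index_mask_nospec() fallback; relies on
 * arithmetic right shift of a signed value, as the kernel code does).
 */
static uintptr_t index_mask(uintptr_t index, uintptr_t size)
{
        return (uintptr_t)(~(intptr_t)(index | (size - 1 - index)) >>
                           (sizeof(uintptr_t) * 8 - 1));
}

static uintptr_t index_nospec(uintptr_t index, uintptr_t size)
{
        return index & index_mask(index, size);         /* out-of-range collapses to 0 */
}

int main(void)
{
        unsigned int n_pins = 8;
        unsigned int pin_index = 5;                     /* imagine this came from userspace */

        if (pin_index >= n_pins)                        /* architectural bounds check */
                return 1;
        pin_index = (unsigned int)index_nospec(pin_index, n_pins);
        printf("%u\n", pin_index);                      /* still 5, but speculation-safe */
        return 0;
}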
2688 diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
2689 index 35ce53edabf9..d5e5229308f2 100644
2690 --- a/drivers/reset/hisilicon/hi6220_reset.c
2691 +++ b/drivers/reset/hisilicon/hi6220_reset.c
2692 @@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void)
2693 }
2694
2695 postcore_initcall(hi6220_reset_init);
2696 +
2697 +MODULE_LICENSE("GPL v2");
2698 diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
2699 index 7b178d765726..c0592fda409e 100644
2700 --- a/drivers/scsi/aacraid/src.c
2701 +++ b/drivers/scsi/aacraid/src.c
2702 @@ -445,7 +445,7 @@ err_out:
2703 return -1;
2704
2705 err_blink:
2706 - return (status > 16) & 0xFF;
2707 + return (status >> 16) & 0xFF;
2708 }
2709
2710 /**
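The aacraid fix is a one-character typo with real consequences: status > 16 is a boolean comparison and can only yield 0 or 1, whereas the intended >> 16 extracts the blink-code byte from bits 16..23. A two-line demonstration with a made-up status value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t status = 0x00ab0000;   /* hypothetical value; blink code lives in bits 16..23 */

        printf("0x%02x\n", (unsigned int)((status >  16) & 0xFF)); /* old: prints 0x01 */
        printf("0x%02x\n", (unsigned int)((status >> 16) & 0xFF)); /* fix: prints 0xab */
        return 0;
}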
2711 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
2712 index 5f66b6da65f2..b6d9e3104b89 100644
2713 --- a/drivers/scsi/qla2xxx/qla_init.c
2714 +++ b/drivers/scsi/qla2xxx/qla_init.c
2715 @@ -368,8 +368,8 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res)
2716 srb_t *sp = (srb_t *)ptr;
2717 struct srb_iocb *abt = &sp->u.iocb_cmd;
2718
2719 - del_timer(&sp->u.iocb_cmd.timer);
2720 - complete(&abt->u.abt.comp);
2721 + if (del_timer(&sp->u.iocb_cmd.timer))
2722 + complete(&abt->u.abt.comp);
2723 }
2724
2725 static int
2726 diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
2727 index 2caacd9d2526..2cc82ed6433a 100644
2728 --- a/drivers/soc/fsl/qbman/qman.c
2729 +++ b/drivers/soc/fsl/qbman/qman.c
2730 @@ -2713,6 +2713,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2731 {
2732 unsigned long addr;
2733
2734 + if (!p)
2735 + return -ENODEV;
2736 +
2737 addr = gen_pool_alloc(p, cnt);
2738 if (!addr)
2739 return -ENOMEM;
2740 diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
2741 index c646d8713861..681f7d4b7724 100644
2742 --- a/drivers/soc/fsl/qe/ucc.c
2743 +++ b/drivers/soc/fsl/qe/ucc.c
2744 @@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
2745 {
2746 u32 shift;
2747
2748 - shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
2749 + shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
2750 shift -= tdm_num * 2;
2751
2752 return shift;
2753 diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
2754 index 07d6e4824a9d..2e5e3b368532 100644
2755 --- a/drivers/staging/wilc1000/linux_wlan.c
2756 +++ b/drivers/staging/wilc1000/linux_wlan.c
2757 @@ -1260,11 +1260,12 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
2758 else
2759 strcpy(ndev->name, "p2p%d");
2760
2761 - vif->idx = wl->vif_num;
2762 vif->wilc = *wilc;
2763 vif->ndev = ndev;
2764 wl->vif[i] = vif;
2765 wl->vif_num = i;
2766 + vif->idx = wl->vif_num;
2767 +
2768 ndev->netdev_ops = &wilc_netdev_ops;
2769
2770 {
2771 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2772 index 25ae9b9895a9..dbe44e890c99 100644
2773 --- a/drivers/usb/class/cdc-acm.c
2774 +++ b/drivers/usb/class/cdc-acm.c
2775 @@ -333,17 +333,17 @@ static void acm_ctrl_irq(struct urb *urb)
2776
2777 if (difference & ACM_CTRL_DSR)
2778 acm->iocount.dsr++;
2779 - if (difference & ACM_CTRL_BRK)
2780 - acm->iocount.brk++;
2781 - if (difference & ACM_CTRL_RI)
2782 - acm->iocount.rng++;
2783 if (difference & ACM_CTRL_DCD)
2784 acm->iocount.dcd++;
2785 - if (difference & ACM_CTRL_FRAMING)
2786 + if (newctrl & ACM_CTRL_BRK)
2787 + acm->iocount.brk++;
2788 + if (newctrl & ACM_CTRL_RI)
2789 + acm->iocount.rng++;
2790 + if (newctrl & ACM_CTRL_FRAMING)
2791 acm->iocount.frame++;
2792 - if (difference & ACM_CTRL_PARITY)
2793 + if (newctrl & ACM_CTRL_PARITY)
2794 acm->iocount.parity++;
2795 - if (difference & ACM_CTRL_OVERRUN)
2796 + if (newctrl & ACM_CTRL_OVERRUN)
2797 acm->iocount.overrun++;
2798 spin_unlock(&acm->read_lock);
2799
2800 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2801 index 988240e3cb58..52fc2084b310 100644
2802 --- a/drivers/usb/core/devio.c
2803 +++ b/drivers/usb/core/devio.c
2804 @@ -1490,8 +1490,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
2805 u = 0;
2806 switch (uurb->type) {
2807 case USBDEVFS_URB_TYPE_CONTROL:
2808 - if (is_in)
2809 - allow_short = true;
2810 if (!usb_endpoint_xfer_control(&ep->desc))
2811 return -EINVAL;
2812 /* min 8 byte setup packet */
2813 @@ -1521,6 +1519,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
2814 is_in = 0;
2815 uurb->endpoint &= ~USB_DIR_IN;
2816 }
2817 + if (is_in)
2818 + allow_short = true;
2819 snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
2820 "bRequest=%02x wValue=%04x "
2821 "wIndex=%04x wLength=%04x\n",
2822 diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
2823 index f221cb479e14..8e69150776f5 100644
2824 --- a/drivers/usb/dwc3/dwc3-omap.c
2825 +++ b/drivers/usb/dwc3/dwc3-omap.c
2826 @@ -512,15 +512,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
2827
2828 /* check the DMA Status */
2829 reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
2830 - irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
2831 - ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
2832 - dwc3_omap_interrupt_thread, IRQF_SHARED,
2833 - "dwc3-omap", omap);
2834 - if (ret) {
2835 - dev_err(dev, "failed to request IRQ #%d --> %d\n",
2836 - omap->irq, ret);
2837 - goto err1;
2838 - }
2839
2840 ret = dwc3_omap_extcon_register(omap);
2841 if (ret < 0)
2842 @@ -532,8 +523,15 @@ static int dwc3_omap_probe(struct platform_device *pdev)
2843 goto err2;
2844 }
2845
2846 + ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
2847 + dwc3_omap_interrupt_thread, IRQF_SHARED,
2848 + "dwc3-omap", omap);
2849 + if (ret) {
2850 + dev_err(dev, "failed to request IRQ #%d --> %d\n",
2851 + omap->irq, ret);
2852 + goto err1;
2853 + }
2854 dwc3_omap_enable_irqs(omap);
2855 - enable_irq(omap->irq);
2856 return 0;
2857
2858 err2:
2859 diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
2860 index d2fc237cd87a..fac9424940d4 100644
2861 --- a/drivers/usb/gadget/function/f_mass_storage.c
2862 +++ b/drivers/usb/gadget/function/f_mass_storage.c
2863 @@ -220,6 +220,8 @@
2864 #include <linux/usb/gadget.h>
2865 #include <linux/usb/composite.h>
2866
2867 +#include <linux/nospec.h>
2868 +
2869 #include "configfs.h"
2870
2871
2872 @@ -3260,6 +3262,7 @@ static struct config_group *fsg_lun_make(struct config_group *group,
2873 fsg_opts = to_fsg_opts(&group->cg_item);
2874 if (num >= FSG_MAX_LUNS)
2875 return ERR_PTR(-ERANGE);
2876 + num = array_index_nospec(num, FSG_MAX_LUNS);
2877
2878 mutex_lock(&fsg_opts->lock);
2879 if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
2880 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
2881 index 45a03eff4db1..0f09ab5399f4 100644
2882 --- a/drivers/usb/host/xhci-hub.c
2883 +++ b/drivers/usb/host/xhci-hub.c
2884 @@ -366,7 +366,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
2885
2886 slot_id = 0;
2887 for (i = 0; i < MAX_HC_SLOTS; i++) {
2888 - if (!xhci->devs[i])
2889 + if (!xhci->devs[i] || !xhci->devs[i]->udev)
2890 continue;
2891 speed = xhci->devs[i]->udev->speed;
2892 if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
2893 diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
2894 index 93fba9033b00..5984fb134cf4 100644
2895 --- a/drivers/usb/renesas_usbhs/mod_gadget.c
2896 +++ b/drivers/usb/renesas_usbhs/mod_gadget.c
2897 @@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
2898 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
2899 struct usbhs_pipe *pipe;
2900 unsigned long flags;
2901 - int ret = 0;
2902
2903 spin_lock_irqsave(&uep->lock, flags);
2904 pipe = usbhsg_uep_to_pipe(uep);
2905 - if (!pipe) {
2906 - ret = -EINVAL;
2907 + if (!pipe)
2908 goto out;
2909 - }
2910
2911 usbhsg_pipe_disable(uep);
2912 usbhs_pipe_free(pipe);
2913 @@ -1085,7 +1082,6 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
2914 ret = -ENOMEM;
2915 goto usbhs_mod_gadget_probe_err_gpriv;
2916 }
2917 - spin_lock_init(&uep->lock);
2918
2919 gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
2920 dev_info(dev, "%stransceiver found\n",
2921 @@ -1135,6 +1131,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
2922 uep->ep.name = uep->ep_name;
2923 uep->ep.ops = &usbhsg_ep_ops;
2924 INIT_LIST_HEAD(&uep->ep.ep_list);
2925 + spin_lock_init(&uep->lock);
2926
2927 /* init DCP */
2928 if (usbhsg_is_dcp(uep)) {
2929 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2930 index c569b6454a9d..4c5625cb540c 100644
2931 --- a/drivers/vhost/vhost.c
2932 +++ b/drivers/vhost/vhost.c
2933 @@ -28,6 +28,7 @@
2934 #include <linux/module.h>
2935 #include <linux/sort.h>
2936 #include <linux/interval_tree_generic.h>
2937 +#include <linux/nospec.h>
2938
2939 #include "vhost.h"
2940
2941 @@ -1289,6 +1290,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
2942 if (idx >= d->nvqs)
2943 return -ENOBUFS;
2944
2945 + idx = array_index_nospec(idx, d->nvqs);
2946 vq = d->vqs[idx];
2947
2948 mutex_lock(&vq->mutex);
2949 diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
2950 index 6f2e729a308f..f4b6d063a4b7 100644
2951 --- a/drivers/video/fbdev/efifb.c
2952 +++ b/drivers/video/fbdev/efifb.c
2953 @@ -375,7 +375,7 @@ static void efifb_fixup_resources(struct pci_dev *dev)
2954 if (!base)
2955 return;
2956
2957 - for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
2958 + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2959 struct resource *res = &dev->resource[i];
2960
2961 if (!(res->flags & IORESOURCE_MEM))
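The efifb loop bound is the classic inclusive-constant off-by-one: PCI_STD_RESOURCE_END names the last valid BAR index, not a count, so iterating with < skips the final BAR. A minimal illustration; the constant's value here is only an assumption for the demo:

#include <stdio.h>

#define STD_RESOURCE_END 5              /* last valid index (inclusive), not a count */

int main(void)
{
        int visited_lt = 0, visited_le = 0;

        for (int i = 0; i < STD_RESOURCE_END; i++)
                visited_lt++;
        for (int i = 0; i <= STD_RESOURCE_END; i++)
                visited_le++;

        printf("%d %d\n", visited_lt, visited_le);      /* 5 6: '<' misses the last BAR */
        return 0;
}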
2962 diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
2963 index def3a501acd6..d059d04c63ac 100644
2964 --- a/drivers/video/fbdev/pxa168fb.c
2965 +++ b/drivers/video/fbdev/pxa168fb.c
2966 @@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
2967 /*
2968 * enable controller clock
2969 */
2970 - clk_enable(fbi->clk);
2971 + clk_prepare_enable(fbi->clk);
2972
2973 pxa168fb_set_par(info);
2974
2975 @@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
2976 failed_free_cmap:
2977 fb_dealloc_cmap(&info->cmap);
2978 failed_free_clk:
2979 - clk_disable(fbi->clk);
2980 + clk_disable_unprepare(fbi->clk);
2981 failed_free_fbmem:
2982 dma_free_coherent(fbi->dev, info->fix.smem_len,
2983 info->screen_base, fbi->fb_start_dma);
2984 @@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
2985 dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
2986 info->screen_base, info->fix.smem_start);
2987
2988 - clk_disable(fbi->clk);
2989 + clk_disable_unprepare(fbi->clk);
2990
2991 framebuffer_release(info);
2992
2993 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
2994 index 2b96ca68dc10..5feaef9bcbda 100644
2995 --- a/fs/btrfs/extent_io.c
2996 +++ b/fs/btrfs/extent_io.c
2997 @@ -4377,6 +4377,123 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
2998 return NULL;
2999 }
3000
3001 +/*
3002 + * To cache the previous fiemap extent
3003 + *
3004 + * Will be used for merging fiemap extents
3005 + */
3006 +struct fiemap_cache {
3007 + u64 offset;
3008 + u64 phys;
3009 + u64 len;
3010 + u32 flags;
3011 + bool cached;
3012 +};
3013 +
3014 +/*
3015 + * Helper to submit fiemap extent.
3016 + *
3017 + * Will try to merge the current fiemap extent specified by @offset, @phys,
3018 + * @len and @flags with the cached one.
3019 + * Only when the merge fails is the cached one submitted as a
3020 + * fiemap extent.
3021 + *
3022 + * Return value is the same as fiemap_fill_next_extent().
3023 + */
3024 +static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
3025 + struct fiemap_cache *cache,
3026 + u64 offset, u64 phys, u64 len, u32 flags)
3027 +{
3028 + int ret = 0;
3029 +
3030 + if (!cache->cached)
3031 + goto assign;
3032 +
3033 + /*
3034 + * Sanity check: extent_fiemap() should have ensured that the new
3035 + * fiemap extent won't overlap with the cached one.
3036 + * Not recoverable.
3037 + *
3038 + * NOTE: Physical address can overlap, due to compression
3039 + */
3040 + if (cache->offset + cache->len > offset) {
3041 + WARN_ON(1);
3042 + return -EINVAL;
3043 + }
3044 +
3045 + /*
3046 + * Only merge fiemap extents if
3047 + * 1) Their logical addresses are contiguous
3048 + *
3049 + * 2) Their physical addresses are contiguous
3050 + * So truly compressed (physical size smaller than logical size)
3051 + * extents won't get merged with each other
3052 + *
3053 + * 3) They share the same flags except FIEMAP_EXTENT_LAST
3054 + * So a regular extent won't get merged with a prealloc extent
3055 + */
3056 + if (cache->offset + cache->len == offset &&
3057 + cache->phys + cache->len == phys &&
3058 + (cache->flags & ~FIEMAP_EXTENT_LAST) ==
3059 + (flags & ~FIEMAP_EXTENT_LAST)) {
3060 + cache->len += len;
3061 + cache->flags |= flags;
3062 + goto try_submit_last;
3063 + }
3064 +
3065 + /* Not mergeable, need to submit cached one */
3066 + ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
3067 + cache->len, cache->flags);
3068 + cache->cached = false;
3069 + if (ret)
3070 + return ret;
3071 +assign:
3072 + cache->cached = true;
3073 + cache->offset = offset;
3074 + cache->phys = phys;
3075 + cache->len = len;
3076 + cache->flags = flags;
3077 +try_submit_last:
3078 + if (cache->flags & FIEMAP_EXTENT_LAST) {
3079 + ret = fiemap_fill_next_extent(fieinfo, cache->offset,
3080 + cache->phys, cache->len, cache->flags);
3081 + cache->cached = false;
3082 + }
3083 + return ret;
3084 +}
3085 +
3086 +/*
3087 + * Sanity check for fiemap cache
3088 + *
3089 + * Every cached fiemap extent should have been submitted by emit_fiemap_extent().
3090 + * Iteration should be terminated either by the last fiemap extent or by
3091 + * fieinfo->fi_extents_max.
3092 + * So no cached fiemap extent should remain.
3093 + */
3094 +static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
3095 + struct fiemap_extent_info *fieinfo,
3096 + struct fiemap_cache *cache)
3097 +{
3098 + int ret;
3099 +
3100 + if (!cache->cached)
3101 + return 0;
3102 +
3103 + /* Small and recoverable problem, only to inform the developer */
3104 +#ifdef CONFIG_BTRFS_DEBUG
3105 + WARN_ON(1);
3106 +#endif
3107 + btrfs_warn(fs_info,
3108 + "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
3109 + cache->offset, cache->phys, cache->len, cache->flags);
3110 + ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
3111 + cache->len, cache->flags);
3112 + cache->cached = false;
3113 + if (ret > 0)
3114 + ret = 0;
3115 + return ret;
3116 +}
3117 +
3118 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3119 __u64 start, __u64 len, get_extent_t *get_extent)
3120 {
3121 @@ -4394,6 +4511,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3122 struct extent_state *cached_state = NULL;
3123 struct btrfs_path *path;
3124 struct btrfs_root *root = BTRFS_I(inode)->root;
3125 + struct fiemap_cache cache = { 0 };
3126 int end = 0;
3127 u64 em_start = 0;
3128 u64 em_len = 0;
3129 @@ -4573,8 +4691,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3130 flags |= FIEMAP_EXTENT_LAST;
3131 end = 1;
3132 }
3133 - ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3134 - em_len, flags);
3135 + ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
3136 + em_len, flags);
3137 if (ret) {
3138 if (ret == 1)
3139 ret = 0;
3140 @@ -4582,6 +4700,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3141 }
3142 }
3143 out_free:
3144 + if (!ret)
3145 + ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
3146 free_extent_map(em);
3147 out:
3148 btrfs_free_path(path);
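The extent_io.c hunk above batches fiemap extents: emit_fiemap_extent() keeps one cached entry and only calls fiemap_fill_next_extent() when the next extent cannot be merged into it, and check_fiemap_cache() flushes whatever is still cached when the walk ends early. A small userspace sketch of the merge rule follows; struct ext, FLAG_LAST and mergeable() are illustrative stand-ins, not btrfs names.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FLAG_LAST 0x1u	/* illustrative stand-in for FIEMAP_EXTENT_LAST */

struct ext {
	uint64_t offset;	/* logical offset */
	uint64_t phys;		/* physical address */
	uint64_t len;
	uint32_t flags;
};

/*
 * Userspace sketch of the merge rule: two extents are merged only when both
 * their logical and physical ranges are contiguous and their flags match
 * once FIEMAP_EXTENT_LAST is ignored.
 */
static bool mergeable(const struct ext *a, const struct ext *b)
{
	return a->offset + a->len == b->offset &&
	       a->phys + a->len == b->phys &&
	       (a->flags & ~FLAG_LAST) == (b->flags & ~FLAG_LAST);
}

int main(void)
{
	struct ext cached = { .offset = 0,     .phys = 4096,  .len = 4096, .flags = 0 };
	struct ext next   = { .offset = 4096,  .phys = 8192,  .len = 4096, .flags = FLAG_LAST };
	struct ext gap    = { .offset = 16384, .phys = 32768, .len = 4096, .flags = 0 };

	printf("contiguous extent: %s\n", mergeable(&cached, &next) ? "merge" : "submit cached");
	printf("extent after gap:  %s\n", mergeable(&cached, &gap)  ? "merge" : "submit cached");
	return 0;
}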
3149 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3150 index bd036557c6bc..5ebdb58079e1 100644
3151 --- a/fs/btrfs/inode.c
3152 +++ b/fs/btrfs/inode.c
3153 @@ -2966,7 +2966,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
3154
3155 ret = test_range_bit(io_tree, ordered_extent->file_offset,
3156 ordered_extent->file_offset + ordered_extent->len - 1,
3157 - EXTENT_DEFRAG, 1, cached_state);
3158 + EXTENT_DEFRAG, 0, cached_state);
3159 if (ret) {
3160 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
3161 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
3162 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
3163 index c8d2eec6596b..79dc3ee1de58 100644
3164 --- a/fs/btrfs/send.c
3165 +++ b/fs/btrfs/send.c
3166 @@ -5165,15 +5165,12 @@ static int is_extent_unchanged(struct send_ctx *sctx,
3167 goto out;
3168 }
3169
3170 - right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
3171 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
3172 right_len = btrfs_file_extent_inline_len(eb, slot, ei);
3173 right_len = PAGE_ALIGN(right_len);
3174 } else {
3175 right_len = btrfs_file_extent_num_bytes(eb, ei);
3176 }
3177 - right_offset = btrfs_file_extent_offset(eb, ei);
3178 - right_gen = btrfs_file_extent_generation(eb, ei);
3179
3180 /*
3181 * Are we at extent 8? If yes, we know the extent is changed.
3182 @@ -5198,6 +5195,10 @@ static int is_extent_unchanged(struct send_ctx *sctx,
3183 goto out;
3184 }
3185
3186 + right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
3187 + right_offset = btrfs_file_extent_offset(eb, ei);
3188 + right_gen = btrfs_file_extent_generation(eb, ei);
3189 +
3190 left_offset_fixed = left_offset;
3191 if (key.offset < ekey->offset) {
3192 /* Fix the right offset for 2a and 7. */
3193 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
3194 index 2026885702a2..3fb95e3d20d6 100644
3195 --- a/fs/cachefiles/namei.c
3196 +++ b/fs/cachefiles/namei.c
3197 @@ -340,7 +340,7 @@ try_again:
3198 trap = lock_rename(cache->graveyard, dir);
3199
3200 /* do some checks before getting the grave dentry */
3201 - if (rep->d_parent != dir) {
3202 + if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
3203 /* the entry was probably culled when we dropped the parent dir
3204 * lock */
3205 unlock_rename(cache->graveyard, dir);
3206 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3207 index a012f70bba5c..77a18fe10805 100644
3208 --- a/fs/cifs/inode.c
3209 +++ b/fs/cifs/inode.c
3210 @@ -704,7 +704,7 @@ cgfi_exit:
3211 /* Simple function to return a 64 bit hash of string. Rarely called */
3212 static __u64 simple_hashstr(const char *str)
3213 {
3214 - const __u64 hash_mult = 1125899906842597L; /* a big enough prime */
3215 + const __u64 hash_mult = 1125899906842597ULL; /* a big enough prime */
3216 __u64 hash = 0;
3217
3218 while (*str)
3219 diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
3220 index 8add4e8bab99..af719d93507e 100644
3221 --- a/fs/f2fs/dir.c
3222 +++ b/fs/f2fs/dir.c
3223 @@ -212,13 +212,9 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
3224 f2fs_put_page(dentry_page, 0);
3225 }
3226
3227 - /* This is to increase the speed of f2fs_create */
3228 - if (!de && room) {
3229 - F2FS_I(dir)->task = current;
3230 - if (F2FS_I(dir)->chash != namehash) {
3231 - F2FS_I(dir)->chash = namehash;
3232 - F2FS_I(dir)->clevel = level;
3233 - }
3234 + if (!de && room && F2FS_I(dir)->chash != namehash) {
3235 + F2FS_I(dir)->chash = namehash;
3236 + F2FS_I(dir)->clevel = level;
3237 }
3238
3239 return de;
3240 @@ -259,6 +255,9 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3241 break;
3242 }
3243 out:
3244 + /* This is to increase the speed of f2fs_create */
3245 + if (!de)
3246 + F2FS_I(dir)->task = current;
3247 return de;
3248 }
3249
3250 diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
3251 index 3b7644e43796..a9cad9b60790 100644
3252 --- a/fs/fat/fatent.c
3253 +++ b/fs/fat/fatent.c
3254 @@ -681,6 +681,7 @@ int fat_count_free_clusters(struct super_block *sb)
3255 if (ops->ent_get(&fatent) == FAT_ENT_FREE)
3256 free++;
3257 } while (fat_ent_next(sbi, &fatent));
3258 + cond_resched();
3259 }
3260 sbi->free_clusters = free;
3261 sbi->free_clus_valid = 1;
3262 diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
3263 index 785fcc29d85d..5729d55da67d 100644
3264 --- a/fs/ocfs2/dlmglue.c
3265 +++ b/fs/ocfs2/dlmglue.c
3266 @@ -2599,6 +2599,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
3267 struct ocfs2_lock_res *lockres;
3268
3269 lockres = &OCFS2_I(inode)->ip_inode_lockres;
3270 + /* had_lock means that the current process already took the cluster
3271 + * lock previously. If had_lock is 1, we have nothing to do here, and
3272 + * it will get unlocked where we took the lock.
3273 + */
3274 if (!had_lock) {
3275 ocfs2_remove_holder(lockres, oh);
3276 ocfs2_inode_unlock(inode, ex);
3277 diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
3278 index 03f6ff249edb..01932763b4d1 100644
3279 --- a/fs/ocfs2/xattr.c
3280 +++ b/fs/ocfs2/xattr.c
3281 @@ -1330,20 +1330,21 @@ static int ocfs2_xattr_get(struct inode *inode,
3282 void *buffer,
3283 size_t buffer_size)
3284 {
3285 - int ret;
3286 + int ret, had_lock;
3287 struct buffer_head *di_bh = NULL;
3288 + struct ocfs2_lock_holder oh;
3289
3290 - ret = ocfs2_inode_lock(inode, &di_bh, 0);
3291 - if (ret < 0) {
3292 - mlog_errno(ret);
3293 - return ret;
3294 + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
3295 + if (had_lock < 0) {
3296 + mlog_errno(had_lock);
3297 + return had_lock;
3298 }
3299 down_read(&OCFS2_I(inode)->ip_xattr_sem);
3300 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
3301 name, buffer, buffer_size);
3302 up_read(&OCFS2_I(inode)->ip_xattr_sem);
3303
3304 - ocfs2_inode_unlock(inode, 0);
3305 + ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
3306
3307 brelse(di_bh);
3308
3309 @@ -3539,11 +3540,12 @@ int ocfs2_xattr_set(struct inode *inode,
3310 {
3311 struct buffer_head *di_bh = NULL;
3312 struct ocfs2_dinode *di;
3313 - int ret, credits, ref_meta = 0, ref_credits = 0;
3314 + int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
3315 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3316 struct inode *tl_inode = osb->osb_tl_inode;
3317 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
3318 struct ocfs2_refcount_tree *ref_tree = NULL;
3319 + struct ocfs2_lock_holder oh;
3320
3321 struct ocfs2_xattr_info xi = {
3322 .xi_name_index = name_index,
3323 @@ -3574,8 +3576,9 @@ int ocfs2_xattr_set(struct inode *inode,
3324 return -ENOMEM;
3325 }
3326
3327 - ret = ocfs2_inode_lock(inode, &di_bh, 1);
3328 - if (ret < 0) {
3329 + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
3330 + if (had_lock < 0) {
3331 + ret = had_lock;
3332 mlog_errno(ret);
3333 goto cleanup_nolock;
3334 }
3335 @@ -3672,7 +3675,7 @@ cleanup:
3336 if (ret)
3337 mlog_errno(ret);
3338 }
3339 - ocfs2_inode_unlock(inode, 1);
3340 + ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
3341 cleanup_nolock:
3342 brelse(di_bh);
3343 brelse(xbs.xattr_bh);
3344 diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
3345 index 237c9c04dc3b..a34b25be39c5 100644
3346 --- a/fs/orangefs/xattr.c
3347 +++ b/fs/orangefs/xattr.c
3348 @@ -76,7 +76,7 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name,
3349 if (S_ISLNK(inode->i_mode))
3350 return -EOPNOTSUPP;
3351
3352 - if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
3353 + if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN)
3354 return -EINVAL;
3355
3356 fsuid = from_kuid(&init_user_ns, current_fsuid());
3357 @@ -169,7 +169,7 @@ static int orangefs_inode_removexattr(struct inode *inode, const char *name,
3358 struct orangefs_kernel_op_s *new_op = NULL;
3359 int ret = -ENOMEM;
3360
3361 - if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
3362 + if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN)
3363 return -EINVAL;
3364
3365 down_write(&orangefs_inode->xattr_sem);
3366 @@ -233,7 +233,7 @@ int orangefs_inode_setxattr(struct inode *inode, const char *name,
3367
3368 if (size > ORANGEFS_MAX_XATTR_VALUELEN)
3369 return -EINVAL;
3370 - if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
3371 + if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN)
3372 return -EINVAL;
3373
3374 internal_flag = convert_to_internal_xattr_flags(flags);
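The orangefs checks above move from '>' to '>=', presumably because ORANGEFS_MAX_XATTR_NAMELEN is the size of a buffer that must also hold the terminating NUL, so a name of exactly that many characters does not fit. A tiny illustration with a made-up limit; MAX_NAMELEN and name_fits() are not orangefs identifiers.

#include <stdio.h>
#include <string.h>

#define MAX_NAMELEN 8	/* illustrative stand-in for the real limit */

/*
 * A buffer of MAX_NAMELEN bytes that must also hold the terminating NUL can
 * only store names with strlen(name) < MAX_NAMELEN; the '>=' check rejects
 * the boundary case that '>' used to let through.
 */
static int name_fits(const char *name)
{
	return strlen(name) >= MAX_NAMELEN ? -1 : 0;
}

int main(void)
{
	printf("\"short\"    -> %d\n", name_fits("short"));
	printf("\"exactly8\" -> %d\n", name_fits("exactly8"));	/* 8 chars, no room for the NUL */
	return 0;
}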
3375 diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
3376 index a2760a2869f4..0f22c036699a 100644
3377 --- a/fs/ufs/inode.c
3378 +++ b/fs/ufs/inode.c
3379 @@ -846,6 +846,7 @@ void ufs_evict_inode(struct inode * inode)
3380 inode->i_size = 0;
3381 if (inode->i_blocks)
3382 ufs_truncate_blocks(inode);
3383 + ufs_update_inode(inode, inode_needs_sync(inode));
3384 }
3385
3386 invalidate_inode_buffers(inode);
3387 diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
3388 index b456cca1bfb2..c0ecdec8e0a9 100644
3389 --- a/fs/xfs/libxfs/xfs_trans_resv.c
3390 +++ b/fs/xfs/libxfs/xfs_trans_resv.c
3391 @@ -232,8 +232,6 @@ xfs_calc_write_reservation(
3392 * the super block to reflect the freed blocks: sector size
3393 * worst case split in allocation btrees per extent assuming 4 extents:
3394 * 4 exts * 2 trees * (2 * max depth - 1) * block size
3395 - * the inode btree: max depth * blocksize
3396 - * the allocation btrees: 2 trees * (max depth - 1) * block size
3397 */
3398 STATIC uint
3399 xfs_calc_itruncate_reservation(
3400 @@ -245,12 +243,7 @@ xfs_calc_itruncate_reservation(
3401 XFS_FSB_TO_B(mp, 1))),
3402 (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
3403 xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
3404 - XFS_FSB_TO_B(mp, 1)) +
3405 - xfs_calc_buf_res(5, 0) +
3406 - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
3407 - XFS_FSB_TO_B(mp, 1)) +
3408 - xfs_calc_buf_res(2 + mp->m_ialloc_blks +
3409 - mp->m_in_maxlevels, 0)));
3410 + XFS_FSB_TO_B(mp, 1))));
3411 }
3412
3413 /*
3414 diff --git a/include/linux/elevator.h b/include/linux/elevator.h
3415 index e7f358d2e5fc..eaa58c0f894b 100644
3416 --- a/include/linux/elevator.h
3417 +++ b/include/linux/elevator.h
3418 @@ -102,7 +102,7 @@ struct elevator_type
3419 struct module *elevator_owner;
3420
3421 /* managed by elevator core */
3422 - char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */
3423 + char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
3424 struct list_head list;
3425 };
3426
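The elevator.h change above fixes an off-by-one in the cache-name buffer: the suffix "_io_cq" is six characters, so with a worst-case elevator name of ELV_NAME_MAX - 1 characters the buffer needs ELV_NAME_MAX + 6 bytes once the NUL terminator is counted. A quick arithmetic check, assuming ELV_NAME_MAX is 16 as in this kernel version.

#include <stdio.h>

#define ELV_NAME_MAX 16		/* assumed value; elevator name buffer incl. NUL */

int main(void)
{
	char name[ELV_NAME_MAX] = "abcdefghijklmno";	/* worst case: 15 characters */
	char cache_name[ELV_NAME_MAX + 6];		/* 15 + strlen("_io_cq") + NUL = 22 */
	int n;

	n = snprintf(cache_name, sizeof(cache_name), "%s_io_cq", name);
	printf("need %d chars + NUL, buffer holds %zu bytes\n", n, sizeof(cache_name));
	return 0;
}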
3427 diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
3428 index 767467d886de..67c75372b691 100644
3429 --- a/include/linux/iio/buffer-dma.h
3430 +++ b/include/linux/iio/buffer-dma.h
3431 @@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
3432 char __user *user_buffer);
3433 size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
3434 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
3435 -int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
3436 +int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
3437 int iio_dma_buffer_request_update(struct iio_buffer *buffer);
3438
3439 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
3440 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
3441 index 62d44c176071..e4b8678183f5 100644
3442 --- a/include/linux/posix-timers.h
3443 +++ b/include/linux/posix-timers.h
3444 @@ -65,8 +65,8 @@ struct k_itimer {
3445 spinlock_t it_lock;
3446 clockid_t it_clock; /* which timer type */
3447 timer_t it_id; /* timer id */
3448 - int it_overrun; /* overrun on pending signal */
3449 - int it_overrun_last; /* overrun on last delivered signal */
3450 + s64 it_overrun; /* overrun on pending signal */
3451 + s64 it_overrun_last; /* overrun on last delivered signal */
3452 int it_requeue_pending; /* waiting to requeue this timer */
3453 #define REQUEUE_PENDING 1
3454 int it_sigev_notify; /* notify word of sigevent struct */
3455 diff --git a/init/main.c b/init/main.c
3456 index 4313772d634a..3c7f71d8e704 100644
3457 --- a/init/main.c
3458 +++ b/init/main.c
3459 @@ -915,7 +915,7 @@ static int try_to_run_init_process(const char *init_filename)
3460
3461 static noinline void __init kernel_init_freeable(void);
3462
3463 -#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_SET_MODULE_RONX)
3464 +#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX)
3465 bool rodata_enabled __ro_after_init = true;
3466 static int __init set_debug_rodata(char *str)
3467 {
3468 diff --git a/kernel/events/core.c b/kernel/events/core.c
3469 index 95bd00d9f2c3..1af0bbf20984 100644
3470 --- a/kernel/events/core.c
3471 +++ b/kernel/events/core.c
3472 @@ -4331,7 +4331,9 @@ EXPORT_SYMBOL_GPL(perf_event_read_value);
3473 static int __perf_read_group_add(struct perf_event *leader,
3474 u64 read_format, u64 *values)
3475 {
3476 + struct perf_event_context *ctx = leader->ctx;
3477 struct perf_event *sub;
3478 + unsigned long flags;
3479 int n = 1; /* skip @nr */
3480 int ret;
3481
3482 @@ -4361,12 +4363,15 @@ static int __perf_read_group_add(struct perf_event *leader,
3483 if (read_format & PERF_FORMAT_ID)
3484 values[n++] = primary_event_id(leader);
3485
3486 + raw_spin_lock_irqsave(&ctx->lock, flags);
3487 +
3488 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3489 values[n++] += perf_event_count(sub);
3490 if (read_format & PERF_FORMAT_ID)
3491 values[n++] = primary_event_id(sub);
3492 }
3493
3494 + raw_spin_unlock_irqrestore(&ctx->lock, flags);
3495 return 0;
3496 }
3497
3498 @@ -7737,6 +7742,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
3499 goto unlock;
3500
3501 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3502 + if (event->cpu != smp_processor_id())
3503 + continue;
3504 if (event->attr.type != PERF_TYPE_TRACEPOINT)
3505 continue;
3506 if (event->attr.config != entry->type)
3507 diff --git a/kernel/futex.c b/kernel/futex.c
3508 index c3ea6f2a6997..053d7be08be5 100644
3509 --- a/kernel/futex.c
3510 +++ b/kernel/futex.c
3511 @@ -1467,8 +1467,16 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
3512 int oldval, ret;
3513
3514 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
3515 - if (oparg < 0 || oparg > 31)
3516 - return -EINVAL;
3517 + if (oparg < 0 || oparg > 31) {
3518 + char comm[sizeof(current->comm)];
3519 + /*
3520 + * kill this print and return -EINVAL when userspace
3521 + * is sane again
3522 + */
3523 + pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
3524 + get_task_comm(comm, current), oparg);
3525 + oparg &= 31;
3526 + }
3527 oparg = 1 << oparg;
3528 }
3529
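The futex hunk above stops treating an out-of-range FUTEX_OP_OPARG_SHIFT argument as fatal: shifting a 32-bit value by 32 or more is undefined behaviour in C, so the count is masked to 0..31 and the offending program is logged instead of getting -EINVAL. A standalone sketch of the clamp; clamp_shift_arg() is an illustrative name, not the kernel function.

#include <stdio.h>

/*
 * Sketch of the clamp applied to the FUTEX_OP_OPARG_SHIFT argument: a shift
 * count outside 0..31 would be undefined behaviour on a 32-bit value, so it
 * is masked to that range (and the caller logged) instead of being rejected.
 */
static unsigned int clamp_shift_arg(int oparg)
{
	if (oparg < 0 || oparg > 31) {
		printf("shift count %d out of range, masking to %d\n",
		       oparg, oparg & 31);
		oparg &= 31;
	}
	return 1u << oparg;
}

int main(void)
{
	printf("oparg=5  -> %u\n", clamp_shift_arg(5));
	printf("oparg=33 -> %u\n", clamp_shift_arg(33));
	return 0;
}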
3530 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3531 index f81adb476c03..5ad109ccec35 100644
3532 --- a/kernel/sched/fair.c
3533 +++ b/kernel/sched/fair.c
3534 @@ -3976,9 +3976,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3535
3536 /*
3537 * Add to the _head_ of the list, so that an already-started
3538 - * distribute_cfs_runtime will not see us
3539 + * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is
3540 + * not running add to the tail so that later runqueues don't get starved.
3541 */
3542 - list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3543 + if (cfs_b->distribute_running)
3544 + list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3545 + else
3546 + list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3547
3548 /*
3549 * If we're the first throttled task, make sure the bandwidth
3550 @@ -4121,14 +4125,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3551 * in us over-using our runtime if it is all used during this loop, but
3552 * only by limited amounts in that extreme case.
3553 */
3554 - while (throttled && cfs_b->runtime > 0) {
3555 + while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
3556 runtime = cfs_b->runtime;
3557 + cfs_b->distribute_running = 1;
3558 raw_spin_unlock(&cfs_b->lock);
3559 /* we can't nest cfs_b->lock while distributing bandwidth */
3560 runtime = distribute_cfs_runtime(cfs_b, runtime,
3561 runtime_expires);
3562 raw_spin_lock(&cfs_b->lock);
3563
3564 + cfs_b->distribute_running = 0;
3565 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3566
3567 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3568 @@ -4239,6 +4245,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3569
3570 /* confirm we're still not at a refresh boundary */
3571 raw_spin_lock(&cfs_b->lock);
3572 + if (cfs_b->distribute_running) {
3573 + raw_spin_unlock(&cfs_b->lock);
3574 + return;
3575 + }
3576 +
3577 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3578 raw_spin_unlock(&cfs_b->lock);
3579 return;
3580 @@ -4248,6 +4259,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3581 runtime = cfs_b->runtime;
3582
3583 expires = cfs_b->runtime_expires;
3584 + if (runtime)
3585 + cfs_b->distribute_running = 1;
3586 +
3587 raw_spin_unlock(&cfs_b->lock);
3588
3589 if (!runtime)
3590 @@ -4258,6 +4272,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3591 raw_spin_lock(&cfs_b->lock);
3592 if (expires == cfs_b->runtime_expires)
3593 cfs_b->runtime -= min(runtime, cfs_b->runtime);
3594 + cfs_b->distribute_running = 0;
3595 raw_spin_unlock(&cfs_b->lock);
3596 }
3597
3598 @@ -4366,6 +4381,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3599 cfs_b->period_timer.function = sched_cfs_period_timer;
3600 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3601 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3602 + cfs_b->distribute_running = 0;
3603 }
3604
3605 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3606 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
3607 index 923cc35e8490..ec6e838e991a 100644
3608 --- a/kernel/sched/sched.h
3609 +++ b/kernel/sched/sched.h
3610 @@ -255,6 +255,8 @@ struct cfs_bandwidth {
3611 /* statistics */
3612 int nr_periods, nr_throttled;
3613 u64 throttled_time;
3614 +
3615 + bool distribute_running;
3616 #endif
3617 };
3618
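The scheduler hunks above close a race between the period timer and the slack timer: both drop cfs_b->lock while handing out runtime, so the new distribute_running flag marks a distribution in flight, the other path backs off while it is set, and newly throttled runqueues are queued at the tail in that window so they are not starved. A minimal single-threaded sketch of the guard's shape; bw_lock, distribute_running and try_distribute() here are illustrative, not the kernel's data structures.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Minimal sketch of the distribute_running guard: the lock protecting the
 * bandwidth state must be dropped while runtime is handed out, so a flag
 * set under the lock tells the other timer path that a distribution is
 * already in flight and it should back off.
 */
static pthread_mutex_t bw_lock = PTHREAD_MUTEX_INITIALIZER;
static bool distribute_running;

static void try_distribute(const char *who)
{
	pthread_mutex_lock(&bw_lock);
	if (distribute_running) {
		pthread_mutex_unlock(&bw_lock);
		printf("%s: distribution already in flight, backing off\n", who);
		return;
	}
	distribute_running = true;
	pthread_mutex_unlock(&bw_lock);

	printf("%s: distributing runtime with the lock dropped\n", who);

	pthread_mutex_lock(&bw_lock);
	distribute_running = false;
	pthread_mutex_unlock(&bw_lock);
}

int main(void)
{
	try_distribute("period timer");
	try_distribute("slack timer");
	return 0;
}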
3619 diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
3620 index 39008d78927a..21a27bb73587 100644
3621 --- a/kernel/time/posix-cpu-timers.c
3622 +++ b/kernel/time/posix-cpu-timers.c
3623 @@ -103,7 +103,7 @@ static void bump_cpu_timer(struct k_itimer *timer,
3624 continue;
3625
3626 timer->it.cpu.expires += incr;
3627 - timer->it_overrun += 1 << i;
3628 + timer->it_overrun += 1LL << i;
3629 delta -= incr;
3630 }
3631 }
3632 diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
3633 index fc7c37ad90a0..0e6ed2e7d066 100644
3634 --- a/kernel/time/posix-timers.c
3635 +++ b/kernel/time/posix-timers.c
3636 @@ -355,6 +355,17 @@ static __init int init_posix_timers(void)
3637
3638 __initcall(init_posix_timers);
3639
3640 +/*
3641 + * The siginfo si_overrun field and the return value of timer_getoverrun(2)
3642 + * are of type int. Clamp the overrun value to INT_MAX
3643 + */
3644 +static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
3645 +{
3646 + s64 sum = timr->it_overrun_last + (s64)baseval;
3647 +
3648 + return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
3649 +}
3650 +
3651 static void schedule_next_timer(struct k_itimer *timr)
3652 {
3653 struct hrtimer *timer = &timr->it.real.timer;
3654 @@ -362,12 +373,11 @@ static void schedule_next_timer(struct k_itimer *timr)
3655 if (timr->it.real.interval.tv64 == 0)
3656 return;
3657
3658 - timr->it_overrun += (unsigned int) hrtimer_forward(timer,
3659 - timer->base->get_time(),
3660 - timr->it.real.interval);
3661 + timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
3662 + timr->it.real.interval);
3663
3664 timr->it_overrun_last = timr->it_overrun;
3665 - timr->it_overrun = -1;
3666 + timr->it_overrun = -1LL;
3667 ++timr->it_requeue_pending;
3668 hrtimer_restart(timer);
3669 }
3670 @@ -396,7 +406,7 @@ void do_schedule_next_timer(struct siginfo *info)
3671 else
3672 schedule_next_timer(timr);
3673
3674 - info->si_overrun += timr->it_overrun_last;
3675 + info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
3676 }
3677
3678 if (timr)
3679 @@ -491,8 +501,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
3680 now = ktime_add(now, kj);
3681 }
3682 #endif
3683 - timr->it_overrun += (unsigned int)
3684 - hrtimer_forward(timer, now,
3685 + timr->it_overrun += hrtimer_forward(timer, now,
3686 timr->it.real.interval);
3687 ret = HRTIMER_RESTART;
3688 ++timr->it_requeue_pending;
3689 @@ -633,7 +642,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
3690 it_id_set = IT_ID_SET;
3691 new_timer->it_id = (timer_t) new_timer_id;
3692 new_timer->it_clock = which_clock;
3693 - new_timer->it_overrun = -1;
3694 + new_timer->it_overrun = -1LL;
3695
3696 if (timer_event_spec) {
3697 if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
3698 @@ -762,7 +771,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
3699 */
3700 if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
3701 timr->it_sigev_notify == SIGEV_NONE))
3702 - timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
3703 + timr->it_overrun += hrtimer_forward(timer, now, iv);
3704
3705 remaining = __hrtimer_expires_remaining_adjusted(timer, now);
3706 /* Return 0 only, when the timer is expired and not pending */
3707 @@ -824,7 +833,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
3708 if (!timr)
3709 return -EINVAL;
3710
3711 - overrun = timr->it_overrun_last;
3712 + overrun = timer_overrun_to_int(timr, 0);
3713 unlock_timer(timr, flags);
3714
3715 return overrun;
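The posix-timers changes above widen the overrun accounting to 64 bits so hrtimer_forward() results are no longer truncated, and clamp only at the user-visible edge, since si_overrun and timer_getoverrun(2) still return an int. A standalone sketch of the clamp; overrun_to_int() mirrors timer_overrun_to_int() but is not the kernel code.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

/*
 * Sketch of the clamping done by timer_overrun_to_int(): overruns accumulate
 * in a 64-bit counter so they no longer wrap, and only the value reported
 * through the int-sized interfaces is clamped to INT_MAX.
 */
static int overrun_to_int(int64_t overrun_last, int baseval)
{
	int64_t sum = overrun_last + (int64_t)baseval;

	return sum > (int64_t)INT_MAX ? INT_MAX : (int)sum;
}

int main(void)
{
	printf("%d\n", overrun_to_int(1000, 5));		/* 1005 */
	printf("%d\n", overrun_to_int(5000000000LL, 0));	/* clamped to INT_MAX */
	return 0;
}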
3716 diff --git a/lib/test_bpf.c b/lib/test_bpf.c
3717 index 1586dfdea809..960d4d627361 100644
3718 --- a/lib/test_bpf.c
3719 +++ b/lib/test_bpf.c
3720 @@ -4874,7 +4874,7 @@ static struct bpf_test tests[] = {
3721 {
3722 "BPF_MAXINSNS: Jump, gap, jump, ...",
3723 { },
3724 -#ifdef CONFIG_BPF_JIT_ALWAYS_ON
3725 +#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
3726 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
3727 #else
3728 CLASSIC | FLAG_NO_DATA,
3729 diff --git a/mm/frame_vector.c b/mm/frame_vector.c
3730 index 375a103d7a56..d73eed0443f6 100644
3731 --- a/mm/frame_vector.c
3732 +++ b/mm/frame_vector.c
3733 @@ -61,8 +61,10 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
3734 * get_user_pages_longterm() and disallow it for filesystem-dax
3735 * mappings.
3736 */
3737 - if (vma_is_fsdax(vma))
3738 - return -EOPNOTSUPP;
3739 + if (vma_is_fsdax(vma)) {
3740 + ret = -EOPNOTSUPP;
3741 + goto out;
3742 + }
3743
3744 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
3745 vec->got_ref = true;
3746 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
3747 index c9f715b2917f..0f962cc3f1bf 100644
3748 --- a/mm/memory_hotplug.c
3749 +++ b/mm/memory_hotplug.c
3750 @@ -1508,7 +1508,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
3751 while ((i < MAX_ORDER_NR_PAGES) &&
3752 !pfn_valid_within(pfn + i))
3753 i++;
3754 - if (i == MAX_ORDER_NR_PAGES)
3755 + if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
3756 continue;
3757 page = pfn_to_page(pfn + i);
3758 if (zone && page_zone(page) != zone)
3759 @@ -1522,7 +1522,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
3760
3761 if (zone) {
3762 *valid_start = start;
3763 - *valid_end = end;
3764 + *valid_end = min(end, end_pfn);
3765 return 1;
3766 } else {
3767 return 0;
3768 diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
3769 index 1fba2a03f8ae..ba24f613c0fc 100644
3770 --- a/net/bluetooth/mgmt.c
3771 +++ b/net/bluetooth/mgmt.c
3772 @@ -2298,9 +2298,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3773 /* LE address type */
3774 addr_type = le_addr_type(cp->addr.type);
3775
3776 - hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
3777 -
3778 - err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
3779 + /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3780 + err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3781 if (err < 0) {
3782 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3783 MGMT_STATUS_NOT_PAIRED, &rp,
3784 @@ -2314,8 +2313,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3785 goto done;
3786 }
3787
3788 - /* Abort any ongoing SMP pairing */
3789 - smp_cancel_pairing(conn);
3790
3791 /* Defer clearing up the connection parameters until closing to
3792 * give a chance of keeping them if a repairing happens.
3793 diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
3794 index ead4d1baeaa6..1abfbcd8090a 100644
3795 --- a/net/bluetooth/smp.c
3796 +++ b/net/bluetooth/smp.c
3797 @@ -2353,30 +2353,51 @@ unlock:
3798 return ret;
3799 }
3800
3801 -void smp_cancel_pairing(struct hci_conn *hcon)
3802 +int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
3803 + u8 addr_type)
3804 {
3805 - struct l2cap_conn *conn = hcon->l2cap_data;
3806 + struct hci_conn *hcon;
3807 + struct l2cap_conn *conn;
3808 struct l2cap_chan *chan;
3809 struct smp_chan *smp;
3810 + int err;
3811 +
3812 + err = hci_remove_ltk(hdev, bdaddr, addr_type);
3813 + hci_remove_irk(hdev, bdaddr, addr_type);
3814 +
3815 + hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
3816 + if (!hcon)
3817 + goto done;
3818
3819 + conn = hcon->l2cap_data;
3820 if (!conn)
3821 - return;
3822 + goto done;
3823
3824 chan = conn->smp;
3825 if (!chan)
3826 - return;
3827 + goto done;
3828
3829 l2cap_chan_lock(chan);
3830
3831 smp = chan->data;
3832 if (smp) {
3833 + /* Set keys to NULL to make sure smp_failure() does not try to
3834 + * remove and free already invalidated rcu list entries. */
3835 + smp->ltk = NULL;
3836 + smp->slave_ltk = NULL;
3837 + smp->remote_irk = NULL;
3838 +
3839 if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
3840 smp_failure(conn, 0);
3841 else
3842 smp_failure(conn, SMP_UNSPECIFIED);
3843 + err = 0;
3844 }
3845
3846 l2cap_chan_unlock(chan);
3847 +
3848 +done:
3849 + return err;
3850 }
3851
3852 static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
3853 diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
3854 index ffcc70b6b199..993cbd7bcfe7 100644
3855 --- a/net/bluetooth/smp.h
3856 +++ b/net/bluetooth/smp.h
3857 @@ -180,7 +180,8 @@ enum smp_key_pref {
3858 };
3859
3860 /* SMP Commands */
3861 -void smp_cancel_pairing(struct hci_conn *hcon);
3862 +int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
3863 + u8 addr_type);
3864 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
3865 enum smp_key_pref key_pref);
3866 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
3867 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
3868 index 2136e45f5277..4bd57507b9a4 100644
3869 --- a/net/bridge/br_multicast.c
3870 +++ b/net/bridge/br_multicast.c
3871 @@ -1287,7 +1287,14 @@ static void br_multicast_query_received(struct net_bridge *br,
3872 return;
3873
3874 br_multicast_update_query_timer(br, query, max_delay);
3875 - br_multicast_mark_router(br, port);
3876 +
3877 + /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
3878 + * the arrival port for IGMP Queries where the source address
3879 + * is 0.0.0.0 should not be added to the router port list.
3880 + */
3881 + if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
3882 + saddr->proto == htons(ETH_P_IPV6))
3883 + br_multicast_mark_router(br, port);
3884 }
3885
3886 static int br_ip4_multicast_query(struct net_bridge *br,
3887 diff --git a/net/core/datagram.c b/net/core/datagram.c
3888 index 4fa4011feec1..146502f310ce 100644
3889 --- a/net/core/datagram.c
3890 +++ b/net/core/datagram.c
3891 @@ -754,8 +754,9 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
3892 return -EINVAL;
3893 }
3894
3895 - if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
3896 - netdev_rx_csum_fault(skb->dev);
3897 + if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3898 + !skb->csum_complete_sw)
3899 + netdev_rx_csum_fault(NULL);
3900 }
3901 return 0;
3902 fault:
3903 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
3904 index 7913771ec474..a8a9938aeceb 100644
3905 --- a/net/core/ethtool.c
3906 +++ b/net/core/ethtool.c
3907 @@ -2397,13 +2397,17 @@ roll_back:
3908 return ret;
3909 }
3910
3911 -static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr)
3912 +static int ethtool_set_per_queue(struct net_device *dev,
3913 + void __user *useraddr, u32 sub_cmd)
3914 {
3915 struct ethtool_per_queue_op per_queue_opt;
3916
3917 if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
3918 return -EFAULT;
3919
3920 + if (per_queue_opt.sub_command != sub_cmd)
3921 + return -EINVAL;
3922 +
3923 switch (per_queue_opt.sub_command) {
3924 case ETHTOOL_GCOALESCE:
3925 return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
3926 @@ -2669,7 +2673,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
3927 rc = ethtool_get_phy_stats(dev, useraddr);
3928 break;
3929 case ETHTOOL_PERQUEUE:
3930 - rc = ethtool_set_per_queue(dev, useraddr);
3931 + rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
3932 break;
3933 case ETHTOOL_GLINKSETTINGS:
3934 rc = ethtool_get_link_ksettings(dev, useraddr);
3935 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3936 index 189082dc288d..928a0b84469d 100644
3937 --- a/net/core/rtnetlink.c
3938 +++ b/net/core/rtnetlink.c
3939 @@ -2987,6 +2987,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
3940 return -EINVAL;
3941 }
3942
3943 + if (dev->type != ARPHRD_ETHER) {
3944 + pr_info("PF_BRIDGE: FDB add only supported for Ethernet devices");
3945 + return -EINVAL;
3946 + }
3947 +
3948 addr = nla_data(tb[NDA_LLADDR]);
3949
3950 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3951 @@ -3090,6 +3095,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
3952 return -EINVAL;
3953 }
3954
3955 + if (dev->type != ARPHRD_ETHER) {
3956 + pr_info("PF_BRIDGE: FDB delete only supported for Ethernet devices");
3957 + return -EINVAL;
3958 + }
3959 +
3960 addr = nla_data(tb[NDA_LLADDR]);
3961
3962 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3963 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3964 index 038ec74fa131..68ecb7d71c2b 100644
3965 --- a/net/core/skbuff.c
3966 +++ b/net/core/skbuff.c
3967 @@ -1585,8 +1585,9 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
3968 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3969 int delta = skb->len - len;
3970
3971 - skb->csum = csum_sub(skb->csum,
3972 - skb_checksum(skb, len, delta, 0));
3973 + skb->csum = csum_block_sub(skb->csum,
3974 + skb_checksum(skb, len, delta, 0),
3975 + len);
3976 }
3977 return __pskb_trim(skb, len);
3978 }
3979 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
3980 index cc8c6ac84d08..80e48f40c3a8 100644
3981 --- a/net/ipv4/ip_fragment.c
3982 +++ b/net/ipv4/ip_fragment.c
3983 @@ -718,10 +718,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
3984 if (ip_is_fragment(&iph)) {
3985 skb = skb_share_check(skb, GFP_ATOMIC);
3986 if (skb) {
3987 - if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
3988 - return skb;
3989 - if (pskb_trim_rcsum(skb, netoff + len))
3990 - return skb;
3991 + if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
3992 + kfree_skb(skb);
3993 + return NULL;
3994 + }
3995 + if (pskb_trim_rcsum(skb, netoff + len)) {
3996 + kfree_skb(skb);
3997 + return NULL;
3998 + }
3999 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
4000 if (ip_defrag(net, skb, user))
4001 return NULL;
4002 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4003 index b9b2a9828d98..5d4b5e0f6b5e 100644
4004 --- a/net/ipv4/udp.c
4005 +++ b/net/ipv4/udp.c
4006 @@ -1726,8 +1726,24 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
4007 /* Note, we are only interested in != 0 or == 0, thus the
4008 * force to int.
4009 */
4010 - return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
4011 - inet_compute_pseudo);
4012 + err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
4013 + inet_compute_pseudo);
4014 + if (err)
4015 + return err;
4016 +
4017 + if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
4018 + /* If SW calculated the value, we know it's bad */
4019 + if (skb->csum_complete_sw)
4020 + return 1;
4021 +
4022 + /* HW says the value is bad. Let's validate that.
4023 + * skb->csum is no longer the full packet checksum,
4024 + * so don't treat it as such.
4025 + */
4026 + skb_checksum_complete_unset(skb);
4027 + }
4028 +
4029 + return 0;
4030 }
4031
4032 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
4033 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
4034 index bc532206077f..8f79f0414bc3 100644
4035 --- a/net/ipv6/addrconf.c
4036 +++ b/net/ipv6/addrconf.c
4037 @@ -4721,8 +4721,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
4038
4039 /* unicast address incl. temp addr */
4040 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4041 - if (++ip_idx < s_ip_idx)
4042 - continue;
4043 + if (ip_idx < s_ip_idx)
4044 + goto next;
4045 err = inet6_fill_ifaddr(skb, ifa,
4046 NETLINK_CB(cb->skb).portid,
4047 cb->nlh->nlmsg_seq,
4048 @@ -4731,6 +4731,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
4049 if (err < 0)
4050 break;
4051 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4052 +next:
4053 + ip_idx++;
4054 }
4055 break;
4056 }
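The in6_dump_addrs() change above fixes dump resumption: the per-address index used to be advanced as part of the skip test, so entries could be skipped or repeated when a netlink dump resumed at s_ip_idx; now the index advances for every entry after the skip decision. A compact sketch of the corrected loop shape; entries[], s_idx and idx are illustrative.

#include <stdio.h>

/*
 * Sketch of the corrected dump-resume loop: the running index advances for
 * every entry, after the skip test, so a dump resumed at s_idx neither skips
 * nor repeats entries.
 */
int main(void)
{
	const char *entries[] = { "addr0", "addr1", "addr2", "addr3" };
	int s_idx = 2;	/* where the previous dump batch stopped */
	int idx = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (idx < s_idx)
			goto next;
		printf("dumping %s (idx=%d)\n", entries[i], idx);
next:
		idx++;
	}
	return 0;
}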
4057 diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
4058 index 421379014995..f7b425615c12 100644
4059 --- a/net/ipv6/af_inet6.c
4060 +++ b/net/ipv6/af_inet6.c
4061 @@ -1045,11 +1045,11 @@ netfilter_fail:
4062 igmp_fail:
4063 ndisc_cleanup();
4064 ndisc_fail:
4065 - ip6_mr_cleanup();
4066 + icmpv6_cleanup();
4067 icmp_fail:
4068 - unregister_pernet_subsys(&inet6_net_ops);
4069 + ip6_mr_cleanup();
4070 ipmr_fail:
4071 - icmpv6_cleanup();
4072 + unregister_pernet_subsys(&inet6_net_ops);
4073 register_pernet_fail:
4074 sock_unregister(PF_INET6);
4075 rtnl_unregister_all(PF_INET6);
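The inet6_init() error-path reorder above restores the usual rule for goto-based unwinding: each failure label must undo only the steps that already succeeded, in reverse order of initialization, so icmpv6_cleanup() now pairs with the icmp failure point and unregister_pernet_subsys() with the pernet one. A generic sketch of that pattern; setup_a/b/c and undo_a/b are placeholders.

#include <stdio.h>

/*
 * Generic sketch of goto-based error unwinding: each failure label undoes
 * only the steps that already succeeded, in reverse order of initialization.
 */
static int setup_a(void) { printf("A up\n"); return 0; }
static void undo_a(void) { printf("A down\n"); }
static int setup_b(void) { printf("B up\n"); return 0; }
static void undo_b(void) { printf("B down\n"); }
static int setup_c(void) { return -1; /* pretend this step fails */ }

int main(void)
{
	if (setup_a())
		goto fail_a;
	if (setup_b())
		goto fail_b;
	if (setup_c())
		goto fail_c;
	return 0;

fail_c:
	undo_b();	/* B succeeded before C failed, so undo it first */
fail_b:
	undo_a();
fail_a:
	return 1;
}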
4076 diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
4077 index 1dc023ca98fd..9d9a16e219d6 100644
4078 --- a/net/ipv6/ip6_checksum.c
4079 +++ b/net/ipv6/ip6_checksum.c
4080 @@ -87,8 +87,24 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
4081 * Note, we are only interested in != 0 or == 0, thus the
4082 * force to int.
4083 */
4084 - return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
4085 - ip6_compute_pseudo);
4086 + err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
4087 + ip6_compute_pseudo);
4088 + if (err)
4089 + return err;
4090 +
4091 + if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
4092 + /* If SW calculated the value, we know it's bad */
4093 + if (skb->csum_complete_sw)
4094 + return 1;
4095 +
4096 + /* HW says the value is bad. Let's validate that.
4097 + * skb->csum is no longer the full packet checksum,
4098 + * so don't treat it as such.
4099 + */
4100 + skb_checksum_complete_unset(skb);
4101 + }
4102 +
4103 + return 0;
4104 }
4105 EXPORT_SYMBOL(udp6_csum_init);
4106
4107 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
4108 index fd081a14064e..9c5afa5153ce 100644
4109 --- a/net/ipv6/ip6_tunnel.c
4110 +++ b/net/ipv6/ip6_tunnel.c
4111 @@ -1185,11 +1185,6 @@ route_lookup:
4112 }
4113 skb_dst_set(skb, dst);
4114
4115 - if (encap_limit >= 0) {
4116 - init_tel_txopt(&opt, encap_limit);
4117 - ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
4118 - }
4119 -
4120 /* Calculate max headroom for all the headers and adjust
4121 * needed_headroom if necessary.
4122 */
4123 @@ -1202,6 +1197,11 @@ route_lookup:
4124 if (err)
4125 return err;
4126
4127 + if (encap_limit >= 0) {
4128 + init_tel_txopt(&opt, encap_limit);
4129 + ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
4130 + }
4131 +
4132 skb_push(skb, sizeof(struct ipv6hdr));
4133 skb_reset_network_header(skb);
4134 ipv6h = ipv6_hdr(skb);
4135 @@ -1258,7 +1258,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4136 fl6.flowi6_proto = IPPROTO_IPIP;
4137 fl6.daddr = key->u.ipv6.dst;
4138 fl6.flowlabel = key->label;
4139 - dsfield = ip6_tclass(key->label);
4140 + dsfield = key->tos;
4141 } else {
4142 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
4143 encap_limit = t->parms.encap_limit;
4144 @@ -1329,7 +1329,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4145 fl6.flowi6_proto = IPPROTO_IPV6;
4146 fl6.daddr = key->u.ipv6.dst;
4147 fl6.flowlabel = key->label;
4148 - dsfield = ip6_tclass(key->label);
4149 + dsfield = key->tos;
4150 } else {
4151 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
4152 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
4153 diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
4154 index 6c54c76847bf..40262abb15db 100644
4155 --- a/net/ipv6/mcast.c
4156 +++ b/net/ipv6/mcast.c
4157 @@ -2413,17 +2413,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
4158 {
4159 int err;
4160
4161 - /* callers have the socket lock and rtnl lock
4162 - * so no other readers or writers of iml or its sflist
4163 - */
4164 + write_lock_bh(&iml->sflock);
4165 if (!iml->sflist) {
4166 /* any-source empty exclude case */
4167 - return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
4168 + err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
4169 + } else {
4170 + err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
4171 + iml->sflist->sl_count, iml->sflist->sl_addr, 0);
4172 + sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
4173 + iml->sflist = NULL;
4174 }
4175 - err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
4176 - iml->sflist->sl_count, iml->sflist->sl_addr, 0);
4177 - sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
4178 - iml->sflist = NULL;
4179 + write_unlock_bh(&iml->sflock);
4180 return err;
4181 }
4182
4183 diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
4184 index 21f3bf2125f4..505d048ffff5 100644
4185 --- a/net/ipv6/ndisc.c
4186 +++ b/net/ipv6/ndisc.c
4187 @@ -1692,10 +1692,9 @@ int ndisc_rcv(struct sk_buff *skb)
4188 return 0;
4189 }
4190
4191 - memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
4192 -
4193 switch (msg->icmph.icmp6_type) {
4194 case NDISC_NEIGHBOUR_SOLICITATION:
4195 + memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
4196 ndisc_recv_ns(skb);
4197 break;
4198
4199 diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
4200 index b9147558a8f2..e46185377981 100644
4201 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
4202 +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
4203 @@ -597,8 +597,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
4204 fq->q.meat == fq->q.len &&
4205 nf_ct_frag6_reasm(fq, skb, dev))
4206 ret = 0;
4207 - else
4208 - skb_dst_drop(skb);
4209
4210 out_unlock:
4211 spin_unlock_bh(&fq->q.lock);
4212 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4213 index 70fa31e37360..4cc12eeca7ab 100644
4214 --- a/net/ipv6/route.c
4215 +++ b/net/ipv6/route.c
4216 @@ -2289,6 +2289,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
4217 if (on_link)
4218 nrt->rt6i_flags &= ~RTF_GATEWAY;
4219
4220 + nrt->rt6i_protocol = RTPROT_REDIRECT;
4221 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4222
4223 if (ip6_ins_rt(nrt))
4224 @@ -2393,6 +2394,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
4225 .fc_dst_len = prefixlen,
4226 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4227 RTF_UP | RTF_PREF(pref),
4228 + .fc_protocol = RTPROT_RA,
4229 .fc_nlinfo.portid = 0,
4230 .fc_nlinfo.nlh = NULL,
4231 .fc_nlinfo.nl_net = net,
4232 @@ -2445,6 +2447,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
4233 .fc_ifindex = dev->ifindex,
4234 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4235 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4236 + .fc_protocol = RTPROT_RA,
4237 .fc_nlinfo.portid = 0,
4238 .fc_nlinfo.nlh = NULL,
4239 .fc_nlinfo.nl_net = dev_net(dev),
4240 @@ -3241,14 +3244,6 @@ static int rt6_fill_node(struct net *net,
4241 }
4242 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4243 rtm->rtm_protocol = rt->rt6i_protocol;
4244 - if (rt->rt6i_flags & RTF_DYNAMIC)
4245 - rtm->rtm_protocol = RTPROT_REDIRECT;
4246 - else if (rt->rt6i_flags & RTF_ADDRCONF) {
4247 - if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
4248 - rtm->rtm_protocol = RTPROT_RA;
4249 - else
4250 - rtm->rtm_protocol = RTPROT_KERNEL;
4251 - }
4252
4253 if (rt->rt6i_flags & RTF_CACHE)
4254 rtm->rtm_flags |= RTM_F_CLONED;
4255 diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
4256 index 4d09ce6fa90e..64862c5084ee 100644
4257 --- a/net/ipv6/xfrm6_output.c
4258 +++ b/net/ipv6/xfrm6_output.c
4259 @@ -165,9 +165,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
4260
4261 if (toobig && xfrm6_local_dontfrag(skb)) {
4262 xfrm6_local_rxpmtu(skb, mtu);
4263 + kfree_skb(skb);
4264 return -EMSGSIZE;
4265 } else if (!skb->ignore_df && toobig && skb->sk) {
4266 xfrm_local_error(skb, mtu);
4267 + kfree_skb(skb);
4268 return -EMSGSIZE;
4269 }
4270
4271 diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
4272 index a5333f6cb65a..b96dbe38ecad 100644
4273 --- a/net/l2tp/l2tp_core.c
4274 +++ b/net/l2tp/l2tp_core.c
4275 @@ -845,10 +845,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
4276 }
4277 }
4278
4279 - /* Session data offset is handled differently for L2TPv2 and
4280 - * L2TPv3. For L2TPv2, there is an optional 16-bit value in
4281 - * the header. For L2TPv3, the offset is negotiated using AVPs
4282 - * in the session setup control protocol.
4283 + /* Session data offset is defined only for L2TPv2 and is
4284 + * indicated by an optional 16-bit value in the header.
4285 */
4286 if (tunnel->version == L2TP_HDR_VER_2) {
4287 /* If offset bit set, skip it. */
4288 @@ -856,8 +854,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
4289 offset = ntohs(*(__be16 *)ptr);
4290 ptr += 2 + offset;
4291 }
4292 - } else
4293 - ptr += session->offset;
4294 + }
4295
4296 offset = ptr - optr;
4297 if (!pskb_may_pull(skb, offset))
4298 @@ -1141,8 +1138,6 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
4299 }
4300 bufp += session->l2specific_len;
4301 }
4302 - if (session->offset)
4303 - bufp += session->offset;
4304
4305 return bufp - optr;
4306 }
4307 @@ -1827,7 +1822,7 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
4308 if (session->send_seq)
4309 session->hdr_len += 4;
4310 } else {
4311 - session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
4312 + session->hdr_len = 4 + session->cookie_len + session->l2specific_len;
4313 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
4314 session->hdr_len += 4;
4315 }
4316 @@ -1878,7 +1873,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
4317 session->recv_seq = cfg->recv_seq;
4318 session->lns_mode = cfg->lns_mode;
4319 session->reorder_timeout = cfg->reorder_timeout;
4320 - session->offset = cfg->offset;
4321 session->l2specific_type = cfg->l2specific_type;
4322 session->l2specific_len = cfg->l2specific_len;
4323 session->cookie_len = cfg->cookie_len;
4324 diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
4325 index 42419f1c24cf..86356a23a0a7 100644
4326 --- a/net/l2tp/l2tp_core.h
4327 +++ b/net/l2tp/l2tp_core.h
4328 @@ -68,7 +68,6 @@ struct l2tp_session_cfg {
4329 int debug; /* bitmask of debug message
4330 * categories */
4331 u16 vlan_id; /* VLAN pseudowire only */
4332 - u16 offset; /* offset to payload */
4333 u16 l2specific_len; /* Layer 2 specific length */
4334 u16 l2specific_type; /* Layer 2 specific type */
4335 u8 cookie[8]; /* optional cookie */
4336 @@ -94,8 +93,6 @@ struct l2tp_session {
4337 int cookie_len;
4338 u8 peer_cookie[8];
4339 int peer_cookie_len;
4340 - u16 offset; /* offset from end of L2TP header
4341 - to beginning of data */
4342 u16 l2specific_len;
4343 u16 l2specific_type;
4344 u16 hdr_len;
4345 diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
4346 index d100aed3d06f..2d2a73280ec2 100644
4347 --- a/net/l2tp/l2tp_debugfs.c
4348 +++ b/net/l2tp/l2tp_debugfs.c
4349 @@ -181,8 +181,8 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
4350 session->lns_mode ? "LNS" : "LAC",
4351 session->debug,
4352 jiffies_to_msecs(session->reorder_timeout));
4353 - seq_printf(m, " offset %hu l2specific %hu/%hu\n",
4354 - session->offset, session->l2specific_type, session->l2specific_len);
4355 + seq_printf(m, " offset 0 l2specific %hu/%hu\n",
4356 + session->l2specific_type, session->l2specific_len);
4357 if (session->cookie_len) {
4358 seq_printf(m, " cookie %02x%02x%02x%02x",
4359 session->cookie[0], session->cookie[1],
4360 diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
4361 index ee03bc866d1b..d6fccfdca201 100644
4362 --- a/net/l2tp/l2tp_netlink.c
4363 +++ b/net/l2tp/l2tp_netlink.c
4364 @@ -536,9 +536,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
4365 }
4366
4367 if (tunnel->version > 2) {
4368 - if (info->attrs[L2TP_ATTR_OFFSET])
4369 - cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
4370 -
4371 if (info->attrs[L2TP_ATTR_DATA_SEQ])
4372 cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
4373
4374 diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
4375 index 79c346fd859b..b9290a183a2f 100644
4376 --- a/net/llc/llc_conn.c
4377 +++ b/net/llc/llc_conn.c
4378 @@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
4379 llc_sk(sk)->sap = sap;
4380
4381 spin_lock_bh(&sap->sk_lock);
4382 + sock_set_flag(sk, SOCK_RCU_FREE);
4383 sap->sk_count++;
4384 sk_nulls_add_node_rcu(sk, laddr_hb);
4385 hlist_add_head(&llc->dev_hash_node, dev_hb);
4386 diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
4387 index 45319cc01121..80c45567ee3a 100644
4388 --- a/net/mac80211/agg-tx.c
4389 +++ b/net/mac80211/agg-tx.c
4390 @@ -7,7 +7,7 @@
4391 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
4392 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
4393 * Copyright 2007-2010, Intel Corporation
4394 - * Copyright(c) 2015 Intel Deutschland GmbH
4395 + * Copyright(c) 2015-2017 Intel Deutschland GmbH
4396 *
4397 * This program is free software; you can redistribute it and/or modify
4398 * it under the terms of the GNU General Public License version 2 as
4399 @@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
4400 ieee80211_agg_start_txq(sta, tid, true);
4401 }
4402
4403 -void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
4404 +void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
4405 + struct tid_ampdu_tx *tid_tx)
4406 {
4407 - struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
4408 + struct ieee80211_sub_if_data *sdata = sta->sdata;
4409 struct ieee80211_local *local = sdata->local;
4410 - struct sta_info *sta;
4411 - struct tid_ampdu_tx *tid_tx;
4412
4413 - trace_api_start_tx_ba_cb(sdata, ra, tid);
4414 + if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
4415 + return;
4416 +
4417 + if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
4418 + ieee80211_agg_tx_operational(local, sta, tid);
4419 +}
4420 +
4421 +static struct tid_ampdu_tx *
4422 +ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
4423 + const u8 *ra, u16 tid, struct sta_info **sta)
4424 +{
4425 + struct tid_ampdu_tx *tid_tx;
4426
4427 if (tid >= IEEE80211_NUM_TIDS) {
4428 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
4429 tid, IEEE80211_NUM_TIDS);
4430 - return;
4431 + return NULL;
4432 }
4433
4434 - mutex_lock(&local->sta_mtx);
4435 - sta = sta_info_get_bss(sdata, ra);
4436 - if (!sta) {
4437 - mutex_unlock(&local->sta_mtx);
4438 + *sta = sta_info_get_bss(sdata, ra);
4439 + if (!*sta) {
4440 ht_dbg(sdata, "Could not find station: %pM\n", ra);
4441 - return;
4442 + return NULL;
4443 }
4444
4445 - mutex_lock(&sta->ampdu_mlme.mtx);
4446 - tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
4447 + tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
4448
4449 - if (WARN_ON(!tid_tx)) {
4450 + if (WARN_ON(!tid_tx))
4451 ht_dbg(sdata, "addBA was not requested!\n");
4452 - goto unlock;
4453 - }
4454
4455 - if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
4456 - goto unlock;
4457 -
4458 - if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
4459 - ieee80211_agg_tx_operational(local, sta, tid);
4460 -
4461 - unlock:
4462 - mutex_unlock(&sta->ampdu_mlme.mtx);
4463 - mutex_unlock(&local->sta_mtx);
4464 + return tid_tx;
4465 }
4466
4467 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
4468 @@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
4469 {
4470 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
4471 struct ieee80211_local *local = sdata->local;
4472 - struct ieee80211_ra_tid *ra_tid;
4473 - struct sk_buff *skb = dev_alloc_skb(0);
4474 + struct sta_info *sta;
4475 + struct tid_ampdu_tx *tid_tx;
4476
4477 - if (unlikely(!skb))
4478 - return;
4479 + trace_api_start_tx_ba_cb(sdata, ra, tid);
4480
4481 - ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
4482 - memcpy(&ra_tid->ra, ra, ETH_ALEN);
4483 - ra_tid->tid = tid;
4484 + rcu_read_lock();
4485 + tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
4486 + if (!tid_tx)
4487 + goto out;
4488
4489 - skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
4490 - skb_queue_tail(&sdata->skb_queue, skb);
4491 - ieee80211_queue_work(&local->hw, &sdata->work);
4492 + set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
4493 + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
4494 + out:
4495 + rcu_read_unlock();
4496 }
4497 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
4498
4499 @@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
4500 }
4501 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
4502
4503 -void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
4504 +void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
4505 + struct tid_ampdu_tx *tid_tx)
4506 {
4507 - struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
4508 - struct ieee80211_local *local = sdata->local;
4509 - struct sta_info *sta;
4510 - struct tid_ampdu_tx *tid_tx;
4511 + struct ieee80211_sub_if_data *sdata = sta->sdata;
4512 bool send_delba = false;
4513
4514 - trace_api_stop_tx_ba_cb(sdata, ra, tid);
4515 -
4516 - if (tid >= IEEE80211_NUM_TIDS) {
4517 - ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
4518 - tid, IEEE80211_NUM_TIDS);
4519 - return;
4520 - }
4521 -
4522 - ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
4523 -
4524 - mutex_lock(&local->sta_mtx);
4525 -
4526 - sta = sta_info_get_bss(sdata, ra);
4527 - if (!sta) {
4528 - ht_dbg(sdata, "Could not find station: %pM\n", ra);
4529 - goto unlock;
4530 - }
4531 + ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
4532 + sta->sta.addr, tid);
4533
4534 - mutex_lock(&sta->ampdu_mlme.mtx);
4535 spin_lock_bh(&sta->lock);
4536 - tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
4537
4538 - if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
4539 + if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
4540 ht_dbg(sdata,
4541 "unexpected callback to A-MPDU stop for %pM tid %d\n",
4542 sta->sta.addr, tid);
4543 @@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
4544 spin_unlock_bh(&sta->lock);
4545
4546 if (send_delba)
4547 - ieee80211_send_delba(sdata, ra, tid,
4548 + ieee80211_send_delba(sdata, sta->sta.addr, tid,
4549 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
4550 -
4551 - mutex_unlock(&sta->ampdu_mlme.mtx);
4552 - unlock:
4553 - mutex_unlock(&local->sta_mtx);
4554 }
4555
4556 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
4557 @@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
4558 {
4559 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
4560 struct ieee80211_local *local = sdata->local;
4561 - struct ieee80211_ra_tid *ra_tid;
4562 - struct sk_buff *skb = dev_alloc_skb(0);
4563 + struct sta_info *sta;
4564 + struct tid_ampdu_tx *tid_tx;
4565
4566 - if (unlikely(!skb))
4567 - return;
4568 + trace_api_stop_tx_ba_cb(sdata, ra, tid);
4569
4570 - ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
4571 - memcpy(&ra_tid->ra, ra, ETH_ALEN);
4572 - ra_tid->tid = tid;
4573 + rcu_read_lock();
4574 + tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
4575 + if (!tid_tx)
4576 + goto out;
4577
4578 - skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
4579 - skb_queue_tail(&sdata->skb_queue, skb);
4580 - ieee80211_queue_work(&local->hw, &sdata->work);
4581 + set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
4582 + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
4583 + out:
4584 + rcu_read_unlock();
4585 }
4586 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
4587
4588 diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
4589 index f4a528773563..6ca5442b1e03 100644
4590 --- a/net/mac80211/ht.c
4591 +++ b/net/mac80211/ht.c
4592 @@ -7,6 +7,7 @@
4593 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
4594 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
4595 * Copyright 2007-2010, Intel Corporation
4596 + * Copyright 2017 Intel Deutschland GmbH
4597 *
4598 * This program is free software; you can redistribute it and/or modify
4599 * it under the terms of the GNU General Public License version 2 as
4600 @@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
4601 {
4602 int i;
4603
4604 - cancel_work_sync(&sta->ampdu_mlme.work);
4605 -
4606 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
4607 __ieee80211_stop_tx_ba_session(sta, i, reason);
4608 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
4609 @@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
4610 reason != AGG_STOP_DESTROY_STA &&
4611 reason != AGG_STOP_PEER_REQUEST);
4612 }
4613 +
4614 + /* stopping might queue the work again - so cancel only afterwards */
4615 + cancel_work_sync(&sta->ampdu_mlme.work);
4616 }
4617
4618 void ieee80211_ba_session_work(struct work_struct *work)
4619 @@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
4620 spin_unlock_bh(&sta->lock);
4621
4622 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
4623 - if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
4624 - &tid_tx->state))
4625 + if (!tid_tx)
4626 + continue;
4627 +
4628 + if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
4629 + ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
4630 + if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
4631 ___ieee80211_stop_tx_ba_session(sta, tid,
4632 AGG_STOP_LOCAL_REQUEST);
4633 + if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
4634 + ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
4635 }
4636 mutex_unlock(&sta->ampdu_mlme.mtx);
4637 }
4638 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
4639 index 7fd544d970d9..8a690ebd7374 100644
4640 --- a/net/mac80211/ieee80211_i.h
4641 +++ b/net/mac80211/ieee80211_i.h
4642 @@ -1026,8 +1026,6 @@ struct ieee80211_rx_agg {
4643
4644 enum sdata_queue_type {
4645 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
4646 - IEEE80211_SDATA_QUEUE_AGG_START = 1,
4647 - IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
4648 IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
4649 IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
4650 };
4651 @@ -1416,12 +1414,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
4652 return local->hw.wiphy->bands[band];
4653 }
4654
4655 -/* this struct represents 802.11n's RA/TID combination */
4656 -struct ieee80211_ra_tid {
4657 - u8 ra[ETH_ALEN];
4658 - u16 tid;
4659 -};
4660 -
4661 /* this struct holds the value parsing from channel switch IE */
4662 struct ieee80211_csa_ie {
4663 struct cfg80211_chan_def chandef;
4664 @@ -1765,8 +1757,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
4665 enum ieee80211_agg_stop_reason reason);
4666 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
4667 enum ieee80211_agg_stop_reason reason);
4668 -void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
4669 -void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
4670 +void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
4671 + struct tid_ampdu_tx *tid_tx);
4672 +void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
4673 + struct tid_ampdu_tx *tid_tx);
4674 void ieee80211_ba_session_work(struct work_struct *work);
4675 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
4676 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
4677 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
4678 index fa7d757fef95..760ba8ec2944 100644
4679 --- a/net/mac80211/iface.c
4680 +++ b/net/mac80211/iface.c
4681 @@ -1248,7 +1248,6 @@ static void ieee80211_iface_work(struct work_struct *work)
4682 struct ieee80211_local *local = sdata->local;
4683 struct sk_buff *skb;
4684 struct sta_info *sta;
4685 - struct ieee80211_ra_tid *ra_tid;
4686 struct ieee80211_rx_agg *rx_agg;
4687
4688 if (!ieee80211_sdata_running(sdata))
4689 @@ -1264,15 +1263,7 @@ static void ieee80211_iface_work(struct work_struct *work)
4690 while ((skb = skb_dequeue(&sdata->skb_queue))) {
4691 struct ieee80211_mgmt *mgmt = (void *)skb->data;
4692
4693 - if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
4694 - ra_tid = (void *)&skb->cb;
4695 - ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
4696 - ra_tid->tid);
4697 - } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
4698 - ra_tid = (void *)&skb->cb;
4699 - ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
4700 - ra_tid->tid);
4701 - } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
4702 + if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
4703 rx_agg = (void *)&skb->cb;
4704 mutex_lock(&local->sta_mtx);
4705 sta = sta_info_get_bss(sdata, rx_agg->addr);
4706 diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
4707 index 15599c70a38f..cc808ac783e5 100644
4708 --- a/net/mac80211/sta_info.h
4709 +++ b/net/mac80211/sta_info.h
4710 @@ -115,6 +115,8 @@ enum ieee80211_sta_info_flags {
4711 #define HT_AGG_STATE_STOPPING 3
4712 #define HT_AGG_STATE_WANT_START 4
4713 #define HT_AGG_STATE_WANT_STOP 5
4714 +#define HT_AGG_STATE_START_CB 6
4715 +#define HT_AGG_STATE_STOP_CB 7
4716
4717 enum ieee80211_agg_stop_reason {
4718 AGG_STOP_DECLINED,
4719 diff --git a/net/mac80211/status.c b/net/mac80211/status.c
4720 index 72fe9bc7a1f9..7892bac21eac 100644
4721 --- a/net/mac80211/status.c
4722 +++ b/net/mac80211/status.c
4723 @@ -472,11 +472,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
4724 if (!skb)
4725 return;
4726
4727 - if (dropped) {
4728 - dev_kfree_skb_any(skb);
4729 - return;
4730 - }
4731 -
4732 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
4733 u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
4734 struct ieee80211_sub_if_data *sdata;
4735 @@ -497,6 +492,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
4736 }
4737 rcu_read_unlock();
4738
4739 + dev_kfree_skb_any(skb);
4740 + } else if (dropped) {
4741 dev_kfree_skb_any(skb);
4742 } else {
4743 /* consumes skb */
4744 diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
4745 index f20dcf1b1830..c64ae68ae4f8 100644
4746 --- a/net/mac80211/tdls.c
4747 +++ b/net/mac80211/tdls.c
4748 @@ -16,6 +16,7 @@
4749 #include "ieee80211_i.h"
4750 #include "driver-ops.h"
4751 #include "rate.h"
4752 +#include "wme.h"
4753
4754 /* give usermode some time for retries in setting up the TDLS session */
4755 #define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
4756 @@ -1019,14 +1020,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
4757 switch (action_code) {
4758 case WLAN_TDLS_SETUP_REQUEST:
4759 case WLAN_TDLS_SETUP_RESPONSE:
4760 - skb_set_queue_mapping(skb, IEEE80211_AC_BK);
4761 - skb->priority = 2;
4762 + skb->priority = 256 + 2;
4763 break;
4764 default:
4765 - skb_set_queue_mapping(skb, IEEE80211_AC_VI);
4766 - skb->priority = 5;
4767 + skb->priority = 256 + 5;
4768 break;
4769 }
4770 + skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
4771
4772 /*
4773 * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
4774 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4775 index 84582998f65f..58fba4e569e6 100644
4776 --- a/net/mac80211/tx.c
4777 +++ b/net/mac80211/tx.c
4778 @@ -1833,7 +1833,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
4779 sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
4780
4781 if (invoke_tx_handlers_early(&tx))
4782 - return false;
4783 + return true;
4784
4785 if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
4786 return true;
4787 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
4788 index 169156cfd4c8..96e61eab19bc 100644
4789 --- a/net/rds/ib_cm.c
4790 +++ b/net/rds/ib_cm.c
4791 @@ -505,7 +505,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
4792 rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
4793 ic->i_send_cq, ic->i_recv_cq);
4794
4795 - return ret;
4796 + goto out;
4797
4798 sends_out:
4799 vfree(ic->i_sends);
4800 @@ -530,6 +530,7 @@ send_cq_out:
4801 ic->i_send_cq = NULL;
4802 rds_ibdev_out:
4803 rds_ib_remove_conn(rds_ibdev, conn);
4804 +out:
4805 rds_ib_dev_put(rds_ibdev);
4806
4807 return ret;
4808 diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
4809 index f3ac85a285a2..a4380e182e6c 100644
4810 --- a/net/rxrpc/input.c
4811 +++ b/net/rxrpc/input.c
4812 @@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
4813 /*
4814 * Apply a hard ACK by advancing the Tx window.
4815 */
4816 -static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
4817 +static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
4818 struct rxrpc_ack_summary *summary)
4819 {
4820 struct sk_buff *skb, *list = NULL;
4821 + bool rot_last = false;
4822 int ix;
4823 u8 annotation;
4824
4825 @@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
4826 skb->next = list;
4827 list = skb;
4828
4829 - if (annotation & RXRPC_TX_ANNO_LAST)
4830 + if (annotation & RXRPC_TX_ANNO_LAST) {
4831 set_bit(RXRPC_CALL_TX_LAST, &call->flags);
4832 + rot_last = true;
4833 + }
4834 if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
4835 summary->nr_rot_new_acks++;
4836 }
4837
4838 spin_unlock(&call->lock);
4839
4840 - trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
4841 + trace_rxrpc_transmit(call, (rot_last ?
4842 rxrpc_transmit_rotate_last :
4843 rxrpc_transmit_rotate));
4844 wake_up(&call->waitq);
4845 @@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
4846 skb->next = NULL;
4847 rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
4848 }
4849 +
4850 + return rot_last;
4851 }
4852
4853 /*
4854 @@ -332,11 +337,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
4855 ktime_get_real());
4856 }
4857
4858 - if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
4859 - rxrpc_rotate_tx_window(call, top, &summary);
4860 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
4861 - rxrpc_proto_abort("TXL", call, top);
4862 - return false;
4863 + if (!rxrpc_rotate_tx_window(call, top, &summary)) {
4864 + rxrpc_proto_abort("TXL", call, top);
4865 + return false;
4866 + }
4867 }
4868 if (!rxrpc_end_tx_phase(call, true, "ETD"))
4869 return false;
4870 @@ -803,6 +808,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
4871 rxrpc_propose_ack_respond_to_ack);
4872 }
4873
4874 + /* Discard any out-of-order or duplicate ACKs. */
4875 + if (before_eq(sp->hdr.serial, call->acks_latest)) {
4876 + _debug("discard ACK %d <= %d",
4877 + sp->hdr.serial, call->acks_latest);
4878 + return;
4879 + }
4880 + call->acks_latest_ts = skb->tstamp;
4881 + call->acks_latest = sp->hdr.serial;
4882 +
4883 + /* Parse rwind and mtu sizes if provided. */
4884 ioffset = offset + nr_acks + 3;
4885 if (skb->len >= ioffset + sizeof(buf.info)) {
4886 if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
4887 @@ -824,23 +839,18 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
4888 return;
4889 }
4890
4891 - /* Discard any out-of-order or duplicate ACKs. */
4892 - if (before_eq(sp->hdr.serial, call->acks_latest)) {
4893 - _debug("discard ACK %d <= %d",
4894 - sp->hdr.serial, call->acks_latest);
4895 - return;
4896 - }
4897 - call->acks_latest_ts = skb->tstamp;
4898 - call->acks_latest = sp->hdr.serial;
4899 -
4900 if (before(hard_ack, call->tx_hard_ack) ||
4901 after(hard_ack, call->tx_top))
4902 return rxrpc_proto_abort("AKW", call, 0);
4903 if (nr_acks > call->tx_top - hard_ack)
4904 return rxrpc_proto_abort("AKN", call, 0);
4905
4906 - if (after(hard_ack, call->tx_hard_ack))
4907 - rxrpc_rotate_tx_window(call, hard_ack, &summary);
4908 + if (after(hard_ack, call->tx_hard_ack)) {
4909 + if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
4910 + rxrpc_end_tx_phase(call, false, "ETA");
4911 + return;
4912 + }
4913 + }
4914
4915 if (nr_acks > 0) {
4916 if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
4917 @@ -849,11 +859,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
4918 &summary);
4919 }
4920
4921 - if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
4922 - rxrpc_end_tx_phase(call, false, "ETA");
4923 - return;
4924 - }
4925 -
4926 if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
4927 RXRPC_TX_ANNO_LAST &&
4928 summary.nr_acks == call->tx_top - hard_ack &&
4929 @@ -875,8 +880,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
4930
4931 _proto("Rx ACKALL %%%u", sp->hdr.serial);
4932
4933 - rxrpc_rotate_tx_window(call, call->tx_top, &summary);
4934 - if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
4935 + if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
4936 rxrpc_end_tx_phase(call, false, "ETL");
4937 }
4938
4939 diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
4940 index 44941e25f3ad..729c0e4eca21 100644
4941 --- a/net/sched/sch_gred.c
4942 +++ b/net/sched/sch_gred.c
4943 @@ -411,7 +411,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
4944 if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
4945 if (tb[TCA_GRED_LIMIT] != NULL)
4946 sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
4947 - return gred_change_table_def(sch, opt);
4948 + return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
4949 }
4950
4951 if (tb[TCA_GRED_PARMS] == NULL ||
4952 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4953 index 64d2d9ea2f8c..9827ba4b9f74 100644
4954 --- a/net/sctp/socket.c
4955 +++ b/net/sctp/socket.c
4956 @@ -185,13 +185,13 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
4957 list_for_each_entry(chunk, &t->transmitted, transmitted_list)
4958 cb(chunk);
4959
4960 - list_for_each_entry(chunk, &q->retransmit, list)
4961 + list_for_each_entry(chunk, &q->retransmit, transmitted_list)
4962 cb(chunk);
4963
4964 - list_for_each_entry(chunk, &q->sacked, list)
4965 + list_for_each_entry(chunk, &q->sacked, transmitted_list)
4966 cb(chunk);
4967
4968 - list_for_each_entry(chunk, &q->abandoned, list)
4969 + list_for_each_entry(chunk, &q->abandoned, transmitted_list)
4970 cb(chunk);
4971
4972 list_for_each_entry(chunk, &q->out_chunk_list, list)
4973 @@ -248,11 +248,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
4974
4975 spin_lock_bh(&sctp_assocs_id_lock);
4976 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
4977 + if (asoc && (asoc->base.sk != sk || asoc->base.dead))
4978 + asoc = NULL;
4979 spin_unlock_bh(&sctp_assocs_id_lock);
4980
4981 - if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
4982 - return NULL;
4983 -
4984 return asoc;
4985 }
4986
4987 diff --git a/net/socket.c b/net/socket.c
4988 index 35fa349ba274..d9e2989c10c4 100644
4989 --- a/net/socket.c
4990 +++ b/net/socket.c
4991 @@ -2774,9 +2774,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
4992 copy_in_user(&rxnfc->fs.ring_cookie,
4993 &compat_rxnfc->fs.ring_cookie,
4994 (void __user *)(&rxnfc->fs.location + 1) -
4995 - (void __user *)&rxnfc->fs.ring_cookie) ||
4996 - copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
4997 - sizeof(rxnfc->rule_cnt)))
4998 + (void __user *)&rxnfc->fs.ring_cookie))
4999 + return -EFAULT;
5000 + if (ethcmd == ETHTOOL_GRXCLSRLALL) {
5001 + if (put_user(rule_cnt, &rxnfc->rule_cnt))
5002 + return -EFAULT;
5003 + } else if (copy_in_user(&rxnfc->rule_cnt,
5004 + &compat_rxnfc->rule_cnt,
5005 + sizeof(rxnfc->rule_cnt)))
5006 return -EFAULT;
5007 }
5008
5009 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
5010 index 25bc5c30d7fb..9d3f047305ce 100644
5011 --- a/net/tipc/socket.c
5012 +++ b/net/tipc/socket.c
5013 @@ -2277,8 +2277,8 @@ void tipc_sk_reinit(struct net *net)
5014
5015 do {
5016 tsk = ERR_PTR(rhashtable_walk_start(&iter));
5017 - if (tsk)
5018 - continue;
5019 + if (IS_ERR(tsk))
5020 + goto walk_stop;
5021
5022 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
5023 spin_lock_bh(&tsk->sk.sk_lock.slock);
5024 @@ -2287,7 +2287,7 @@ void tipc_sk_reinit(struct net *net)
5025 msg_set_orignode(msg, tn->own_addr);
5026 spin_unlock_bh(&tsk->sk.sk_lock.slock);
5027 }
5028 -
5029 +walk_stop:
5030 rhashtable_walk_stop(&iter);
5031 } while (tsk == ERR_PTR(-EAGAIN));
5032 }
5033 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
5034 index 271cd66e4b3b..c2646446e157 100644
5035 --- a/net/tipc/subscr.c
5036 +++ b/net/tipc/subscr.c
5037 @@ -256,7 +256,9 @@ static void tipc_subscrp_delete(struct tipc_subscription *sub)
5038 static void tipc_subscrp_cancel(struct tipc_subscr *s,
5039 struct tipc_subscriber *subscriber)
5040 {
5041 + tipc_subscrb_get(subscriber);
5042 tipc_subscrb_subscrp_delete(subscriber, s);
5043 + tipc_subscrb_put(subscriber);
5044 }
5045
5046 static struct tipc_subscription *tipc_subscrp_create(struct net *net,
5047 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5048 index 0e91ec49d3da..549d0a4083b3 100644
5049 --- a/net/wireless/nl80211.c
5050 +++ b/net/wireless/nl80211.c
5051 @@ -3422,6 +3422,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
5052 return false;
5053
5054 /* check availability */
5055 + ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
5056 if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
5057 mcs[ridx] |= rbit;
5058 else
5059 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
5060 index 5dbac3749738..36d1d25082e3 100644
5061 --- a/net/wireless/reg.c
5062 +++ b/net/wireless/reg.c
5063 @@ -2298,6 +2298,7 @@ static int regulatory_hint_core(const char *alpha2)
5064 request->alpha2[0] = alpha2[0];
5065 request->alpha2[1] = alpha2[1];
5066 request->initiator = NL80211_REGDOM_SET_BY_CORE;
5067 + request->wiphy_idx = WIPHY_IDX_INVALID;
5068
5069 queue_regulatory_request(request);
5070
5071 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
5072 index 35ad69fd0838..435f904c1be5 100644
5073 --- a/net/wireless/scan.c
5074 +++ b/net/wireless/scan.c
5075 @@ -978,13 +978,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
5076 return NULL;
5077 }
5078
5079 +/*
5080 + * Update RX channel information based on the available frame payload
5081 + * information. This is mainly for the 2.4 GHz band where frames can be received
5082 + * from neighboring channels and the Beacon frames use the DSSS Parameter Set
5083 + * element to indicate the current (transmitting) channel, but this might also
5084 + * be needed on other bands if RX frequency does not match with the actual
5085 + * operating channel of a BSS.
5086 + */
5087 static struct ieee80211_channel *
5088 cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
5089 - struct ieee80211_channel *channel)
5090 + struct ieee80211_channel *channel,
5091 + enum nl80211_bss_scan_width scan_width)
5092 {
5093 const u8 *tmp;
5094 u32 freq;
5095 int channel_number = -1;
5096 + struct ieee80211_channel *alt_channel;
5097
5098 tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
5099 if (tmp && tmp[1] == 1) {
5100 @@ -998,16 +1008,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
5101 }
5102 }
5103
5104 - if (channel_number < 0)
5105 + if (channel_number < 0) {
5106 + /* No channel information in frame payload */
5107 return channel;
5108 + }
5109
5110 freq = ieee80211_channel_to_frequency(channel_number, channel->band);
5111 - channel = ieee80211_get_channel(wiphy, freq);
5112 - if (!channel)
5113 - return NULL;
5114 - if (channel->flags & IEEE80211_CHAN_DISABLED)
5115 + alt_channel = ieee80211_get_channel(wiphy, freq);
5116 + if (!alt_channel) {
5117 + if (channel->band == NL80211_BAND_2GHZ) {
5118 + /*
5119 + * Better not allow unexpected channels when that could
5120 + * be going beyond the 1-11 range (e.g., discovering
5121 + * BSS on channel 12 when radio is configured for
5122 + * channel 11.
5123 + */
5124 + return NULL;
5125 + }
5126 +
5127 + /* No match for the payload channel number - ignore it */
5128 + return channel;
5129 + }
5130 +
5131 + if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
5132 + scan_width == NL80211_BSS_CHAN_WIDTH_5) {
5133 + /*
5134 + * Ignore channel number in 5 and 10 MHz channels where there
5135 + * may not be an n:1 or 1:n mapping between frequencies and
5136 + * channel numbers.
5137 + */
5138 + return channel;
5139 + }
5140 +
5141 + /*
5142 + * Use the channel determined through the payload channel number
5143 + * instead of the RX channel reported by the driver.
5144 + */
5145 + if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
5146 return NULL;
5147 - return channel;
5148 + return alt_channel;
5149 }
5150
5151 /* Returned bss is reference counted and must be cleaned up appropriately. */
5152 @@ -1032,7 +1071,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
5153 (data->signal < 0 || data->signal > 100)))
5154 return NULL;
5155
5156 - channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
5157 + channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
5158 + data->scan_width);
5159 if (!channel)
5160 return NULL;
5161
5162 @@ -1130,7 +1170,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
5163 return NULL;
5164
5165 channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
5166 - ielen, data->chan);
5167 + ielen, data->chan, data->scan_width);
5168 if (!channel)
5169 return NULL;
5170
5171 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
5172 index 6e768093d7c8..026770884d46 100644
5173 --- a/net/xfrm/xfrm_user.c
5174 +++ b/net/xfrm/xfrm_user.c
5175 @@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
5176 err = -EINVAL;
5177 switch (p->family) {
5178 case AF_INET:
5179 + if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
5180 + goto out;
5181 +
5182 break;
5183
5184 case AF_INET6:
5185 #if IS_ENABLED(CONFIG_IPV6)
5186 + if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
5187 + goto out;
5188 +
5189 break;
5190 #else
5191 err = -EAFNOSUPPORT;
5192 @@ -1316,10 +1322,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
5193
5194 switch (p->sel.family) {
5195 case AF_INET:
5196 + if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
5197 + return -EINVAL;
5198 +
5199 break;
5200
5201 case AF_INET6:
5202 #if IS_ENABLED(CONFIG_IPV6)
5203 + if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
5204 + return -EINVAL;
5205 +
5206 break;
5207 #else
5208 return -EAFNOSUPPORT;
5209 @@ -1400,6 +1412,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
5210 (ut[i].family != prev_family))
5211 return -EINVAL;
5212
5213 + if (ut[i].mode >= XFRM_MODE_MAX)
5214 + return -EINVAL;
5215 +
5216 prev_family = ut[i].family;
5217
5218 switch (ut[i].family) {
5219 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5220 index ca2945711dbe..cc48800f95e0 100644
5221 --- a/sound/pci/hda/patch_realtek.c
5222 +++ b/sound/pci/hda/patch_realtek.c
5223 @@ -3499,7 +3499,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
5224 }
5225 }
5226
5227 -#if IS_REACHABLE(INPUT)
5228 +#if IS_REACHABLE(CONFIG_INPUT)
5229 static void gpio2_mic_hotkey_event(struct hda_codec *codec,
5230 struct hda_jack_callback *event)
5231 {
5232 @@ -6392,8 +6392,11 @@ static int patch_alc269(struct hda_codec *codec)
5233 break;
5234 case 0x10ec0225:
5235 case 0x10ec0295:
5236 + spec->codec_variant = ALC269_TYPE_ALC225;
5237 + break;
5238 case 0x10ec0299:
5239 spec->codec_variant = ALC269_TYPE_ALC225;
5240 + spec->gen.mixer_nid = 0; /* no loopback on ALC299 */
5241 break;
5242 case 0x10ec0234:
5243 case 0x10ec0274:
5244 diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
5245 index bef8a4546c12..b0c154d5924b 100644
5246 --- a/sound/soc/intel/skylake/skl-topology.c
5247 +++ b/sound/soc/intel/skylake/skl-topology.c
5248 @@ -2325,7 +2325,7 @@ static int skl_tplg_get_manifest_tkn(struct device *dev,
5249
5250 if (ret < 0)
5251 return ret;
5252 - tkn_count += ret;
5253 + tkn_count = ret;
5254
5255 tuple_size += tkn_count *
5256 sizeof(struct snd_soc_tplg_vendor_string_elem);
5257 diff --git a/tools/perf/Makefile b/tools/perf/Makefile
5258 index 32a64e619028..cd86fd7b35c4 100644
5259 --- a/tools/perf/Makefile
5260 +++ b/tools/perf/Makefile
5261 @@ -83,10 +83,10 @@ endif # has_clean
5262 endif # MAKECMDGOALS
5263
5264 #
5265 -# The clean target is not really parallel, don't print the jobs info:
5266 +# Explicitly disable parallelism for the clean target.
5267 #
5268 clean:
5269 - $(make)
5270 + $(make) -j1
5271
5272 #
5273 # The build-test target is not really parallel, don't print the jobs info,
5274 diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
5275 index ade7213943ad..03239956987f 100644
5276 --- a/tools/perf/tests/builtin-test.c
5277 +++ b/tools/perf/tests/builtin-test.c
5278 @@ -335,7 +335,7 @@ static int test_and_print(struct test *t, bool force_skip, int subtest)
5279 if (!t->subtest.get_nr)
5280 pr_debug("%s:", t->desc);
5281 else
5282 - pr_debug("%s subtest %d:", t->desc, subtest);
5283 + pr_debug("%s subtest %d:", t->desc, subtest + 1);
5284
5285 switch (err) {
5286 case TEST_OK:
5287 @@ -413,7 +413,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
5288 for (subi = 0; subi < subn; subi++) {
5289 pr_info("%2d.%1d: %-*s:", i, subi + 1, subw,
5290 t->subtest.get_desc(subi));
5291 - err = test_and_print(t, skip, subi + 1);
5292 + err = test_and_print(t, skip, subi);
5293 if (err != TEST_OK && t->subtest.skip_if_fail)
5294 skip = true;
5295 }
5296 diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
5297 index 01a5ba2788c6..b0d005d295a9 100644
5298 --- a/tools/perf/tests/task-exit.c
5299 +++ b/tools/perf/tests/task-exit.c
5300 @@ -82,7 +82,7 @@ int test__task_exit(int subtest __maybe_unused)
5301
5302 evsel = perf_evlist__first(evlist);
5303 evsel->attr.task = 1;
5304 - evsel->attr.sample_freq = 0;
5305 + evsel->attr.sample_freq = 1;
5306 evsel->attr.inherit = 0;
5307 evsel->attr.watermark = 0;
5308 evsel->attr.wakeup_events = 1;
5309 diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
5310 index a38227eb5450..3336cbc6ec48 100644
5311 --- a/tools/perf/util/annotate.c
5312 +++ b/tools/perf/util/annotate.c
5313 @@ -495,9 +495,19 @@ static struct ins *ins__find(const char *name)
5314 int symbol__alloc_hist(struct symbol *sym)
5315 {
5316 struct annotation *notes = symbol__annotation(sym);
5317 - const size_t size = symbol__size(sym);
5318 + size_t size = symbol__size(sym);
5319 size_t sizeof_sym_hist;
5320
5321 + /*
5322 + * Add buffer of one element for zero length symbol.
5323 + * When sample is taken from first instruction of
5324 + * zero length symbol, perf still resolves it and
5325 + * shows symbol name in perf report and allows to
5326 + * annotate it.
5327 + */
5328 + if (size == 0)
5329 + size = 1;
5330 +
5331 /* Check for overflow when calculating sizeof_sym_hist */
5332 if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64))
5333 return -1;
5334 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
5335 index 3be8c489884e..f7128c2a6386 100644
5336 --- a/tools/perf/util/evsel.c
5337 +++ b/tools/perf/util/evsel.c
5338 @@ -263,8 +263,20 @@ struct perf_evsel *perf_evsel__new_cycles(void)
5339 struct perf_evsel *evsel;
5340
5341 event_attr_init(&attr);
5342 + /*
5343 + * Unnamed union member, not supported as struct member named
5344 + * initializer in older compilers such as gcc 4.4.7
5345 + *
5346 + * Just for probing the precise_ip:
5347 + */
5348 + attr.sample_period = 1;
5349
5350 perf_event_attr__set_max_precise_ip(&attr);
5351 + /*
5352 + * Now let the usual logic to set up the perf_event_attr defaults
5353 + * to kick in when we return and before perf_evsel__open() is called.
5354 + */
5355 + attr.sample_period = 0;
5356
5357 evsel = perf_evsel__new(&attr);
5358 if (evsel == NULL)
5359 diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
5360 index c93daccec755..a7452fd3b6ee 100644
5361 --- a/tools/perf/util/probe-event.c
5362 +++ b/tools/perf/util/probe-event.c
5363 @@ -615,7 +615,7 @@ static int post_process_probe_trace_point(struct probe_trace_point *tp,
5364 struct map *map, unsigned long offs)
5365 {
5366 struct symbol *sym;
5367 - u64 addr = tp->address + tp->offset - offs;
5368 + u64 addr = tp->address - offs;
5369
5370 sym = map__find_symbol(map, addr);
5371 if (!sym)
5372 diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
5373 index 635b07b4fdd3..b4a2e6af515f 100644
5374 --- a/tools/virtio/ringtest/ptr_ring.c
5375 +++ b/tools/virtio/ringtest/ptr_ring.c
5376 @@ -15,24 +15,41 @@
5377 #define unlikely(x) (__builtin_expect(!!(x), 0))
5378 #define likely(x) (__builtin_expect(!!(x), 1))
5379 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
5380 +#define SIZE_MAX (~(size_t)0)
5381 +
5382 typedef pthread_spinlock_t spinlock_t;
5383
5384 typedef int gfp_t;
5385 -static void *kmalloc(unsigned size, gfp_t gfp)
5386 -{
5387 - return memalign(64, size);
5388 -}
5389 +#define __GFP_ZERO 0x1
5390
5391 -static void *kzalloc(unsigned size, gfp_t gfp)
5392 +static void *kmalloc(unsigned size, gfp_t gfp)
5393 {
5394 void *p = memalign(64, size);
5395 if (!p)
5396 return p;
5397 - memset(p, 0, size);
5398
5399 + if (gfp & __GFP_ZERO)
5400 + memset(p, 0, size);
5401 return p;
5402 }
5403
5404 +static inline void *kzalloc(unsigned size, gfp_t flags)
5405 +{
5406 + return kmalloc(size, flags | __GFP_ZERO);
5407 +}
5408 +
5409 +static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
5410 +{
5411 + if (size != 0 && n > SIZE_MAX / size)
5412 + return NULL;
5413 + return kmalloc(n * size, flags);
5414 +}
5415 +
5416 +static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
5417 +{
5418 + return kmalloc_array(n, size, flags | __GFP_ZERO);
5419 +}
5420 +
5421 static void kfree(void *p)
5422 {
5423 if (p)