Magellan Linux

Contents of /trunk/kernel-alx/patches-3.18/0112-3.18.13-all-fixes.patch

Revision 2613
Mon Jul 13 08:28:43 2015 UTC by niro
File size: 165306 bytes
-linux-3.18.13
1 diff --git a/Makefile b/Makefile
2 index d64f6bf7cd55..9cd08d55f557 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 18
8 -SUBLEVEL = 12
9 +SUBLEVEL = 13
10 EXTRAVERSION =
11 NAME = Diseased Newt
12
13 diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
14 index cb3142a2d40b..a86d567f6c70 100644
15 --- a/arch/arc/kernel/signal.c
16 +++ b/arch/arc/kernel/signal.c
17 @@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
18 sigset_t *set)
19 {
20 int err;
21 - err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
22 + err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
23 sizeof(sf->uc.uc_mcontext.regs.scratch));
24 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
25
26 @@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
27 if (!err)
28 set_current_blocked(&set);
29
30 - err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
31 + err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
32 sizeof(sf->uc.uc_mcontext.regs.scratch));
33
34 return err;
35 @@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
36 /* Don't restart from sigreturn */
37 syscall_wont_restart(regs);
38
39 + /*
40 + * Ensure that sigreturn always returns to user mode (in case the
41 + * regs saved on user stack got fudged between save and sigreturn)
42 + * Otherwise it is easy to panic the kernel with a custom
43 + * signal handler and/or restorer which clobbers the status32/ret
44 + * to return to a bogus location in kernel mode.
45 + */
46 + regs->status32 |= STATUS_U_MASK;
47 +
48 return regs->r0;
49
50 badframe:
51 @@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
52
53 /*
54 * handler returns using sigreturn stub provided already by userspace
55 + * If not, nuke the process right away
56 */
57 - BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
58 + if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
59 + return 1;
60 +
61 regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
62
63 /* User Stack for signal handler will be above the frame just carved */
64 @@ -296,12 +308,12 @@ static void
65 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
66 {
67 sigset_t *oldset = sigmask_to_save();
68 - int ret;
69 + int failed;
70
71 /* Set up the stack frame */
72 - ret = setup_rt_frame(ksig, oldset, regs);
73 + failed = setup_rt_frame(ksig, oldset, regs);
74
75 - signal_setup_done(ret, ksig, 0);
76 + signal_setup_done(failed, ksig, 0);
77 }
78
79 void do_signal(struct pt_regs *regs)
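The two ARC hunks above are defensive: a signal handler runs on the user stack and can scribble over the saved frame before sigreturn restores it. A minimal user-space sketch of the scenario the comment describes (portable C; the context clobber is purely illustrative, and with the STATUS_U_MASK fix the result is a user-mode SIGSEGV instead of a kernel panic):

    #include <signal.h>
    #include <string.h>
    #include <ucontext.h>

    static void handler(int sig, siginfo_t *si, void *ctx)
    {
        ucontext_t *uc = ctx;
        /* A buggy or malicious handler may overwrite the saved machine
         * context here; without the fix above, a forged status32 could
         * make sigreturn resume execution in kernel mode. */
        memset(&uc->uc_mcontext, 0, sizeof(uc->uc_mcontext));
    }

    int main(void)
    {
        struct sigaction sa = { 0 };
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);  /* returns via the (now forced-user) sigreturn */
        return 0;
    }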
80 diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
81 index 1aaa1e15ef70..d5fd511c1474 100644
82 --- a/arch/arm/mach-sunxi/Kconfig
83 +++ b/arch/arm/mach-sunxi/Kconfig
84 @@ -1,10 +1,12 @@
85 menuconfig ARCH_SUNXI
86 bool "Allwinner SoCs" if ARCH_MULTI_V7
87 select ARCH_REQUIRE_GPIOLIB
88 + select ARCH_HAS_RESET_CONTROLLER
89 select CLKSRC_MMIO
90 select GENERIC_IRQ_CHIP
91 select PINCTRL
92 select SUN4I_TIMER
93 + select RESET_CONTROLLER
94
95 if ARCH_SUNXI
96
97 @@ -20,10 +22,8 @@ config MACH_SUN5I
98 config MACH_SUN6I
99 bool "Allwinner A31 (sun6i) SoCs support"
100 default ARCH_SUNXI
101 - select ARCH_HAS_RESET_CONTROLLER
102 select ARM_GIC
103 select MFD_SUN6I_PRCM
104 - select RESET_CONTROLLER
105 select SUN5I_HSTIMER
106
107 config MACH_SUN7I
108 @@ -37,9 +37,7 @@ config MACH_SUN7I
109 config MACH_SUN8I
110 bool "Allwinner A23 (sun8i) SoCs support"
111 default ARCH_SUNXI
112 - select ARCH_HAS_RESET_CONTROLLER
113 select ARM_GIC
114 select MFD_SUN6I_PRCM
115 - select RESET_CONTROLLER
116
117 endif
118 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
119 index 9532f8d5857e..dc2d66cdf311 100644
120 --- a/arch/arm64/Kconfig
121 +++ b/arch/arm64/Kconfig
122 @@ -193,6 +193,135 @@ endmenu
123
124 menu "Kernel Features"
125
126 +menu "ARM errata workarounds via the alternatives framework"
127 +
128 +config ARM64_ERRATUM_826319
129 + bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted"
130 + default y
131 + help
132 + This option adds an alternative code sequence to work around ARM
133 + erratum 826319 on Cortex-A53 parts up to r0p2 with an AMBA 4 ACE or
134 + AXI master interface and an L2 cache.
135 +
136 + If a Cortex-A53 uses an AMBA AXI4 ACE interface to other processors
137 + and is unable to accept a certain write via this interface, it will
138 + not progress on read data presented on the read data channel and the
139 + system can deadlock.
140 +
141 + The workaround promotes data cache clean instructions to
142 + data cache clean-and-invalidate.
143 + Please note that this does not necessarily enable the workaround,
144 + as it depends on the alternative framework, which will only patch
145 + the kernel if an affected CPU is detected.
146 +
147 + If unsure, say Y.
148 +
149 +config ARM64_ERRATUM_827319
150 + bool "Cortex-A53: 827319: Data cache clean instructions might cause overlapping transactions to the interconnect"
151 + default y
152 + help
153 + This option adds an alternative code sequence to work around ARM
154 + erratum 827319 on Cortex-A53 parts up to r0p2 with an AMBA 5 CHI
155 + master interface and an L2 cache.
156 +
157 + Under certain conditions this erratum can cause a clean line eviction
158 + to occur at the same time as another transaction to the same address
159 + on the AMBA 5 CHI interface, which can cause data corruption if the
160 + interconnect reorders the two transactions.
161 +
162 + The workaround promotes data cache clean instructions to
163 + data cache clean-and-invalidate.
164 + Please note that this does not necessarily enable the workaround,
165 + as it depends on the alternative framework, which will only patch
166 + the kernel if an affected CPU is detected.
167 +
168 + If unsure, say Y.
169 +
170 +config ARM64_ERRATUM_824069
171 + bool "Cortex-A53: 824069: Cache line might not be marked as clean after a CleanShared snoop"
172 + default y
173 + help
174 + This option adds an alternative code sequence to work around ARM
175 + erratum 824069 on Cortex-A53 parts up to r0p2 when it is connected
176 + to a coherent interconnect.
177 +
178 + If a Cortex-A53 processor is executing a store or prefetch for
179 + write instruction at the same time as a processor in another
180 + cluster is executing a cache maintenance operation to the same
181 + address, then this erratum might cause a clean cache line to be
182 + incorrectly marked as dirty.
183 +
184 + The workaround promotes data cache clean instructions to
185 + data cache clean-and-invalidate.
186 + Please note that this option does not necessarily enable the
187 + workaround, as it depends on the alternative framework, which will
188 + only patch the kernel if an affected CPU is detected.
189 +
190 + If unsure, say Y.
191 +
192 +config ARM64_ERRATUM_819472
193 + bool "Cortex-A53: 819472: Store exclusive instructions might cause data corruption"
194 + default y
195 + help
196 + This option adds an alternative code sequence to work around ARM
197 + erratum 819472 on Cortex-A53 parts up to r0p1 with an L2 cache
198 + present when it is connected to a coherent interconnect.
199 +
200 + If the processor is executing a load and store exclusive sequence at
201 + the same time as a processor in another cluster is executing a cache
202 + maintenance operation to the same address, then this erratum might
203 + cause data corruption.
204 +
205 + The workaround promotes data cache clean instructions to
206 + data cache clean-and-invalidate.
207 + Please note that this does not necessarily enable the workaround,
208 + as it depends on the alternative framework, which will only patch
209 + the kernel if an affected CPU is detected.
210 +
211 + If unsure, say Y.
212 +
213 +config ARM64_ERRATUM_832075
214 + bool "Cortex-A57: 832075: possible deadlock on mixing exclusive memory accesses with device loads"
215 + default y
216 + help
217 + This option adds an alternative code sequence to work around ARM
218 + erratum 832075 on Cortex-A57 parts up to r1p2.
219 +
220 + Affected Cortex-A57 parts might deadlock when exclusive load/store
221 + instructions to Write-Back memory are mixed with Device loads.
222 +
223 + The workaround is to promote device loads to use Load-Acquire
224 + semantics.
225 + Please note that this does not necessarily enable the workaround,
226 + as it depends on the alternative framework, which will only patch
227 + the kernel if an affected CPU is detected.
228 +
229 + If unsure, say Y.
230 +
231 +config ARM64_ERRATUM_845719
232 + bool "Cortex-A53: 845719: a load might read incorrect data"
233 + depends on COMPAT
234 + default y
235 + help
236 + This option adds an alternative code sequence to work around ARM
237 + erratum 845719 on Cortex-A53 parts up to r0p4.
238 +
239 + When running a compat (AArch32) userspace on an affected Cortex-A53
240 + part, a load at EL0 from a virtual address that matches the bottom 32
241 + bits of the virtual address used by a recent load at (AArch64) EL1
242 + might return incorrect data.
243 +
244 + The workaround is to write the contextidr_el1 register on exception
245 + return to a 32-bit task.
246 + Please note that this does not necessarily enable the workaround,
247 + as it depends on the alternative framework, which will only patch
248 + the kernel if an affected CPU is detected.
249 +
250 + If unsure, say Y.
251 +
252 +endmenu
253 +
254 +
255 choice
256 prompt "Page size"
257 default ARM64_4K_PAGES
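Each erratum help text above ends with the same caveat because enabling an option only compiles the alternative sequence in; whether it actually runs is decided per boot. A hedged sketch of the flow, using the functions this patch adds in the files below:

    /* on the boot CPU and each secondary, from __cpuinfo_store_cpu(): */
    check_local_cpu_errata();   /* MIDR range match -> cpus_set_cap()  */

    /* once, after all CPUs are up, from smp_cpus_done():              */
    apply_alternatives();       /* under stop_machine(): overwrite each
                                 * alt_instr site whose capability bit
                                 * was set on any CPU                  */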
258 diff --git a/arch/arm64/include/asm/alternative-asm.h b/arch/arm64/include/asm/alternative-asm.h
259 new file mode 100644
260 index 000000000000..919a67855b63
261 --- /dev/null
262 +++ b/arch/arm64/include/asm/alternative-asm.h
263 @@ -0,0 +1,29 @@
264 +#ifndef __ASM_ALTERNATIVE_ASM_H
265 +#define __ASM_ALTERNATIVE_ASM_H
266 +
267 +#ifdef __ASSEMBLY__
268 +
269 +.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
270 + .word \orig_offset - .
271 + .word \alt_offset - .
272 + .hword \feature
273 + .byte \orig_len
274 + .byte \alt_len
275 +.endm
276 +
277 +.macro alternative_insn insn1 insn2 cap
278 +661: \insn1
279 +662: .pushsection .altinstructions, "a"
280 + altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
281 + .popsection
282 + .pushsection .altinstr_replacement, "ax"
283 +663: \insn2
284 +664: .popsection
285 + .if ((664b-663b) != (662b-661b))
286 + .error "Alternatives instruction length mismatch"
287 + .endif
288 +.endm
289 +
290 +#endif /* __ASSEMBLY__ */
291 +
292 +#endif /* __ASM_ALTERNATIVE_ASM_H */
293 diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
294 new file mode 100644
295 index 000000000000..f6d206e7f9e9
296 --- /dev/null
297 +++ b/arch/arm64/include/asm/alternative.h
298 @@ -0,0 +1,43 @@
299 +#ifndef __ASM_ALTERNATIVE_H
300 +#define __ASM_ALTERNATIVE_H
301 +
302 +#include <linux/types.h>
303 +#include <linux/stddef.h>
304 +#include <linux/stringify.h>
305 +
306 +struct alt_instr {
307 + s32 orig_offset; /* offset to original instruction */
308 + s32 alt_offset; /* offset to replacement instruction */
309 + u16 cpufeature; /* cpufeature bit set for replacement */
310 + u8 orig_len; /* size of original instruction(s) */
311 + u8 alt_len; /* size of new instruction(s), <= orig_len */
312 +};
313 +
314 +void apply_alternatives(void);
315 +void free_alternatives_memory(void);
316 +
317 +#define ALTINSTR_ENTRY(feature) \
318 + " .word 661b - .\n" /* label */ \
319 + " .word 663f - .\n" /* new instruction */ \
320 + " .hword " __stringify(feature) "\n" /* feature bit */ \
321 + " .byte 662b-661b\n" /* source len */ \
322 + " .byte 664f-663f\n" /* replacement len */
323 +
324 +/* alternative assembly primitive: */
325 +#define ALTERNATIVE(oldinstr, newinstr, feature) \
326 + "661:\n\t" \
327 + oldinstr "\n" \
328 + "662:\n" \
329 + ".pushsection .altinstructions,\"a\"\n" \
330 + ALTINSTR_ENTRY(feature) \
331 + ".popsection\n" \
332 + ".pushsection .altinstr_replacement, \"a\"\n" \
333 + "663:\n\t" \
334 + newinstr "\n" \
335 + "664:\n\t" \
336 + ".popsection\n\t" \
337 + ".if ((664b-663b) != (662b-661b))\n\t" \
338 + " .error \"Alternatives instruction length mismatch\"\n\t"\
339 + ".endif\n"
340 +
341 +#endif /* __ASM_ALTERNATIVE_H */
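A usage sketch of the C-side macro; this exact pattern appears in the asm/io.h hunk later in this patch, and the two instruction sequences must assemble to the same length or the .error directive fires. The wrapper name is illustrative:

    static inline u8 my_readb(const volatile void __iomem *addr)
    {
        u8 val;
        /* default: plain load; rewritten in place to a load-acquire on
         * CPUs where ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE was set */
        asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
                                 "ldarb %w0, [%1]",
                                 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
                     : "=r" (val) : "r" (addr));
        return val;
    }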
342 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
343 index cd4ac0516488..c008bae718eb 100644
344 --- a/arch/arm64/include/asm/cpufeature.h
345 +++ b/arch/arm64/include/asm/cpufeature.h
346 @@ -21,9 +21,39 @@
347 #define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
348 #define cpu_feature(x) ilog2(HWCAP_ ## x)
349
350 +#define ARM64_WORKAROUND_CLEAN_CACHE 0
351 +#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
352 +#define ARM64_WORKAROUND_845719 2
353 +
354 +#define NCAPS 3
355 +
356 +#ifndef __ASSEMBLY__
357 +
358 +extern DECLARE_BITMAP(cpu_hwcaps, NCAPS);
359 +
360 static inline bool cpu_have_feature(unsigned int num)
361 {
362 return elf_hwcap & (1UL << num);
363 }
364
365 +static inline bool cpus_have_cap(unsigned int num)
366 +{
367 + if (num >= NCAPS)
368 + return false;
369 + return test_bit(num, cpu_hwcaps);
370 +}
371 +
372 +static inline void cpus_set_cap(unsigned int num)
373 +{
374 + if (num >= NCAPS)
375 + pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
376 + num, NCAPS);
377 + else
378 + __set_bit(num, cpu_hwcaps);
379 +}
380 +
381 +void check_local_cpu_errata(void);
382 +
383 +#endif /* __ASSEMBLY__ */
384 +
385 #endif
386 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
387 index 379d0b874328..8adb986a3086 100644
388 --- a/arch/arm64/include/asm/cputype.h
389 +++ b/arch/arm64/include/asm/cputype.h
390 @@ -57,6 +57,11 @@
391 #define MIDR_IMPLEMENTOR(midr) \
392 (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
393
394 +#define MIDR_CPU_PART(imp, partnum) \
395 + (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
396 + (0xf << MIDR_ARCHITECTURE_SHIFT) | \
397 + ((partnum) << MIDR_PARTNUM_SHIFT))
398 +
399 #define ARM_CPU_IMP_ARM 0x41
400 #define ARM_CPU_IMP_APM 0x50
401
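As a worked check of MIDR_CPU_PART() against the standard MIDR field layout (implementor [31:24], variant [23:20], architecture [19:16], part number [15:4]):

    /* Cortex-A57: implementor 0x41 (ARM), part number 0xD07; the
     * variant and revision bits stay zero in the model constant */
    MIDR_CPU_PART(0x41, 0xD07)
        == (0x41 << 24) | (0xf << 16) | (0xD07 << 4)
        == 0x410FD070   /* model field of every Cortex-A57 MIDR */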
402 diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
403 index 79f1d519221f..75825b63464d 100644
404 --- a/arch/arm64/include/asm/io.h
405 +++ b/arch/arm64/include/asm/io.h
406 @@ -28,6 +28,8 @@
407 #include <asm/barrier.h>
408 #include <asm/pgtable.h>
409 #include <asm/early_ioremap.h>
410 +#include <asm/alternative.h>
411 +#include <asm/cpufeature.h>
412
413 #include <xen/xen.h>
414
415 @@ -57,28 +59,41 @@ static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
416 static inline u8 __raw_readb(const volatile void __iomem *addr)
417 {
418 u8 val;
419 - asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
420 + asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
421 + "ldarb %w0, [%1]",
422 + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
423 + : "=r" (val) : "r" (addr));
424 return val;
425 }
426
427 static inline u16 __raw_readw(const volatile void __iomem *addr)
428 {
429 u16 val;
430 - asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
431 +
432 + asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
433 + "ldarh %w0, [%1]",
434 + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
435 + : "=r" (val) : "r" (addr));
436 return val;
437 }
438
439 static inline u32 __raw_readl(const volatile void __iomem *addr)
440 {
441 u32 val;
442 - asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
443 + asm volatile(ALTERNATIVE("ldr %w0, [%1]",
444 + "ldar %w0, [%1]",
445 + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
446 + : "=r" (val) : "r" (addr));
447 return val;
448 }
449
450 static inline u64 __raw_readq(const volatile void __iomem *addr)
451 {
452 u64 val;
453 - asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
454 + asm volatile(ALTERNATIVE("ldr %0, [%1]",
455 + "ldar %0, [%1]",
456 + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
457 + : "=r" (val) : "r" (addr));
458 return val;
459 }
460
461 diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
462 index 5bd029b43644..da2272811a31 100644
463 --- a/arch/arm64/kernel/Makefile
464 +++ b/arch/arm64/kernel/Makefile
465 @@ -15,7 +15,7 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
466 entry-fpsimd.o process.o ptrace.o setup.o signal.o \
467 sys.o stacktrace.o time.o traps.o io.o vdso.o \
468 hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \
469 - cpuinfo.o
470 + cpuinfo.o cpu_errata.o alternative.o
471
472 arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
473 sys_compat.o
474 diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
475 new file mode 100644
476 index 000000000000..1a3badab800a
477 --- /dev/null
478 +++ b/arch/arm64/kernel/alternative.c
479 @@ -0,0 +1,64 @@
480 +/*
481 + * alternative runtime patching
482 + * inspired by the x86 version
483 + *
484 + * Copyright (C) 2014 ARM Ltd.
485 + *
486 + * This program is free software; you can redistribute it and/or modify
487 + * it under the terms of the GNU General Public License version 2 as
488 + * published by the Free Software Foundation.
489 + *
490 + * This program is distributed in the hope that it will be useful,
491 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
492 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
493 + * GNU General Public License for more details.
494 + *
495 + * You should have received a copy of the GNU General Public License
496 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
497 + */
498 +
499 +#define pr_fmt(fmt) "alternatives: " fmt
500 +
501 +#include <linux/init.h>
502 +#include <linux/cpu.h>
503 +#include <asm/cacheflush.h>
504 +#include <asm/alternative.h>
505 +#include <asm/cpufeature.h>
506 +#include <linux/stop_machine.h>
507 +
508 +extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
509 +
510 +static int __apply_alternatives(void *dummy)
511 +{
512 + struct alt_instr *alt;
513 + u8 *origptr, *replptr;
514 +
515 + for (alt = __alt_instructions; alt < __alt_instructions_end; alt++) {
516 + if (!cpus_have_cap(alt->cpufeature))
517 + continue;
518 +
519 + BUG_ON(alt->alt_len > alt->orig_len);
520 +
521 + pr_info_once("patching kernel code\n");
522 +
523 + origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
524 + replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
525 + memcpy(origptr, replptr, alt->alt_len);
526 + flush_icache_range((uintptr_t)origptr,
527 + (uintptr_t)(origptr + alt->alt_len));
528 + }
529 +
530 + return 0;
531 +}
532 +
533 +void apply_alternatives(void)
534 +{
535 + /* better not try code patching on a live SMP system */
536 + stop_machine(__apply_alternatives, NULL, NULL);
537 +}
538 +
539 +void free_alternatives_memory(void)
540 +{
541 + free_reserved_area(__alt_instructions, __alt_instructions_end,
542 + 0, "alternatives");
543 +}
544 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
545 new file mode 100644
546 index 000000000000..bbc710aafb37
547 --- /dev/null
548 +++ b/arch/arm64/kernel/cpu_errata.c
549 @@ -0,0 +1,120 @@
550 +/*
551 + * Contains CPU specific errata definitions
552 + *
553 + * Copyright (C) 2014 ARM Ltd.
554 + *
555 + * This program is free software; you can redistribute it and/or modify
556 + * it under the terms of the GNU General Public License version 2 as
557 + * published by the Free Software Foundation.
558 + *
559 + * This program is distributed in the hope that it will be useful,
560 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
561 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
562 + * GNU General Public License for more details.
563 + *
564 + * You should have received a copy of the GNU General Public License
565 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
566 + */
567 +
568 +#define pr_fmt(fmt) "alternative: " fmt
569 +
570 +#include <linux/types.h>
571 +#include <asm/cpu.h>
572 +#include <asm/cputype.h>
573 +#include <asm/cpufeature.h>
574 +
575 +#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
576 +#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
577 +
578 +/*
579 + * Add a struct or another datatype to the union below if you need
580 + * different means to detect an affected CPU.
581 + */
582 +struct arm64_cpu_capabilities {
583 + const char *desc;
584 + u16 capability;
585 + bool (*is_affected)(struct arm64_cpu_capabilities *);
586 + union {
587 + struct {
588 + u32 midr_model;
589 + u32 midr_range_min, midr_range_max;
590 + };
591 + };
592 +};
593 +
594 +#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
595 + MIDR_ARCHITECTURE_MASK)
596 +
597 +static bool __maybe_unused
598 +is_affected_midr_range(struct arm64_cpu_capabilities *entry)
599 +{
600 + u32 midr = read_cpuid_id();
601 +
602 + if ((midr & CPU_MODEL_MASK) != entry->midr_model)
603 + return false;
604 +
605 + midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
606 +
607 + return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
608 +}
609 +
610 +#define MIDR_RANGE(model, min, max) \
611 + .is_affected = is_affected_midr_range, \
612 + .midr_model = model, \
613 + .midr_range_min = min, \
614 + .midr_range_max = max
615 +
616 +struct arm64_cpu_capabilities arm64_errata[] = {
617 +#if defined(CONFIG_ARM64_ERRATUM_826319) || \
618 + defined(CONFIG_ARM64_ERRATUM_827319) || \
619 + defined(CONFIG_ARM64_ERRATUM_824069)
620 + {
621 + /* Cortex-A53 r0p[012] */
622 + .desc = "ARM errata 826319, 827319, 824069",
623 + .capability = ARM64_WORKAROUND_CLEAN_CACHE,
624 + MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
625 + },
626 +#endif
627 +#ifdef CONFIG_ARM64_ERRATUM_819472
628 + {
629 + /* Cortex-A53 r0p[01] */
630 + .desc = "ARM errata 819472",
631 + .capability = ARM64_WORKAROUND_CLEAN_CACHE,
632 + MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
633 + },
634 +#endif
635 +#ifdef CONFIG_ARM64_ERRATUM_832075
636 + {
637 + /* Cortex-A57 r0p0 - r1p2 */
638 + .desc = "ARM erratum 832075",
639 + .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
640 + MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
641 + (1 << MIDR_VARIANT_SHIFT) | 2),
642 + },
643 +#endif
644 +#ifdef CONFIG_ARM64_ERRATUM_845719
645 + {
646 + /* Cortex-A53 r0p[01234] */
647 + .desc = "ARM erratum 845719",
648 + .capability = ARM64_WORKAROUND_845719,
649 + MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
650 + },
651 +#endif
652 + {
653 + }
654 +};
655 +
656 +void check_local_cpu_errata(void)
657 +{
658 + struct arm64_cpu_capabilities *cpus = arm64_errata;
659 + int i;
660 +
661 + for (i = 0; cpus[i].desc; i++) {
662 + if (!cpus[i].is_affected(&cpus[i]))
663 + continue;
664 +
665 + if (!cpus_have_cap(cpus[i].capability))
666 + pr_info("enabling workaround for %s\n", cpus[i].desc);
667 + cpus_set_cap(cpus[i].capability);
668 + }
669 +}
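Extending the table follows mechanically from MIDR_RANGE(); a hedged sketch of a new entry for a hypothetical erratum (the erratum number, config symbol and revision range are invented for illustration):

    #ifdef CONFIG_ARM64_ERRATUM_999999      /* hypothetical symbol */
        {
            /* Cortex-A53 r0p[012] -- invented range */
            .desc = "ARM erratum 999999",
            .capability = ARM64_WORKAROUND_CLEAN_CACHE,
            MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
        },
    #endif

The terminating empty entry must stay last, since check_local_cpu_errata() stops at the first entry without a .desc.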
670 diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
671 index 504fdaa8367e..16d6d032ecf1 100644
672 --- a/arch/arm64/kernel/cpuinfo.c
673 +++ b/arch/arm64/kernel/cpuinfo.c
674 @@ -18,6 +18,7 @@
675 #include <asm/cachetype.h>
676 #include <asm/cpu.h>
677 #include <asm/cputype.h>
678 +#include <asm/cpufeature.h>
679
680 #include <linux/bitops.h>
681 #include <linux/bug.h>
682 @@ -186,6 +187,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
683 info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
684
685 cpuinfo_detect_icache_policy(info);
686 +
687 + check_local_cpu_errata();
688 }
689
690 void cpuinfo_store_cpu(void)
691 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
692 index 726b910fe6ec..2b0f3d5e11c7 100644
693 --- a/arch/arm64/kernel/entry.S
694 +++ b/arch/arm64/kernel/entry.S
695 @@ -21,8 +21,10 @@
696 #include <linux/init.h>
697 #include <linux/linkage.h>
698
699 +#include <asm/alternative-asm.h>
700 #include <asm/assembler.h>
701 #include <asm/asm-offsets.h>
702 +#include <asm/cpufeature.h>
703 #include <asm/errno.h>
704 #include <asm/esr.h>
705 #include <asm/thread_info.h>
706 @@ -118,6 +120,24 @@
707 .if \el == 0
708 ct_user_enter
709 ldr x23, [sp, #S_SP] // load return stack pointer
710 +
711 +#ifdef CONFIG_ARM64_ERRATUM_845719
712 + alternative_insn \
713 + "nop", \
714 + "tbz x22, #4, 1f", \
715 + ARM64_WORKAROUND_845719
716 +#ifdef CONFIG_PID_IN_CONTEXTIDR
717 + alternative_insn \
718 + "nop; nop", \
719 + "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
720 + ARM64_WORKAROUND_845719
721 +#else
722 + alternative_insn \
723 + "nop", \
724 + "msr contextidr_el1, xzr; 1:", \
725 + ARM64_WORKAROUND_845719
726 +#endif
727 +#endif
728 .endif
729 .if \ret
730 ldr x1, [sp, #S_X1] // preserve x0 (syscall return)
731 diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
732 index 38eead12f35b..d502a86bed9f 100644
733 --- a/arch/arm64/kernel/setup.c
734 +++ b/arch/arm64/kernel/setup.c
735 @@ -50,6 +50,7 @@
736 #include <asm/cputype.h>
737 #include <asm/elf.h>
738 #include <asm/cputable.h>
739 +#include <asm/cpufeature.h>
740 #include <asm/cpu_ops.h>
741 #include <asm/sections.h>
742 #include <asm/setup.h>
743 @@ -79,6 +80,8 @@ unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
744 unsigned int compat_elf_hwcap2 __read_mostly;
745 #endif
746
747 +DECLARE_BITMAP(cpu_hwcaps, NCAPS);
748 +
749 static const char *cpu_name;
750 phys_addr_t __fdt_pointer __initdata;
751
752 diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
753 index b06d1d90ee8c..0ef87896e4ae 100644
754 --- a/arch/arm64/kernel/smp.c
755 +++ b/arch/arm64/kernel/smp.c
756 @@ -37,6 +37,7 @@
757 #include <linux/of.h>
758 #include <linux/irq_work.h>
759
760 +#include <asm/alternative.h>
761 #include <asm/atomic.h>
762 #include <asm/cacheflush.h>
763 #include <asm/cpu.h>
764 @@ -309,6 +310,7 @@ void cpu_die(void)
765 void __init smp_cpus_done(unsigned int max_cpus)
766 {
767 pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
768 + apply_alternatives();
769 }
770
771 void __init smp_prepare_boot_cpu(void)
772 diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
773 index edf8715ba39b..2f600294e8ca 100644
774 --- a/arch/arm64/kernel/vmlinux.lds.S
775 +++ b/arch/arm64/kernel/vmlinux.lds.S
776 @@ -100,6 +100,17 @@ SECTIONS
777 . = ALIGN(PAGE_SIZE);
778 __init_end = .;
779
780 + . = ALIGN(4);
781 + .altinstructions : {
782 + __alt_instructions = .;
783 + *(.altinstructions)
784 + __alt_instructions_end = .;
785 + }
786 + .altinstr_replacement : {
787 + *(.altinstr_replacement)
788 + }
789 +
790 + . = ALIGN(PAGE_SIZE);
791 _data = .;
792 _sdata = .;
793 RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
794 diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
795 index 23663837acff..8eaf18577d71 100644
796 --- a/arch/arm64/mm/cache.S
797 +++ b/arch/arm64/mm/cache.S
798 @@ -20,6 +20,8 @@
799 #include <linux/linkage.h>
800 #include <linux/init.h>
801 #include <asm/assembler.h>
802 +#include <asm/cpufeature.h>
803 +#include <asm/alternative-asm.h>
804
805 #include "proc-macros.S"
806
807 @@ -210,7 +212,7 @@ __dma_clean_range:
808 dcache_line_size x2, x3
809 sub x3, x2, #1
810 bic x0, x0, x3
811 -1: dc cvac, x0 // clean D / U line
812 +1: alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
813 add x0, x0, x2
814 cmp x0, x1
815 b.lo 1b
816 diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
817 index fff81f02251c..c95464a33f36 100644
818 --- a/arch/arm64/mm/init.c
819 +++ b/arch/arm64/mm/init.c
820 @@ -39,6 +39,7 @@
821 #include <asm/setup.h>
822 #include <asm/sizes.h>
823 #include <asm/tlb.h>
824 +#include <asm/alternative.h>
825
826 #include "mm.h"
827
828 @@ -325,6 +326,7 @@ void __init mem_init(void)
829 void free_initmem(void)
830 {
831 free_initmem_default(0);
832 + free_alternatives_memory();
833 }
834
835 #ifdef CONFIG_BLK_DEV_INITRD
836 diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
837 index 2bf8e9307be9..4c8ad592ae33 100644
838 --- a/arch/powerpc/include/asm/cputhreads.h
839 +++ b/arch/powerpc/include/asm/cputhreads.h
840 @@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
841
842 static inline int cpu_nr_cores(void)
843 {
844 - return NR_CPUS >> threads_shift;
845 + return nr_cpu_ids >> threads_shift;
846 }
847
848 static inline cpumask_t cpu_online_cores_map(void)
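The one-line cputhreads.h fix matters on distro kernels where the build-time CPU limit far exceeds the hardware; a worked example with assumed numbers:

    /* assumed: NR_CPUS = 2048 (build-time), nr_cpu_ids = 16 (detected),
     * threads_per_core = 8  =>  threads_shift = 3
     *
     *   old: NR_CPUS    >> threads_shift  =  2048 >> 3  =  256  (bogus)
     *   new: nr_cpu_ids >> threads_shift  =    16 >> 3  =    2  (real cores)
     */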
849 diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
850 new file mode 100644
851 index 000000000000..744fd54de374
852 --- /dev/null
853 +++ b/arch/powerpc/include/asm/irq_work.h
854 @@ -0,0 +1,9 @@
855 +#ifndef _ASM_POWERPC_IRQ_WORK_H
856 +#define _ASM_POWERPC_IRQ_WORK_H
857 +
858 +static inline bool arch_irq_work_has_interrupt(void)
859 +{
860 + return true;
861 +}
862 +
863 +#endif /* _ASM_POWERPC_IRQ_WORK_H */
864 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
865 index 17962e667a91..587be13be0be 100644
866 --- a/arch/x86/kernel/reboot.c
867 +++ b/arch/x86/kernel/reboot.c
868 @@ -182,6 +182,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
869 },
870 },
871
872 + /* ASRock */
873 + { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
874 + .callback = set_pci_reboot,
875 + .ident = "ASRock Q1900DC-ITX",
876 + .matches = {
877 + DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
878 + DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
879 + },
880 + },
881 +
882 /* ASUS */
883 { /* Handle problems with rebooting on ASUS P4S800 */
884 .callback = set_bios_reboot,
885 diff --git a/block/blk-merge.c b/block/blk-merge.c
886 index 89b97b5e0881..2be75ff7f171 100644
887 --- a/block/blk-merge.c
888 +++ b/block/blk-merge.c
889 @@ -609,7 +609,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
890 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
891 struct bio_vec *bprev;
892
893 - bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
894 + bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
895 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
896 return false;
897 }
898 diff --git a/block/blk-mq.c b/block/blk-mq.c
899 index 1d016fc9a8b6..849479debac3 100644
900 --- a/block/blk-mq.c
901 +++ b/block/blk-mq.c
902 @@ -1831,7 +1831,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
903 */
904 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
905 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
906 - goto err_map;
907 + goto err_mq_usage;
908
909 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
910 blk_queue_rq_timeout(q, 30000);
911 @@ -1874,7 +1874,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
912 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
913
914 if (blk_mq_init_hw_queues(q, set))
915 - goto err_hw;
916 + goto err_mq_usage;
917
918 mutex_lock(&all_q_mutex);
919 list_add_tail(&q->all_q_node, &all_q_list);
920 @@ -1886,7 +1886,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
921
922 return q;
923
924 -err_hw:
925 +err_mq_usage:
926 blk_cleanup_queue(q);
927 err_hctxs:
928 kfree(map);
929 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
930 index 17f9ec501972..fd8496a92b45 100644
931 --- a/drivers/acpi/processor_idle.c
932 +++ b/drivers/acpi/processor_idle.c
933 @@ -962,7 +962,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
934 return -EINVAL;
935
936 drv->safe_state_index = -1;
937 - for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
938 + for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
939 drv->states[i].name[0] = '\0';
940 drv->states[i].desc[0] = '\0';
941 }
942 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
943 index 4bc2a5cb9935..a98c41f72c63 100644
944 --- a/drivers/block/nbd.c
945 +++ b/drivers/block/nbd.c
946 @@ -803,10 +803,6 @@ static int __init nbd_init(void)
947 return -EINVAL;
948 }
949
950 - nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
951 - if (!nbd_dev)
952 - return -ENOMEM;
953 -
954 part_shift = 0;
955 if (max_part > 0) {
956 part_shift = fls(max_part);
957 @@ -828,6 +824,10 @@ static int __init nbd_init(void)
958 if (nbds_max > 1UL << (MINORBITS - part_shift))
959 return -EINVAL;
960
961 + nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
962 + if (!nbd_dev)
963 + return -ENOMEM;
964 +
965 for (i = 0; i < nbds_max; i++) {
966 struct gendisk *disk = alloc_disk(1 << part_shift);
967 if (!disk)
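Moving the kcalloc() below the parameter checks applies the usual init-ordering rule: validate everything that can fail cheaply first, so no early return path leaks an allocation. A hedged generic sketch of the pattern (names invented):

    static int __init example_init(void)
    {
        if (bad_module_params())        /* hypothetical check       */
            return -EINVAL;             /* nothing allocated yet    */

        objs = kcalloc(count, sizeof(*objs), GFP_KERNEL);
        if (!objs)                      /* allocate only once the   */
            return -ENOMEM;             /* parameters are known good */
        return 0;
    }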
968 diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
969 index fe1678c4ff89..de4c8499cbac 100644
970 --- a/drivers/bluetooth/ath3k.c
971 +++ b/drivers/bluetooth/ath3k.c
972 @@ -79,6 +79,7 @@ static const struct usb_device_id ath3k_table[] = {
973 { USB_DEVICE(0x0489, 0xe057) },
974 { USB_DEVICE(0x0489, 0xe056) },
975 { USB_DEVICE(0x0489, 0xe05f) },
976 + { USB_DEVICE(0x0489, 0xe078) },
977 { USB_DEVICE(0x04c5, 0x1330) },
978 { USB_DEVICE(0x04CA, 0x3004) },
979 { USB_DEVICE(0x04CA, 0x3005) },
980 @@ -86,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
981 { USB_DEVICE(0x04CA, 0x3007) },
982 { USB_DEVICE(0x04CA, 0x3008) },
983 { USB_DEVICE(0x04CA, 0x300b) },
984 + { USB_DEVICE(0x04CA, 0x3010) },
985 { USB_DEVICE(0x0930, 0x0219) },
986 { USB_DEVICE(0x0930, 0x0220) },
987 { USB_DEVICE(0x0930, 0x0227) },
988 @@ -132,6 +134,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
989 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
990 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
991 { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
992 + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
993 { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
994 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
995 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
996 @@ -139,6 +142,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
997 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
998 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
999 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
1000 + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
1001 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
1002 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
1003 { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
1004 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1005 index f0e2f721c8ce..9a7d24f95c5e 100644
1006 --- a/drivers/bluetooth/btusb.c
1007 +++ b/drivers/bluetooth/btusb.c
1008 @@ -125,6 +125,9 @@ static const struct usb_device_id btusb_table[] = {
1009 { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01),
1010 .driver_info = BTUSB_BCM_PATCHRAM },
1011
1012 + /* ASUSTek Computer - Broadcom based */
1013 + { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
1014 +
1015 /* Belkin F8065bf - Broadcom based */
1016 { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
1017
1018 @@ -164,6 +167,7 @@ static const struct usb_device_id blacklist_table[] = {
1019 { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
1020 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
1021 { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
1022 + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
1023 { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
1024 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
1025 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
1026 @@ -171,6 +175,7 @@ static const struct usb_device_id blacklist_table[] = {
1027 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
1028 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
1029 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
1030 + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
1031 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
1032 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
1033 { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
1034 diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
1035 index c0a842b335c5..a52154caf526 100644
1036 --- a/drivers/clk/clk-divider.c
1037 +++ b/drivers/clk/clk-divider.c
1038 @@ -129,12 +129,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
1039 return DIV_ROUND_UP(parent_rate, div);
1040 }
1041
1042 -/*
1043 - * The reverse of DIV_ROUND_UP: The maximum number which
1044 - * divided by m is r
1045 - */
1046 -#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
1047 -
1048 static bool _is_valid_table_div(const struct clk_div_table *table,
1049 unsigned int div)
1050 {
1051 @@ -208,6 +202,7 @@ static int _div_round_closest(struct clk_divider *divider,
1052 unsigned long parent_rate, unsigned long rate)
1053 {
1054 int up, down, div;
1055 + unsigned long up_rate, down_rate;
1056
1057 up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
1058
1059 @@ -219,7 +214,10 @@ static int _div_round_closest(struct clk_divider *divider,
1060 down = _round_down_table(divider->table, div);
1061 }
1062
1063 - return (up - div) <= (div - down) ? up : down;
1064 + up_rate = DIV_ROUND_UP(parent_rate, up);
1065 + down_rate = DIV_ROUND_UP(parent_rate, down);
1066 +
1067 + return (rate - up_rate) <= (down_rate - rate) ? up : down;
1068 }
1069
1070 static int _div_round(struct clk_divider *divider, unsigned long parent_rate,
1071 @@ -300,7 +298,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
1072 return i;
1073 }
1074 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
1075 - MULT_ROUND_UP(rate, i));
1076 + rate * i);
1077 now = DIV_ROUND_UP(parent_rate, i);
1078 if (_is_best_div(divider, rate, now, best)) {
1079 bestdiv = i;
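The old heuristic compared distances in divisor space, but the output rate is hyperbolic in the divisor, so equal divisor steps are unequal rate steps. A worked example with assumed numbers where the two heuristics disagree:

    /* assumed: parent_rate = 120, target rate = 40, and a divider
     * table restricted to {2, 5}; DIV_ROUND_CLOSEST(120, 40) = 3
     *
     *   up   = 5  ->  up_rate   = DIV_ROUND_UP(120, 5) = 24
     *   down = 2  ->  down_rate = DIV_ROUND_UP(120, 2) = 60
     *
     *   old: (up - div) = 2 > (div - down) = 1
     *        -> picks down = 2, giving 60 (20 away from target)
     *   new: (rate - up_rate) = 16 <= (down_rate - rate) = 20
     *        -> picks up = 5, giving 24 (only 16 away)
     */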
1080 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1081 index e3bf702b5588..90e8deb6c15e 100644
1082 --- a/drivers/cpufreq/cpufreq.c
1083 +++ b/drivers/cpufreq/cpufreq.c
1084 @@ -1717,15 +1717,18 @@ void cpufreq_resume(void)
1085 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1086 pr_err("%s: Failed to start governor for policy: %p\n",
1087 __func__, policy);
1088 -
1089 - /*
1090 - * schedule call cpufreq_update_policy() for boot CPU, i.e. last
1091 - * policy in list. It will verify that the current freq is in
1092 - * sync with what we believe it to be.
1093 - */
1094 - if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1095 - schedule_work(&policy->update);
1096 }
1097 +
1098 + /*
1099 + * schedule a call to cpufreq_update_policy() for the first-online CPU,
1100 + * as it won't be hotplugged out on suspend. It will verify that the
1101 + * current freq is in sync with what we believe it to be.
1102 + */
1103 + policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1104 + if (WARN_ON(!policy))
1105 + return;
1106 +
1107 + schedule_work(&policy->update);
1108 }
1109
1110 /**
1111 diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
1112 index 125150dc6e81..9ab99642ca7a 100644
1113 --- a/drivers/cpuidle/cpuidle.c
1114 +++ b/drivers/cpuidle/cpuidle.c
1115 @@ -297,9 +297,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
1116 if (!dev->registered)
1117 return -EINVAL;
1118
1119 - if (!dev->state_count)
1120 - dev->state_count = drv->state_count;
1121 -
1122 ret = cpuidle_add_device_sysfs(dev);
1123 if (ret)
1124 return ret;
1125 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
1126 index 97c5903b4606..832a2c3f01ff 100644
1127 --- a/drivers/cpuidle/sysfs.c
1128 +++ b/drivers/cpuidle/sysfs.c
1129 @@ -401,7 +401,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
1130 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
1131
1132 /* state statistics */
1133 - for (i = 0; i < device->state_count; i++) {
1134 + for (i = 0; i < drv->state_count; i++) {
1135 kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
1136 if (!kobj)
1137 goto error_state;
1138 @@ -433,9 +433,10 @@ error_state:
1139 */
1140 static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
1141 {
1142 + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
1143 int i;
1144
1145 - for (i = 0; i < device->state_count; i++)
1146 + for (i = 0; i < drv->state_count; i++)
1147 cpuidle_free_state_kobj(device, i);
1148 }
1149
1150 diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
1151 index 4cfaaa5a49be..abff66c18432 100644
1152 --- a/drivers/dma/edma.c
1153 +++ b/drivers/dma/edma.c
1154 @@ -258,6 +258,13 @@ static int edma_terminate_all(struct edma_chan *echan)
1155 */
1156 if (echan->edesc) {
1157 int cyclic = echan->edesc->cyclic;
1158 +
1159 + /*
1160 + * free the running request descriptor
1161 + * since it is not in any of the vdesc lists
1162 + */
1163 + edma_desc_free(&echan->edesc->vdesc);
1164 +
1165 echan->edesc = NULL;
1166 edma_stop(echan->ch_num);
1167 /* Move the cyclic channel back to default queue */
1168 diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
1169 index bbea8243f9e8..f477f3ba223a 100644
1170 --- a/drivers/dma/omap-dma.c
1171 +++ b/drivers/dma/omap-dma.c
1172 @@ -978,6 +978,7 @@ static int omap_dma_terminate_all(struct omap_chan *c)
1173 * c->desc is NULL and exit.)
1174 */
1175 if (c->desc) {
1176 + omap_dma_desc_free(&c->desc->vd);
1177 c->desc = NULL;
1178 /* Avoid stopping the dma twice */
1179 if (!c->paused)
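Both DMA fixes above close the same leak: the descriptor currently executing has already been taken off the vchan lists, so vchan_get_all_descriptors() never sees it and terminate_all must free it by hand. A hedged sketch of the pattern in a virt-dma based driver (locking elided, free helper name generic):

    if (c->desc) {
        /* in-flight request: in no vdesc list, so free it explicitly */
        our_desc_free(&c->desc->vd);    /* driver's vdesc free helper */
        c->desc = NULL;                 /* forget it before stopping  */
    }
    /* queued-but-unissued descriptors are reclaimed the normal way */
    vchan_get_all_descriptors(&c->vc, &head);
    vchan_dma_desc_free_list(&c->vc, &head);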
1180 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1181 index da41ad42d3a6..b7f101b52d81 100644
1182 --- a/drivers/gpu/drm/drm_crtc.c
1183 +++ b/drivers/gpu/drm/drm_crtc.c
1184 @@ -531,17 +531,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
1185 }
1186 EXPORT_SYMBOL(drm_framebuffer_reference);
1187
1188 -static void drm_framebuffer_free_bug(struct kref *kref)
1189 -{
1190 - BUG();
1191 -}
1192 -
1193 -static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
1194 -{
1195 - DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
1196 - kref_put(&fb->refcount, drm_framebuffer_free_bug);
1197 -}
1198 -
1199 /**
1200 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
1201 * @fb: fb to unregister
1202 @@ -1297,7 +1286,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
1203 return;
1204 }
1205 /* disconnect the plane from the fb and crtc: */
1206 - __drm_framebuffer_unreference(plane->old_fb);
1207 + drm_framebuffer_unreference(plane->old_fb);
1208 plane->old_fb = NULL;
1209 plane->fb = NULL;
1210 plane->crtc = NULL;
1211 diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
1212 index 0a235fe61c9b..144a0368ccd0 100644
1213 --- a/drivers/gpu/drm/drm_edid_load.c
1214 +++ b/drivers/gpu/drm/drm_edid_load.c
1215 @@ -288,6 +288,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
1216
1217 drm_mode_connector_update_edid_property(connector, edid);
1218 ret = drm_add_edid_modes(connector, edid);
1219 + drm_edid_to_eld(connector, edid);
1220 kfree(edid);
1221
1222 return ret;
1223 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
1224 index 6857e9ad6339..5edc61f2f212 100644
1225 --- a/drivers/gpu/drm/drm_probe_helper.c
1226 +++ b/drivers/gpu/drm/drm_probe_helper.c
1227 @@ -151,6 +151,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
1228 struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
1229
1230 count = drm_add_edid_modes(connector, edid);
1231 + drm_edid_to_eld(connector, edid);
1232 } else
1233 count = (*connector_funcs->get_modes)(connector);
1234 }
1235 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1236 index 925697320949..59f23fca0596 100644
1237 --- a/drivers/gpu/drm/i915/i915_drv.c
1238 +++ b/drivers/gpu/drm/i915/i915_drv.c
1239 @@ -1141,6 +1141,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1240 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1241 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
1242 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
1243 + s->pcbr = I915_READ(VLV_PCBR);
1244 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
1245
1246 /*
1247 @@ -1235,6 +1236,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1248 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1249 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
1250 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
1251 + I915_WRITE(VLV_PCBR, s->pcbr);
1252 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
1253 }
1254
1255 @@ -1243,19 +1245,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1256 u32 val;
1257 int err;
1258
1259 - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1260 - WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
1261 -
1262 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1263 - /* Wait for a previous force-off to settle */
1264 - if (force_on) {
1265 - err = wait_for(!COND, 20);
1266 - if (err) {
1267 - DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
1268 - I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1269 - return err;
1270 - }
1271 - }
1272
1273 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1274 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1275 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1276 index 45434333b289..a84971351eee 100644
1277 --- a/drivers/gpu/drm/i915/i915_drv.h
1278 +++ b/drivers/gpu/drm/i915/i915_drv.h
1279 @@ -938,6 +938,7 @@ struct vlv_s0ix_state {
1280 /* Display 2 CZ domain */
1281 u32 gu_ctl0;
1282 u32 gu_ctl1;
1283 + u32 pcbr;
1284 u32 clock_gate_dis2;
1285 };
1286
1287 diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
1288 index 0ee76b25204c..360087eb83fd 100644
1289 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
1290 +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
1291 @@ -485,10 +485,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
1292 stolen_offset, gtt_offset, size);
1293
1294 /* KISS and expect everything to be page-aligned */
1295 - BUG_ON(stolen_offset & 4095);
1296 - BUG_ON(size & 4095);
1297 -
1298 - if (WARN_ON(size == 0))
1299 + if (WARN_ON(size == 0 || stolen_offset & 4095 || size & 4095))
1300 return NULL;
1301
1302 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
1303 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1304 index 7a7c445b07b4..448327fe4d85 100644
1305 --- a/drivers/gpu/drm/i915/intel_display.c
1306 +++ b/drivers/gpu/drm/i915/intel_display.c
1307 @@ -2358,13 +2358,19 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
1308 struct drm_device *dev = crtc->base.dev;
1309 struct drm_i915_gem_object *obj = NULL;
1310 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
1311 - u32 base = plane_config->base;
1312 + u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
1313 + u32 size_aligned = round_up(plane_config->base + plane_config->size,
1314 + PAGE_SIZE);
1315 +
1316 + size_aligned -= base_aligned;
1317
1318 if (plane_config->size == 0)
1319 return false;
1320
1321 - obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
1322 - plane_config->size);
1323 + obj = i915_gem_object_create_stolen_for_preallocated(dev,
1324 + base_aligned,
1325 + base_aligned,
1326 + size_aligned);
1327 if (!obj)
1328 return false;
1329
1330 @@ -6383,8 +6389,7 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
1331 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
1332 plane_config->tiled);
1333
1334 - plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
1335 - aligned_height);
1336 + plane_config->size = crtc->base.primary->fb->pitches[0] * aligned_height;
1337
1338 DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
1339 pipe, plane, crtc->base.primary->fb->width,
1340 @@ -7424,8 +7429,7 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
1341 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
1342 plane_config->tiled);
1343
1344 - plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
1345 - aligned_height);
1346 + plane_config->size = crtc->base.primary->fb->pitches[0] * aligned_height;
1347
1348 DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
1349 pipe, plane, crtc->base.primary->fb->width,
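The new rounding in the first hunk keeps the whole framebuffer inside the stolen-memory object even when its base is not page aligned, which the (now WARN_ON-checked) preallocated-stolen constructor above requires. A worked example with assumed numbers:

    /* assumed: plane_config->base = 0x1234, ->size = 0x2000, 4K pages
     *
     *   base_aligned = round_down(0x1234, 0x1000)         = 0x1000
     *   size_aligned = round_up(0x1234 + 0x2000, 0x1000)  = 0x4000
     *   size_aligned -= base_aligned                     -> 0x3000
     *
     * the object [0x1000, 0x4000) covers the fb at [0x1234, 0x3234),
     * and both offset and size stay page aligned */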
1350 diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
1351 index 07a74ef589bd..4edebce7f213 100644
1352 --- a/drivers/gpu/drm/i915/intel_sprite.c
1353 +++ b/drivers/gpu/drm/i915/intel_sprite.c
1354 @@ -1178,7 +1178,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1355 drm_modeset_lock_all(dev);
1356
1357 plane = drm_plane_find(dev, set->plane_id);
1358 - if (!plane) {
1359 + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
1360 ret = -ENOENT;
1361 goto out_unlock;
1362 }
1363 @@ -1205,7 +1205,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
1364 drm_modeset_lock_all(dev);
1365
1366 plane = drm_plane_find(dev, get->plane_id);
1367 - if (!plane) {
1368 + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
1369 ret = -ENOENT;
1370 goto out_unlock;
1371 }
1372 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
1373 index dbe51bfe3ef4..d6f0e40db81d 100644
1374 --- a/drivers/gpu/drm/radeon/radeon.h
1375 +++ b/drivers/gpu/drm/radeon/radeon.h
1376 @@ -1544,6 +1544,7 @@ struct radeon_dpm {
1377 int new_active_crtc_count;
1378 u32 current_active_crtcs;
1379 int current_active_crtc_count;
1380 + bool single_display;
1381 struct radeon_dpm_dynamic_state dyn_state;
1382 struct radeon_dpm_fan fan;
1383 u32 tdp_limit;
1384 diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
1385 index 63ccb8fa799c..d27e4ccb848c 100644
1386 --- a/drivers/gpu/drm/radeon/radeon_bios.c
1387 +++ b/drivers/gpu/drm/radeon/radeon_bios.c
1388 @@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
1389
1390 static bool radeon_read_bios(struct radeon_device *rdev)
1391 {
1392 - uint8_t __iomem *bios;
1393 + uint8_t __iomem *bios, val1, val2;
1394 size_t size;
1395
1396 rdev->bios = NULL;
1397 @@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
1398 return false;
1399 }
1400
1401 - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
1402 + val1 = readb(&bios[0]);
1403 + val2 = readb(&bios[1]);
1404 +
1405 + if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
1406 pci_unmap_rom(rdev->pdev, bios);
1407 return false;
1408 }
1409 - rdev->bios = kmemdup(bios, size, GFP_KERNEL);
1410 + rdev->bios = kzalloc(size, GFP_KERNEL);
1411 if (rdev->bios == NULL) {
1412 pci_unmap_rom(rdev->pdev, bios);
1413 return false;
1414 }
1415 + memcpy_fromio(rdev->bios, bios, size);
1416 pci_unmap_rom(rdev->pdev, bios);
1417 return true;
1418 }
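The radeon_bios rewrite follows the MMIO access rule: the pointer returned by pci_map_rom() is __iomem, so it must go through readb()/memcpy_fromio() rather than being dereferenced directly (which the old kmemdup() and bios[0] reads effectively did). A hedged sketch of the rule:

    u8 sig0, sig1;
    void __iomem *rom = bios;           /* as returned by pci_map_rom() */

    sig0 = readb(rom);                  /* ok: MMIO accessor            */
    sig1 = readb(rom + 1);              /* 0x55, 0xaa marks a valid ROM */
    memcpy_fromio(dst, rom, size);      /* ok: bulk copy out of MMIO    */
    /* sig0 = *(u8 *)rom;                  wrong: direct dereference    */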
1419 diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
1420 index a69bd441dd2d..572b4dbec186 100644
1421 --- a/drivers/gpu/drm/radeon/radeon_mn.c
1422 +++ b/drivers/gpu/drm/radeon/radeon_mn.c
1423 @@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
1424 it = interval_tree_iter_first(&rmn->objects, start, end);
1425 while (it) {
1426 struct radeon_bo *bo;
1427 - struct fence *fence;
1428 int r;
1429
1430 bo = container_of(it, struct radeon_bo, mn_it);
1431 @@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
1432 continue;
1433 }
1434
1435 - fence = reservation_object_get_excl(bo->tbo.resv);
1436 - if (fence) {
1437 - r = radeon_fence_wait((struct radeon_fence *)fence, false);
1438 - if (r)
1439 - DRM_ERROR("(%d) failed to wait for user bo\n", r);
1440 - }
1441 + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
1442 + false, MAX_SCHEDULE_TIMEOUT);
1443 + if (r)
1444 + DRM_ERROR("(%d) failed to wait for user bo\n", r);
1445
1446 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
1447 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
1448 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
1449 index f7da8fe96a66..1d94b542cd82 100644
1450 --- a/drivers/gpu/drm/radeon/radeon_pm.c
1451 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
1452 @@ -704,12 +704,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
1453 radeon_pm_compute_clocks(rdev);
1454 }
1455
1456 -static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
1457 - enum radeon_pm_state_type dpm_state)
1458 +static bool radeon_dpm_single_display(struct radeon_device *rdev)
1459 {
1460 - int i;
1461 - struct radeon_ps *ps;
1462 - u32 ui_class;
1463 bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
1464 true : false;
1465
1466 @@ -719,6 +715,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
1467 single_display = false;
1468 }
1469
1470 + return single_display;
1471 +}
1472 +
1473 +static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
1474 + enum radeon_pm_state_type dpm_state)
1475 +{
1476 + int i;
1477 + struct radeon_ps *ps;
1478 + u32 ui_class;
1479 + bool single_display = radeon_dpm_single_display(rdev);
1480 +
1481 /* certain older asics have a separate 3D performance state,
1482 * so try that first if the user selected performance
1483 */
1484 @@ -844,6 +851,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
1485 struct radeon_ps *ps;
1486 enum radeon_pm_state_type dpm_state;
1487 int ret;
1488 + bool single_display = radeon_dpm_single_display(rdev);
1489
1490 /* if dpm init failed */
1491 if (!rdev->pm.dpm_enabled)
1492 @@ -868,6 +876,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
1493 /* vce just modifies an existing state so force a change */
1494 if (ps->vce_active != rdev->pm.dpm.vce_active)
1495 goto force;
1496 + /* user has made a display change (such as timing) */
1497 + if (rdev->pm.dpm.single_display != single_display)
1498 + goto force;
1499 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
1500 /* for pre-BTC and APUs if the num crtcs changed but state is the same,
1501 * all we need to do is update the display configuration.
1502 @@ -930,6 +941,7 @@ force:
1503
1504 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1505 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1506 + rdev->pm.dpm.single_display = single_display;
1507
1508 /* wait for the rings to drain */
1509 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1510 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
1511 index d2510cfd3fea..9ab8e2694602 100644
1512 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
1513 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
1514 @@ -575,6 +575,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
1515 enum dma_data_direction direction = write ?
1516 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1517
1518 + /* double check that we don't free the table twice */
1519 + if (!ttm->sg->sgl)
1520 + return;
1521 +
1522 /* free the sg table and pages again */
1523 dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
1524
1525 diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
1526 index 513bd6d14293..eaf46ea31177 100644
1527 --- a/drivers/iio/accel/bmc150-accel.c
1528 +++ b/drivers/iio/accel/bmc150-accel.c
1529 @@ -168,14 +168,14 @@ static const struct {
1530 int val;
1531 int val2;
1532 u8 bw_bits;
1533 -} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08},
1534 - {15, 630000, 0x09},
1535 - {31, 250000, 0x0A},
1536 - {62, 500000, 0x0B},
1537 - {125, 0, 0x0C},
1538 - {250, 0, 0x0D},
1539 - {500, 0, 0x0E},
1540 - {1000, 0, 0x0F} };
1541 +} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
1542 + {31, 260000, 0x09},
1543 + {62, 500000, 0x0A},
1544 + {125, 0, 0x0B},
1545 + {250, 0, 0x0C},
1546 + {500, 0, 0x0D},
1547 + {1000, 0, 0x0E},
1548 + {2000, 0, 0x0F} };
1549
1550 static const struct {
1551 int bw_bits;
1552 @@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
1553 }
1554
1555 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
1556 - "7.810000 15.630000 31.250000 62.500000 125 250 500 1000");
1557 + "15.620000 31.260000 62.50000 125 250 500 1000 2000");
1558
1559 static struct attribute *bmc150_accel_attributes[] = {
1560 &iio_const_attr_sampling_frequency_available.dev_attr.attr,
1561 diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
1562 index 4a10ae97dbf2..38e60860165d 100644
1563 --- a/drivers/iio/adc/vf610_adc.c
1564 +++ b/drivers/iio/adc/vf610_adc.c
1565 @@ -141,9 +141,13 @@ struct vf610_adc {
1566 struct regulator *vref;
1567 struct vf610_adc_feature adc_feature;
1568
1569 + u32 sample_freq_avail[5];
1570 +
1571 struct completion completion;
1572 };
1573
1574 +static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
1575 +
1576 #define VF610_ADC_CHAN(_idx, _chan_type) { \
1577 .type = (_chan_type), \
1578 .indexed = 1, \
1579 @@ -173,35 +177,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
1580 /* sentinel */
1581 };
1582
1583 -/*
1584 - * ADC sample frequency, unit is ADCK cycles.
1585 - * ADC clk source is ipg clock, which is the same as bus clock.
1586 - *
1587 - * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
1588 - * SFCAdder: fixed to 6 ADCK cycles
1589 - * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
1590 - * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
1591 - * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
1592 - *
1593 - * By default, enable 12 bit resolution mode, clock source
1594 - * set to ipg clock, So get below frequency group:
1595 - */
1596 -static const u32 vf610_sample_freq_avail[5] =
1597 -{1941176, 559332, 286957, 145374, 73171};
1598 +static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
1599 +{
1600 + unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
1601 + int i;
1602 +
1603 + /*
1604 + * Calculate ADC sample frequencies
1605 + * Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
1606 + * which is the same as bus clock.
1607 + *
1608 + * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
1609 + * SFCAdder: fixed to 6 ADCK cycles
1610 + * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
1611 + * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
1612 + * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
1613 + */
1614 + adck_rate = ipg_rate / info->adc_feature.clk_div;
1615 + for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
1616 + info->sample_freq_avail[i] =
1617 + adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
1618 +}
1619
1620 static inline void vf610_adc_cfg_init(struct vf610_adc *info)
1621 {
1622 + struct vf610_adc_feature *adc_feature = &info->adc_feature;
1623 +
1624 /* set default Configuration for ADC controller */
1625 - info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
1626 - info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
1627 + adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET;
1628 + adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET;
1629 +
1630 + adc_feature->calibration = true;
1631 + adc_feature->ovwren = true;
1632 +
1633 + adc_feature->res_mode = 12;
1634 + adc_feature->sample_rate = 1;
1635 + adc_feature->lpm = true;
1636
1637 - info->adc_feature.calibration = true;
1638 - info->adc_feature.ovwren = true;
1639 + /* Use a safe ADCK which is below 20MHz on all devices */
1640 + adc_feature->clk_div = 8;
1641
1642 - info->adc_feature.clk_div = 1;
1643 - info->adc_feature.res_mode = 12;
1644 - info->adc_feature.sample_rate = 1;
1645 - info->adc_feature.lpm = true;
1646 + vf610_adc_calculate_rates(info);
1647 }
1648
1649 static void vf610_adc_cfg_post_set(struct vf610_adc *info)
1650 @@ -283,12 +299,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info)
1651
1652 cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
1653
1654 - /* low power configuration */
1655 cfg_data &= ~VF610_ADC_ADLPC_EN;
1656 if (adc_feature->lpm)
1657 cfg_data |= VF610_ADC_ADLPC_EN;
1658
1659 - /* disable high speed */
1660 cfg_data &= ~VF610_ADC_ADHSC_EN;
1661
1662 writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
1663 @@ -428,10 +442,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
1664 return IRQ_HANDLED;
1665 }
1666
1667 -static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
1668 +static ssize_t vf610_show_samp_freq_avail(struct device *dev,
1669 + struct device_attribute *attr, char *buf)
1670 +{
1671 + struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev));
1672 + size_t len = 0;
1673 + int i;
1674 +
1675 + for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++)
1676 + len += scnprintf(buf + len, PAGE_SIZE - len,
1677 + "%u ", info->sample_freq_avail[i]);
1678 +
1679 + /* replace trailing space by newline */
1680 + buf[len - 1] = '\n';
1681 +
1682 + return len;
1683 +}
1684 +
1685 +static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail);
1686
1687 static struct attribute *vf610_attributes[] = {
1688 - &iio_const_attr_sampling_frequency_available.dev_attr.attr,
1689 + &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
1690 NULL
1691 };
1692
1693 @@ -478,7 +509,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
1694 return IIO_VAL_FRACTIONAL_LOG2;
1695
1696 case IIO_CHAN_INFO_SAMP_FREQ:
1697 - *val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
1698 + *val = info->sample_freq_avail[info->adc_feature.sample_rate];
1699 *val2 = 0;
1700 return IIO_VAL_INT;
1701
1702 @@ -501,9 +532,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
1703 switch (mask) {
1704 case IIO_CHAN_INFO_SAMP_FREQ:
1705 for (i = 0;
1706 - i < ARRAY_SIZE(vf610_sample_freq_avail);
1707 + i < ARRAY_SIZE(info->sample_freq_avail);
1708 i++)
1709 - if (val == vf610_sample_freq_avail[i]) {
1710 + if (val == info->sample_freq_avail[i]) {
1711 info->adc_feature.sample_rate = i;
1712 vf610_adc_sample_set(info);
1713 return 0;
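
The conversion-time formula documented in vf610_adc_calculate_rates() is easy to sanity-check in user space. A minimal sketch of the same arithmetic, assuming a 66 MHz IPG clock (an assumption for illustration; the driver gets the real rate from clk_get_rate()):

#include <stdio.h>

int main(void)
{
        const unsigned long ipg_rate = 66000000UL;      /* assumed IPG clock */
        const unsigned long clk_div = 8;        /* default set by this patch */
        const unsigned long hw_avgs[] = { 1, 4, 8, 16, 32 };
        unsigned long adck_rate = ipg_rate / clk_div;
        unsigned int i;

        for (i = 0; i < sizeof(hw_avgs) / sizeof(hw_avgs[0]); i++)
                /* SFCAdder(6) + AverageNum * (BCT(25) + LSTAdder(3)) */
                printf("%lu Hz\n", adck_rate / (6 + hw_avgs[i] * (25 + 3)));
        return 0;
}

With clk_div = 8 this works out to roughly 242 kHz down to 9 kHz across the five averaging settings, which is what the sysfs attribute now reports instead of the old hard-coded table.
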
1714 diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
1715 index e0017c22bb9c..f53e9a803a0e 100644
1716 --- a/drivers/iio/imu/adis_trigger.c
1717 +++ b/drivers/iio/imu/adis_trigger.c
1718 @@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
1719 iio_trigger_set_drvdata(adis->trig, adis);
1720 ret = iio_trigger_register(adis->trig);
1721
1722 - indio_dev->trig = adis->trig;
1723 + indio_dev->trig = iio_trigger_get(adis->trig);
1724 if (ret)
1725 goto error_free_irq;
1726
1727 diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
1728 index 0cd306a72a6e..ba27e277511f 100644
1729 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
1730 +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
1731 @@ -24,6 +24,16 @@
1732 #include <linux/poll.h>
1733 #include "inv_mpu_iio.h"
1734
1735 +static void inv_clear_kfifo(struct inv_mpu6050_state *st)
1736 +{
1737 + unsigned long flags;
1738 +
1739 + /* take the spin lock sem to avoid interrupt kick in */
1740 + spin_lock_irqsave(&st->time_stamp_lock, flags);
1741 + kfifo_reset(&st->timestamps);
1742 + spin_unlock_irqrestore(&st->time_stamp_lock, flags);
1743 +}
1744 +
1745 int inv_reset_fifo(struct iio_dev *indio_dev)
1746 {
1747 int result;
1748 @@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
1749 INV_MPU6050_BIT_FIFO_RST);
1750 if (result)
1751 goto reset_fifo_fail;
1752 +
1753 + /* clear timestamps fifo */
1754 + inv_clear_kfifo(st);
1755 +
1756 /* enable interrupt */
1757 if (st->chip_config.accl_fifo_enable ||
1758 st->chip_config.gyro_fifo_enable) {
1759 @@ -83,16 +97,6 @@ reset_fifo_fail:
1760 return result;
1761 }
1762
1763 -static void inv_clear_kfifo(struct inv_mpu6050_state *st)
1764 -{
1765 - unsigned long flags;
1766 -
1767 - /* take the spin lock sem to avoid interrupt kick in */
1768 - spin_lock_irqsave(&st->time_stamp_lock, flags);
1769 - kfifo_reset(&st->timestamps);
1770 - spin_unlock_irqrestore(&st->time_stamp_lock, flags);
1771 -}
1772 -
1773 /**
1774 * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
1775 */
1776 @@ -184,7 +188,6 @@ end_session:
1777 flush_fifo:
1778 /* Flush HW and SW FIFOs. */
1779 inv_reset_fifo(indio_dev);
1780 - inv_clear_kfifo(st);
1781 mutex_unlock(&indio_dev->mlock);
1782 iio_trigger_notify_done(indio_dev->trig);
1783
1784 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
1785 index af3e76d652ba..f009d053384a 100644
1786 --- a/drivers/iio/industrialio-core.c
1787 +++ b/drivers/iio/industrialio-core.c
1788 @@ -832,8 +832,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
1789 * @attr_list: List of IIO device attributes
1790 *
1791 * This function frees the memory allocated for each of the IIO device
1792 - * attributes in the list. Note: if you want to reuse the list after calling
1793 - * this function you have to reinitialize it using INIT_LIST_HEAD().
1794 + * attributes in the list.
1795 */
1796 void iio_free_chan_devattr_list(struct list_head *attr_list)
1797 {
1798 @@ -841,6 +840,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
1799
1800 list_for_each_entry_safe(p, n, attr_list, l) {
1801 kfree(p->dev_attr.attr.name);
1802 + list_del(&p->l);
1803 kfree(p);
1804 }
1805 }
1806 @@ -921,6 +921,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
1807
1808 iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
1809 kfree(indio_dev->chan_attr_group.attrs);
1810 + indio_dev->chan_attr_group.attrs = NULL;
1811 }
1812
1813 static void iio_dev_release(struct device *device)
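
Unlinking each node before kfree() leaves attr_list a valid empty list, which is why the INIT_LIST_HEAD() caveat could be dropped from the comment above. The safe-iteration-plus-unlink pattern in miniature, on a toy circular doubly linked list rather than the real <linux/list.h>:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *prev, *next;
};

static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static void list_add(struct node *head, struct node *n)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

int main(void)
{
        struct node head, *n, *tmp;
        int i;

        list_init(&head);
        for (i = 0; i < 3; i++)
                list_add(&head, malloc(sizeof(struct node)));

        /* list_for_each_entry_safe equivalent: unlink before freeing */
        for (n = head.next; n != &head; n = tmp) {
                tmp = n->next;
                list_del(n);
                free(n);
        }
        printf("empty and reusable: %d\n", head.next == &head);  /* 1 */
        return 0;
}
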
1814 diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
1815 index 0c1e37e3120a..35c02aeec75e 100644
1816 --- a/drivers/iio/industrialio-event.c
1817 +++ b/drivers/iio/industrialio-event.c
1818 @@ -493,6 +493,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
1819 error_free_setup_event_lines:
1820 iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
1821 kfree(indio_dev->event_interface);
1822 + indio_dev->event_interface = NULL;
1823 return ret;
1824 }
1825
1826 diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
1827 index df0c4f605a21..dfa4286f98a4 100644
1828 --- a/drivers/infiniband/core/umem.c
1829 +++ b/drivers/infiniband/core/umem.c
1830 @@ -94,6 +94,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
1831 if (dmasync)
1832 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
1833
1834 + /*
1835 + * If the combination of the addr and size requested for this memory
1836 + * region causes an integer overflow, return error.
1837 + */
1838 + if ((PAGE_ALIGN(addr + size) <= size) ||
1839 + (PAGE_ALIGN(addr + size) <= addr))
1840 + return ERR_PTR(-EINVAL);
1841 +
1842 if (!can_do_mlock())
1843 return ERR_PTR(-EPERM);
1844
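
The two PAGE_ALIGN() comparisons reject ranges where addr + size wraps around the top of the address space: after an overflow, the aligned sum is numerically smaller than either operand. A stand-alone sketch of the same test, assuming a 4 KiB page size:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Returns 1 if addr..addr+size wraps and must be rejected. */
static int umem_range_overflows(uintptr_t addr, size_t size)
{
        return PAGE_ALIGN(addr + size) <= size ||
               PAGE_ALIGN(addr + size) <= addr;
}

int main(void)
{
        printf("%d\n", umem_range_overflows(0x1000, 0x2000));   /* 0: fine */
        printf("%d\n", umem_range_overflows(UINTPTR_MAX - 4096, 8192)); /* 1 */
        return 0;
}
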
1845 diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
1846 index 82a7dd87089b..729382c06c5e 100644
1847 --- a/drivers/infiniband/hw/mlx4/mad.c
1848 +++ b/drivers/infiniband/hw/mlx4/mad.c
1849 @@ -64,6 +64,14 @@ enum {
1850 #define GUID_TBL_BLK_NUM_ENTRIES 8
1851 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
1852
1853 +/* Counters should saturate once they reach their maximum value */
1854 +#define ASSIGN_32BIT_COUNTER(counter, value) do {\
1855 + if ((value) > U32_MAX) \
1856 + counter = cpu_to_be32(U32_MAX); \
1857 + else \
1858 + counter = cpu_to_be32(value); \
1859 +} while (0)
1860 +
1861 struct mlx4_mad_rcv_buf {
1862 struct ib_grh grh;
1863 u8 payload[256];
1864 @@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
1865 static void edit_counter(struct mlx4_counter *cnt,
1866 struct ib_pma_portcounters *pma_cnt)
1867 {
1868 - pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
1869 - pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
1870 - pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
1871 - pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
1872 + ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
1873 + (be64_to_cpu(cnt->tx_bytes) >> 2));
1874 + ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
1875 + (be64_to_cpu(cnt->rx_bytes) >> 2));
1876 + ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
1877 + be64_to_cpu(cnt->tx_frames));
1878 + ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
1879 + be64_to_cpu(cnt->rx_frames));
1880 }
1881
1882 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
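
ASSIGN_32BIT_COUNTER() clamps a 64-bit hardware counter into a 32-bit PMA field instead of letting it wrap. The same clamping in host byte order, without the cpu_to_be32() conversion used above:

#include <stdio.h>
#include <stdint.h>

/* Saturate a 64-bit value into a 32-bit counter instead of wrapping. */
static uint32_t saturate_u32(uint64_t value)
{
        return value > UINT32_MAX ? UINT32_MAX : (uint32_t)value;
}

int main(void)
{
        printf("%u\n", saturate_u32(42));               /* 42 */
        printf("%u\n", saturate_u32(1ULL << 40));       /* 4294967295 */
        return 0;
}
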
1883 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1884 index b2b9c9264131..0ff89b2ecdb5 100644
1885 --- a/drivers/iommu/intel-iommu.c
1886 +++ b/drivers/iommu/intel-iommu.c
1887 @@ -1746,8 +1746,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1888 static void domain_exit(struct dmar_domain *domain)
1889 {
1890 struct dmar_drhd_unit *drhd;
1891 - struct intel_iommu *iommu;
1892 struct page *freelist = NULL;
1893 + int i;
1894
1895 /* Domain 0 is reserved, so dont process it */
1896 if (!domain)
1897 @@ -1767,8 +1767,8 @@ static void domain_exit(struct dmar_domain *domain)
1898
1899 /* clear attached or cached domains */
1900 rcu_read_lock();
1901 - for_each_active_iommu(iommu, drhd)
1902 - iommu_detach_domain(domain, iommu);
1903 + for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1904 + iommu_detach_domain(domain, g_iommus[i]);
1905 rcu_read_unlock();
1906
1907 dma_free_pagelist(freelist);
1908 diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
1909 index 3e41ca1293ed..6849c7e79bb5 100644
1910 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
1911 +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
1912 @@ -29,7 +29,7 @@
1913
1914 /* Offset base used to differentiate between CAPTURE and OUTPUT
1915 * while mmaping */
1916 -#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2)
1917 +#define DST_QUEUE_OFF_BASE (1 << 30)
1918
1919 #define MFC_BANK1_ALLOC_CTX 0
1920 #define MFC_BANK2_ALLOC_CTX 1
1921 diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
1922 index be3b3bc71a0f..54cb88a39afc 100644
1923 --- a/drivers/media/platform/sh_veu.c
1924 +++ b/drivers/media/platform/sh_veu.c
1925 @@ -1179,6 +1179,7 @@ static int sh_veu_probe(struct platform_device *pdev)
1926 }
1927
1928 *vdev = sh_veu_videodev;
1929 + vdev->v4l2_dev = &veu->v4l2_dev;
1930 spin_lock_init(&veu->lock);
1931 mutex_init(&veu->fop_lock);
1932 vdev->lock = &veu->fop_lock;
1933 diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
1934 index 8e61b976da19..139704727e34 100644
1935 --- a/drivers/media/platform/soc_camera/soc_camera.c
1936 +++ b/drivers/media/platform/soc_camera/soc_camera.c
1937 @@ -1681,7 +1681,7 @@ eclkreg:
1938 eaddpdev:
1939 platform_device_put(sasc->pdev);
1940 eallocpdev:
1941 - devm_kfree(ici->v4l2_dev.dev, sasc);
1942 + devm_kfree(ici->v4l2_dev.dev, info);
1943 dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret);
1944
1945 return ret;
1946 diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
1947 index ea36447c74f9..cc9537ef4829 100644
1948 --- a/drivers/media/v4l2-core/videobuf2-core.c
1949 +++ b/drivers/media/v4l2-core/videobuf2-core.c
1950 @@ -3226,18 +3226,13 @@ int vb2_thread_stop(struct vb2_queue *q)
1951
1952 if (threadio == NULL)
1953 return 0;
1954 - call_void_qop(q, wait_finish, q);
1955 threadio->stop = true;
1956 - vb2_internal_streamoff(q, q->type);
1957 - call_void_qop(q, wait_prepare, q);
1958 + /* Wake up all pending sleeps in the thread */
1959 + vb2_queue_error(q);
1960 err = kthread_stop(threadio->thread);
1961 - q->fileio = NULL;
1962 - fileio->req.count = 0;
1963 - vb2_reqbufs(q, &fileio->req);
1964 - kfree(fileio);
1965 + __vb2_cleanup_fileio(q);
1966 threadio->thread = NULL;
1967 kfree(threadio);
1968 - q->fileio = NULL;
1969 q->threadio = NULL;
1970 return err;
1971 }
1972 diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
1973 index d2eadab787c5..4a6928457015 100644
1974 --- a/drivers/net/bonding/bond_alb.c
1975 +++ b/drivers/net/bonding/bond_alb.c
1976 @@ -475,12 +475,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
1977 skb->dev = client_info->slave->dev;
1978
1979 if (client_info->vlan_id) {
1980 - skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
1981 - if (!skb) {
1982 - netdev_err(client_info->slave->bond->dev,
1983 - "failed to insert VLAN tag\n");
1984 - continue;
1985 - }
1986 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1987 + client_info->vlan_id);
1988 }
1989
1990 arp_xmit(skb);
1991 @@ -951,13 +947,8 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
1992 skb->priority = TC_PRIO_CONTROL;
1993 skb->dev = slave->dev;
1994
1995 - if (vid) {
1996 - skb = vlan_put_tag(skb, vlan_proto, vid);
1997 - if (!skb) {
1998 - netdev_err(slave->bond->dev, "failed to insert VLAN tag\n");
1999 - return;
2000 - }
2001 - }
2002 + if (vid)
2003 + __vlan_hwaccel_put_tag(skb, vlan_proto, vid);
2004
2005 dev_queue_xmit(skb);
2006 }
2007 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2008 index a5115fb7cf33..1cc06c0e3e92 100644
2009 --- a/drivers/net/bonding/bond_main.c
2010 +++ b/drivers/net/bonding/bond_main.c
2011 @@ -2143,8 +2143,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2012
2013 netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
2014 ntohs(outer_tag->vlan_proto), tags->vlan_id);
2015 - skb = __vlan_put_tag(skb, tags->vlan_proto,
2016 - tags->vlan_id);
2017 + skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2018 + tags->vlan_id);
2019 if (!skb) {
2020 net_err_ratelimited("failed to insert inner VLAN tag\n");
2021 return;
2022 @@ -2156,12 +2156,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2023 if (outer_tag->vlan_id) {
2024 netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
2025 ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2026 - skb = vlan_put_tag(skb, outer_tag->vlan_proto,
2027 - outer_tag->vlan_id);
2028 - if (!skb) {
2029 - net_err_ratelimited("failed to insert outer VLAN tag\n");
2030 - return;
2031 - }
2032 + __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2033 + outer_tag->vlan_id);
2034 }
2035
2036 xmit:
2037 @@ -3799,7 +3795,8 @@ static inline int bond_slave_override(struct bonding *bond,
2038 /* Find out if any slaves have the same mapping as this skb. */
2039 bond_for_each_slave_rcu(bond, slave, iter) {
2040 if (slave->queue_id == skb->queue_mapping) {
2041 - if (bond_slave_can_tx(slave)) {
2042 + if (bond_slave_is_up(slave) &&
2043 + slave->link == BOND_LINK_UP) {
2044 bond_dev_queue_xmit(bond, skb, slave->dev);
2045 return 0;
2046 }
2047 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
2048 index 60f86bd0434a..9768ba6387ad 100644
2049 --- a/drivers/net/can/flexcan.c
2050 +++ b/drivers/net/can/flexcan.c
2051 @@ -1224,12 +1224,19 @@ static int flexcan_probe(struct platform_device *pdev)
2052 const struct flexcan_devtype_data *devtype_data;
2053 struct net_device *dev;
2054 struct flexcan_priv *priv;
2055 + struct regulator *reg_xceiver;
2056 struct resource *mem;
2057 struct clk *clk_ipg = NULL, *clk_per = NULL;
2058 void __iomem *base;
2059 int err, irq;
2060 u32 clock_freq = 0;
2061
2062 + reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
2063 + if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
2064 + return -EPROBE_DEFER;
2065 + else if (IS_ERR(reg_xceiver))
2066 + reg_xceiver = NULL;
2067 +
2068 if (pdev->dev.of_node)
2069 of_property_read_u32(pdev->dev.of_node,
2070 "clock-frequency", &clock_freq);
2071 @@ -1291,9 +1298,7 @@ static int flexcan_probe(struct platform_device *pdev)
2072 priv->pdata = dev_get_platdata(&pdev->dev);
2073 priv->devtype_data = devtype_data;
2074
2075 - priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
2076 - if (IS_ERR(priv->reg_xceiver))
2077 - priv->reg_xceiver = NULL;
2078 + priv->reg_xceiver = reg_xceiver;
2079
2080 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
2081
2082 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
2083 index c3a6072134f5..2559206d8704 100644
2084 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
2085 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
2086 @@ -531,20 +531,8 @@ struct bnx2x_fastpath {
2087 struct napi_struct napi;
2088
2089 #ifdef CONFIG_NET_RX_BUSY_POLL
2090 - unsigned int state;
2091 -#define BNX2X_FP_STATE_IDLE 0
2092 -#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
2093 -#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
2094 -#define BNX2X_FP_STATE_DISABLED (1 << 2)
2095 -#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
2096 -#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
2097 -#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
2098 -#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
2099 -#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
2100 -#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
2101 - /* protect state */
2102 - spinlock_t lock;
2103 -#endif /* CONFIG_NET_RX_BUSY_POLL */
2104 + unsigned long busy_poll_state;
2105 +#endif
2106
2107 union host_hc_status_block status_blk;
2108 /* chip independent shortcuts into sb structure */
2109 @@ -619,104 +607,83 @@ struct bnx2x_fastpath {
2110 #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
2111
2112 #ifdef CONFIG_NET_RX_BUSY_POLL
2113 -static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
2114 +
2115 +enum bnx2x_fp_state {
2116 + BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */
2117 +
2118 + BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
2119 + BNX2X_STATE_FP_NAPI_REQ = BIT(1),
2120 +
2121 + BNX2X_STATE_FP_POLL_BIT = 2,
2122 + BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */
2123 +
2124 + BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
2125 +};
2126 +
2127 +static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
2128 {
2129 - spin_lock_init(&fp->lock);
2130 - fp->state = BNX2X_FP_STATE_IDLE;
2131 + WRITE_ONCE(fp->busy_poll_state, 0);
2132 }
2133
2134 /* called from the device poll routine to get ownership of a FP */
2135 static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
2136 {
2137 - bool rc = true;
2138 -
2139 - spin_lock_bh(&fp->lock);
2140 - if (fp->state & BNX2X_FP_LOCKED) {
2141 - WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
2142 - fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
2143 - rc = false;
2144 - } else {
2145 - /* we don't care if someone yielded */
2146 - fp->state = BNX2X_FP_STATE_NAPI;
2147 + unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
2148 +
2149 + while (1) {
2150 + switch (old) {
2151 + case BNX2X_STATE_FP_POLL:
2152 + /* make sure bnx2x_fp_lock_poll() won't starve us */
2153 + set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
2154 + &fp->busy_poll_state);
2155 + /* fallthrough */
2156 + case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
2157 + return false;
2158 + default:
2159 + break;
2160 + }
2161 + prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
2162 + if (unlikely(prev != old)) {
2163 + old = prev;
2164 + continue;
2165 + }
2166 + return true;
2167 }
2168 - spin_unlock_bh(&fp->lock);
2169 - return rc;
2170 }
2171
2172 -/* returns true is someone tried to get the FP while napi had it */
2173 -static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
2174 +static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
2175 {
2176 - bool rc = false;
2177 -
2178 - spin_lock_bh(&fp->lock);
2179 - WARN_ON(fp->state &
2180 - (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
2181 -
2182 - if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
2183 - rc = true;
2184 -
2185 - /* state ==> idle, unless currently disabled */
2186 - fp->state &= BNX2X_FP_STATE_DISABLED;
2187 - spin_unlock_bh(&fp->lock);
2188 - return rc;
2189 + smp_wmb();
2190 + fp->busy_poll_state = 0;
2191 }
2192
2193 /* called from bnx2x_low_latency_poll() */
2194 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
2195 {
2196 - bool rc = true;
2197 -
2198 - spin_lock_bh(&fp->lock);
2199 - if ((fp->state & BNX2X_FP_LOCKED)) {
2200 - fp->state |= BNX2X_FP_STATE_POLL_YIELD;
2201 - rc = false;
2202 - } else {
2203 - /* preserve yield marks */
2204 - fp->state |= BNX2X_FP_STATE_POLL;
2205 - }
2206 - spin_unlock_bh(&fp->lock);
2207 - return rc;
2208 + return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
2209 }
2210
2211 -/* returns true if someone tried to get the FP while it was locked */
2212 -static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
2213 +static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
2214 {
2215 - bool rc = false;
2216 -
2217 - spin_lock_bh(&fp->lock);
2218 - WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
2219 -
2220 - if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
2221 - rc = true;
2222 -
2223 - /* state ==> idle, unless currently disabled */
2224 - fp->state &= BNX2X_FP_STATE_DISABLED;
2225 - spin_unlock_bh(&fp->lock);
2226 - return rc;
2227 + smp_mb__before_atomic();
2228 + clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
2229 }
2230
2231 -/* true if a socket is polling, even if it did not get the lock */
2232 +/* true if a socket is polling */
2233 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
2234 {
2235 - WARN_ON(!(fp->state & BNX2X_FP_OWNED));
2236 - return fp->state & BNX2X_FP_USER_PEND;
2237 + return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
2238 }
2239
2240 /* false if fp is currently owned */
2241 static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
2242 {
2243 - int rc = true;
2244 -
2245 - spin_lock_bh(&fp->lock);
2246 - if (fp->state & BNX2X_FP_OWNED)
2247 - rc = false;
2248 - fp->state |= BNX2X_FP_STATE_DISABLED;
2249 - spin_unlock_bh(&fp->lock);
2250 + set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
2251 + return !bnx2x_fp_ll_polling(fp);
2252
2253 - return rc;
2254 }
2255 #else
2256 -static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
2257 +static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
2258 {
2259 }
2260
2261 @@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
2262 return true;
2263 }
2264
2265 -static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
2266 +static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
2267 {
2268 - return false;
2269 }
2270
2271 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
2272 @@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
2273 return false;
2274 }
2275
2276 -static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
2277 +static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
2278 {
2279 - return false;
2280 }
2281
2282 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
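
The rewritten helpers replace the per-queue spinlock with a single state word that NAPI and busy-poll claim atomically. A user-space sketch of that ownership handshake, with GCC's __sync_val_compare_and_swap() standing in for the kernel's cmpxchg() (the state names here are illustrative):

#include <stdio.h>

enum { FP_FREE = 0, FP_NAPI = 1, FP_POLL = 2 };

static unsigned long state = FP_FREE;

/* Claim the queue for NAPI only if nobody owns it; mirrors cmpxchg(). */
static int lock_napi(void)
{
        return __sync_val_compare_and_swap(&state, FP_FREE, FP_NAPI) == FP_FREE;
}

static int lock_poll(void)
{
        return __sync_val_compare_and_swap(&state, FP_FREE, FP_POLL) == FP_FREE;
}

int main(void)
{
        printf("napi: %d\n", lock_napi());      /* 1: got the queue */
        printf("poll: %d\n", lock_poll());      /* 0: NAPI owns it */
        return 0;
}

The BNX2X_STATE_FP_NAPI_REQ bit is omitted from the sketch; it exists only so a spinning busy-poll user cannot starve NAPI indefinitely.
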
2283 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2284 index ec4cebabff49..e36e3a50b342 100644
2285 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2286 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2287 @@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
2288 int i;
2289
2290 for_each_rx_queue_cnic(bp, i) {
2291 - bnx2x_fp_init_lock(&bp->fp[i]);
2292 + bnx2x_fp_busy_poll_init(&bp->fp[i]);
2293 napi_enable(&bnx2x_fp(bp, i, napi));
2294 }
2295 }
2296 @@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
2297 int i;
2298
2299 for_each_eth_queue(bp, i) {
2300 - bnx2x_fp_init_lock(&bp->fp[i]);
2301 + bnx2x_fp_busy_poll_init(&bp->fp[i]);
2302 napi_enable(&bnx2x_fp(bp, i, napi));
2303 }
2304 }
2305 @@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
2306 }
2307 }
2308
2309 + bnx2x_fp_unlock_napi(fp);
2310 +
2311 /* Fall out from the NAPI loop if needed */
2312 - if (!bnx2x_fp_unlock_napi(fp) &&
2313 - !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2314 + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2315
2316 /* No need to update SB for FCoE L2 ring as long as
2317 * it's connected to the default SB and the SB
2318 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
2319 index 5748542f6717..a37800ecb27c 100644
2320 --- a/drivers/net/ethernet/broadcom/tg3.c
2321 +++ b/drivers/net/ethernet/broadcom/tg3.c
2322 @@ -17840,8 +17840,10 @@ static int tg3_init_one(struct pci_dev *pdev,
2323 */
2324 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
2325 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
2326 + tg3_full_lock(tp, 0);
2327 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
2328 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
2329 + tg3_full_unlock(tp);
2330 }
2331
2332 err = tg3_test_dma(tp);
2333 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
2334 index d2975fa7e549..e51faf0ca989 100644
2335 --- a/drivers/net/ethernet/emulex/benet/be_main.c
2336 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
2337 @@ -887,7 +887,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
2338 }
2339
2340 if (vlan_tag) {
2341 - skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
2342 + skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
2343 + vlan_tag);
2344 if (unlikely(!skb))
2345 return skb;
2346 skb->vlan_tci = 0;
2347 @@ -896,7 +897,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
2348 /* Insert the outer VLAN, if any */
2349 if (adapter->qnq_vid) {
2350 vlan_tag = adapter->qnq_vid;
2351 - skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
2352 + skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
2353 + vlan_tag);
2354 if (unlikely(!skb))
2355 return skb;
2356 if (skip_hw_vlan)
2357 diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
2358 index b16e1b95566f..61ebb038fb75 100644
2359 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
2360 +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
2361 @@ -585,7 +585,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
2362 * on the host, we deprecate the error message for this
2363 * specific command/input_mod/opcode_mod/fw-status to be debug.
2364 */
2365 - if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
2366 + if (op == MLX4_CMD_SET_PORT &&
2367 + (in_modifier == 1 || in_modifier == 2) &&
2368 op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
2369 mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
2370 op, context->fw_status);
2371 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2372 index 6bdaa313e7ea..0207044f6d57 100644
2373 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2374 +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2375 @@ -2606,13 +2606,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2376 netif_carrier_off(dev);
2377 mlx4_en_set_default_moderation(priv);
2378
2379 - err = register_netdev(dev);
2380 - if (err) {
2381 - en_err(priv, "Netdev registration failed for port %d\n", port);
2382 - goto out;
2383 - }
2384 - priv->registered = 1;
2385 -
2386 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2387 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2388
2389 @@ -2652,6 +2645,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2390 queue_delayed_work(mdev->workqueue, &priv->service_task,
2391 SERVICE_TASK_DELAY);
2392
2393 + err = register_netdev(dev);
2394 + if (err) {
2395 + en_err(priv, "Netdev registration failed for port %d\n", port);
2396 + goto out;
2397 + }
2398 +
2399 + priv->registered = 1;
2400 +
2401 return 0;
2402
2403 out:
2404 diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
2405 index 5c55f11572ba..75d6f26729a3 100644
2406 --- a/drivers/net/usb/asix_common.c
2407 +++ b/drivers/net/usb/asix_common.c
2408 @@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
2409 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
2410 skb_put(skb, sizeof(padbytes));
2411 }
2412 +
2413 + usbnet_set_skb_tx_stats(skb, 1, 0);
2414 return skb;
2415 }
2416
2417 diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
2418 index 5ee7a1dbc023..96fc8a5bde84 100644
2419 --- a/drivers/net/usb/cdc_mbim.c
2420 +++ b/drivers/net/usb/cdc_mbim.c
2421 @@ -402,7 +402,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
2422
2423 /* map MBIM session to VLAN */
2424 if (tci)
2425 - vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
2426 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
2427 err:
2428 return skb;
2429 }
2430 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2431 index 80a844e0ae03..c3e4da9e79ca 100644
2432 --- a/drivers/net/usb/cdc_ncm.c
2433 +++ b/drivers/net/usb/cdc_ncm.c
2434 @@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
2435
2436 /* return skb */
2437 ctx->tx_curr_skb = NULL;
2438 - dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
2439
2440 /* keep private stats: framing overhead and number of NTBs */
2441 ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
2442 ctx->tx_ntbs++;
2443
2444 - /* usbnet has already counted all the framing overhead.
2445 + /* usbnet will count all the framing overhead by default.
2446 * Adjust the stats so that the tx_bytes counter show real
2447 * payload data instead.
2448 */
2449 - dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
2450 + usbnet_set_skb_tx_stats(skb_out, n,
2451 + ctx->tx_curr_frame_payload - skb_out->len);
2452
2453 return skb_out;
2454
2455 diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
2456 index b94a0fbb8b3b..953de13267df 100644
2457 --- a/drivers/net/usb/sr9800.c
2458 +++ b/drivers/net/usb/sr9800.c
2459 @@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
2460 skb_put(skb, sizeof(padbytes));
2461 }
2462
2463 + usbnet_set_skb_tx_stats(skb, 1, 0);
2464 return skb;
2465 }
2466
2467 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
2468 index 3a6770a65d78..e7ed2513b1d1 100644
2469 --- a/drivers/net/usb/usbnet.c
2470 +++ b/drivers/net/usb/usbnet.c
2471 @@ -1189,8 +1189,7 @@ static void tx_complete (struct urb *urb)
2472 struct usbnet *dev = entry->dev;
2473
2474 if (urb->status == 0) {
2475 - if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
2476 - dev->net->stats.tx_packets++;
2477 + dev->net->stats.tx_packets += entry->packets;
2478 dev->net->stats.tx_bytes += entry->length;
2479 } else {
2480 dev->net->stats.tx_errors++;
2481 @@ -1348,7 +1347,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
2482 } else
2483 urb->transfer_flags |= URB_ZERO_PACKET;
2484 }
2485 - entry->length = urb->transfer_buffer_length = length;
2486 + urb->transfer_buffer_length = length;
2487 +
2488 + if (info->flags & FLAG_MULTI_PACKET) {
2489 + /* Driver has set number of packets and a length delta.
2490 + * Calculate the complete length and ensure that it's
2491 + * positive.
2492 + */
2493 + entry->length += length;
2494 + if (WARN_ON_ONCE(entry->length <= 0))
2495 + entry->length = length;
2496 + } else {
2497 + usbnet_set_skb_tx_stats(skb, 1, length);
2498 + }
2499
2500 spin_lock_irqsave(&dev->txq.lock, flags);
2501 retval = usb_autopm_get_interface_async(dev->intf);
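
With usbnet_set_skb_tx_stats(), a multi-packet driver records an aggregated packet count plus a byte delta per skb, and usbnet adds the final URB length at submission. A sketch of that accounting, using simplified stand-ins for the usbnet types:

#include <stdio.h>

struct tx_entry {
        long length;            /* driver's byte delta, then += URB length */
        int packets;
};

/* Simplified stand-in for usbnet_set_skb_tx_stats(). */
static void set_tx_stats(struct tx_entry *e, int packets, long delta)
{
        e->packets = packets;
        e->length = delta;
}

int main(void)
{
        struct tx_entry e;
        long payload = 3 * 1514;        /* three aggregated datagrams */
        long ntb_len = payload + 96;    /* assumed NTB framing overhead */

        set_tx_stats(&e, 3, payload - ntb_len); /* cdc_ncm style delta */
        e.length += ntb_len;    /* usbnet_start_xmit() adds the URB length */
        printf("packets=%d bytes=%ld\n", e.packets, e.length);  /* 3, 4542 */
        return 0;
}
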
2502 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2503 index 42b2d6a56d05..2826c5508762 100644
2504 --- a/drivers/net/vxlan.c
2505 +++ b/drivers/net/vxlan.c
2506 @@ -1578,10 +1578,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
2507 int err;
2508 bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
2509
2510 - skb = udp_tunnel_handle_offloads(skb, udp_sum);
2511 - if (IS_ERR(skb))
2512 - return -EINVAL;
2513 -
2514 skb_scrub_packet(skb, xnet);
2515
2516 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
2517 @@ -1590,16 +1586,21 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
2518
2519 /* Need space for new headers (invalidates iph ptr) */
2520 err = skb_cow_head(skb, min_headroom);
2521 - if (unlikely(err))
2522 - return err;
2523 + if (unlikely(err)) {
2524 + kfree_skb(skb);
2525 + goto err;
2526 + }
2527
2528 - if (vlan_tx_tag_present(skb)) {
2529 - if (WARN_ON(!__vlan_put_tag(skb,
2530 - skb->vlan_proto,
2531 - vlan_tx_tag_get(skb))))
2532 - return -ENOMEM;
2533 + skb = vlan_hwaccel_push_inside(skb);
2534 + if (WARN_ON(!skb)) {
2535 + err = -ENOMEM;
2536 + goto err;
2537 + }
2538
2539 - skb->vlan_tci = 0;
2540 + skb = udp_tunnel_handle_offloads(skb, udp_sum);
2541 + if (IS_ERR(skb)) {
2542 + err = -EINVAL;
2543 + goto err;
2544 }
2545
2546 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
2547 @@ -1611,6 +1612,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
2548 udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
2549 ttl, src_port, dst_port);
2550 return 0;
2551 +err:
2552 + dst_release(dst);
2553 + return err;
2554 }
2555 #endif
2556
2557 @@ -1624,27 +1628,24 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
2558 int err;
2559 bool udp_sum = !vs->sock->sk->sk_no_check_tx;
2560
2561 - skb = udp_tunnel_handle_offloads(skb, udp_sum);
2562 - if (IS_ERR(skb))
2563 - return -EINVAL;
2564 -
2565 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
2566 + VXLAN_HLEN + sizeof(struct iphdr)
2567 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
2568
2569 /* Need space for new headers (invalidates iph ptr) */
2570 err = skb_cow_head(skb, min_headroom);
2571 - if (unlikely(err))
2572 + if (unlikely(err)) {
2573 + kfree_skb(skb);
2574 return err;
2575 + }
2576
2577 - if (vlan_tx_tag_present(skb)) {
2578 - if (WARN_ON(!__vlan_put_tag(skb,
2579 - skb->vlan_proto,
2580 - vlan_tx_tag_get(skb))))
2581 - return -ENOMEM;
2582 + skb = vlan_hwaccel_push_inside(skb);
2583 + if (WARN_ON(!skb))
2584 + return -ENOMEM;
2585
2586 - skb->vlan_tci = 0;
2587 - }
2588 + skb = udp_tunnel_handle_offloads(skb, udp_sum);
2589 + if (IS_ERR(skb))
2590 + return PTR_ERR(skb);
2591
2592 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
2593 vxh->vx_flags = htonl(VXLAN_FLAGS);
2594 @@ -1786,9 +1787,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2595 tos, ttl, df, src_port, dst_port,
2596 htonl(vni << 8),
2597 !net_eq(vxlan->net, dev_net(vxlan->dev)));
2598 -
2599 - if (err < 0)
2600 + if (err < 0) {
2601 + /* skb is already freed. */
2602 + skb = NULL;
2603 goto rt_tx_error;
2604 + }
2605 +
2606 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
2607 #if IS_ENABLED(CONFIG_IPV6)
2608 } else {
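
After this rework the transmit error paths free the skb exactly once: vxlan6_xmit_skb() drops its dst reference at the new err label, and vxlan_xmit_one() nulls its skb pointer before jumping to rt_tx_error so the buffer is not freed twice. A skeleton of that single-owner unwind (the mallocs are placeholders for the skb and route reference):

#include <stdio.h>
#include <stdlib.h>

static int xmit(int cow_fails)
{
        void *skb = malloc(64);         /* the packet buffer */
        void *dst = malloc(64);         /* stands in for the dst reference */
        int err = 0;

        if (cow_fails) {                /* e.g. skb_cow_head() failed */
                err = -1;
                free(skb);              /* kfree_skb(); skb is now gone */
                goto err;
        }
        free(skb);                      /* consumed by a successful transmit */
        free(dst);
        return 0;
err:
        free(dst);                      /* dst_release() on every failure */
        return err;
}

int main(void)
{
        printf("%d %d\n", xmit(0), xmit(1));
        return 0;
}
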
2609 diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
2610 index ecb783beeec2..9ee8ed0ffb94 100644
2611 --- a/drivers/net/wireless/ath/ath9k/beacon.c
2612 +++ b/drivers/net/wireless/ath/ath9k/beacon.c
2613 @@ -218,12 +218,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
2614 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2615 struct ath_vif *avp = (void *)vif->drv_priv;
2616 struct ath_buf *bf = avp->av_bcbuf;
2617 + struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
2618
2619 ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
2620 avp->av_bslot);
2621
2622 tasklet_disable(&sc->bcon_tasklet);
2623
2624 + cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
2625 +
2626 if (bf && bf->bf_mpdu) {
2627 struct sk_buff *skb = bf->bf_mpdu;
2628 dma_unmap_single(sc->dev, bf->bf_buf_addr,
2629 @@ -520,8 +523,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
2630 }
2631
2632 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
2633 - if ((vif->type != NL80211_IFTYPE_AP) ||
2634 - (sc->nbcnvifs > 1)) {
2635 + if (vif->type != NL80211_IFTYPE_AP) {
2636 ath_dbg(common, CONFIG,
2637 "An AP interface is already present !\n");
2638 return false;
2639 @@ -615,12 +617,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
2640 * enabling/disabling SWBA.
2641 */
2642 if (changed & BSS_CHANGED_BEACON_ENABLED) {
2643 - if (!bss_conf->enable_beacon &&
2644 - (sc->nbcnvifs <= 1)) {
2645 - cur_conf->enable_beacon = false;
2646 - } else if (bss_conf->enable_beacon) {
2647 - cur_conf->enable_beacon = true;
2648 - ath9k_cache_beacon_config(sc, ctx, bss_conf);
2649 + bool enabled = cur_conf->enable_beacon;
2650 +
2651 + if (!bss_conf->enable_beacon) {
2652 + cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
2653 + } else {
2654 + cur_conf->enable_beacon |= BIT(avp->av_bslot);
2655 + if (!enabled)
2656 + ath9k_cache_beacon_config(sc, ctx, bss_conf);
2657 }
2658 }
2659
2660 diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
2661 index ffc454b18637..81899e7e2a20 100644
2662 --- a/drivers/net/wireless/ath/ath9k/common.h
2663 +++ b/drivers/net/wireless/ath/ath9k/common.h
2664 @@ -53,7 +53,7 @@ struct ath_beacon_config {
2665 u16 dtim_period;
2666 u16 bmiss_timeout;
2667 u8 dtim_count;
2668 - bool enable_beacon;
2669 + u8 enable_beacon;
2670 bool ibss_creator;
2671 u32 nexttbtt;
2672 u32 intval;
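
Widening enable_beacon from bool to u8 gives each beacon slot its own bit, so beaconing stays enabled until the last slot opts out. The per-slot bookkeeping in miniature:

#include <stdio.h>

#define BIT(n)  (1U << (n))

static unsigned char enable_beacon;     /* one bit per beacon slot */

int main(void)
{
        enable_beacon |= BIT(0);        /* slot 0 brings beacons up */
        enable_beacon |= BIT(2);        /* slot 2 joins */
        enable_beacon &= ~BIT(0);       /* slot 0 leaves... */
        printf("beaconing: %s\n", enable_beacon ? "on" : "off"); /* still on */
        enable_beacon &= ~BIT(2);       /* last slot leaves */
        printf("beaconing: %s\n", enable_beacon ? "on" : "off"); /* off */
        return 0;
}
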
2673 diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
2674 index a6f22c32a279..3811878ab9cd 100644
2675 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h
2676 +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
2677 @@ -708,7 +708,6 @@ struct iwl_priv {
2678 unsigned long reload_jiffies;
2679 int reload_count;
2680 bool ucode_loaded;
2681 - bool init_ucode_run; /* Don't run init uCode again */
2682
2683 u8 plcp_delta_threshold;
2684
2685 diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
2686 index d5cee1530597..80b8094deed1 100644
2687 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
2688 +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
2689 @@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
2690 if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
2691 return 0;
2692
2693 - if (priv->init_ucode_run)
2694 - return 0;
2695 -
2696 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
2697 calib_complete, ARRAY_SIZE(calib_complete),
2698 iwlagn_wait_calib, priv);
2699 @@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
2700 */
2701 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
2702 UCODE_CALIB_TIMEOUT);
2703 - if (!ret)
2704 - priv->init_ucode_run = true;
2705
2706 goto out;
2707
2708 diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
2709 index e25faacf58b7..a5186bb7c63e 100644
2710 --- a/drivers/net/wireless/rtlwifi/pci.c
2711 +++ b/drivers/net/wireless/rtlwifi/pci.c
2712 @@ -1118,12 +1118,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
2713 /*This is for new trx flow*/
2714 struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
2715 u8 temp_one = 1;
2716 + u8 *entry;
2717
2718 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
2719 ring = &rtlpci->tx_ring[BEACON_QUEUE];
2720 pskb = __skb_dequeue(&ring->queue);
2721 - if (pskb)
2722 + if (rtlpriv->use_new_trx_flow)
2723 + entry = (u8 *)(&ring->buffer_desc[ring->idx]);
2724 + else
2725 + entry = (u8 *)(&ring->desc[ring->idx]);
2726 + if (pskb) {
2727 + pci_unmap_single(rtlpci->pdev,
2728 + rtlpriv->cfg->ops->get_desc(
2729 + (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
2730 + pskb->len, PCI_DMA_TODEVICE);
2731 kfree_skb(pskb);
2732 + }
2733
2734 /*NB: the beacon data buffer must be 32-bit aligned. */
2735 pskb = ieee80211_beacon_get(hw, mac->vif);
2736 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2737 index eeed0ce620f3..2b0b4e62f171 100644
2738 --- a/drivers/net/xen-netfront.c
2739 +++ b/drivers/net/xen-netfront.c
2740 @@ -1098,8 +1098,7 @@ err:
2741
2742 static int xennet_change_mtu(struct net_device *dev, int mtu)
2743 {
2744 - int max = xennet_can_sg(dev) ?
2745 - XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
2746 + int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
2747
2748 if (mtu > max)
2749 return -EINVAL;
2750 @@ -1353,8 +1352,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
2751 netdev->ethtool_ops = &xennet_ethtool_ops;
2752 SET_NETDEV_DEV(netdev, &dev->dev);
2753
2754 - netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
2755 -
2756 np->netdev = netdev;
2757
2758 netif_carrier_off(netdev);
2759 diff --git a/drivers/of/address.c b/drivers/of/address.c
2760 index 06af494184d6..216b00d99bd8 100644
2761 --- a/drivers/of/address.c
2762 +++ b/drivers/of/address.c
2763 @@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np)
2764 return NULL;
2765 }
2766
2767 -static int of_empty_ranges_quirk(void)
2768 +static int of_empty_ranges_quirk(struct device_node *np)
2769 {
2770 if (IS_ENABLED(CONFIG_PPC)) {
2771 - /* To save cycles, we cache the result */
2772 + /* To save cycles, we cache the result for the global "Mac" setting */
2773 static int quirk_state = -1;
2774
2775 + /* PA-SEMI sdc DT bug */
2776 + if (of_device_is_compatible(np, "1682m-sdc"))
2777 + return true;
2778 +
2779 + /* Make quirk cached */
2780 if (quirk_state < 0)
2781 quirk_state =
2782 of_machine_is_compatible("Power Macintosh") ||
2783 @@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
2784 * This code is only enabled on powerpc. --gcl
2785 */
2786 ranges = of_get_property(parent, rprop, &rlen);
2787 - if (ranges == NULL && !of_empty_ranges_quirk()) {
2788 + if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
2789 pr_err("OF: no ranges; cannot translate\n");
2790 return 1;
2791 }
2792 diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
2793 index f0fcbceee209..f69b0d0a5ee1 100644
2794 --- a/drivers/pci/host/pcie-designware.c
2795 +++ b/drivers/pci/host/pcie-designware.c
2796 @@ -342,7 +342,7 @@ static const struct irq_domain_ops msi_domain_ops = {
2797 .map = dw_pcie_msi_map,
2798 };
2799
2800 -int __init dw_pcie_host_init(struct pcie_port *pp)
2801 +int dw_pcie_host_init(struct pcie_port *pp)
2802 {
2803 struct device_node *np = pp->dev->of_node;
2804 struct platform_device *pdev = to_platform_device(pp->dev);
2805 diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
2806 index 85f594e1708f..b4ba6ff56cf6 100644
2807 --- a/drivers/pci/host/pcie-spear13xx.c
2808 +++ b/drivers/pci/host/pcie-spear13xx.c
2809 @@ -298,7 +298,7 @@ static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
2810 return 0;
2811 }
2812
2813 -static int __init spear13xx_pcie_probe(struct platform_device *pdev)
2814 +static int spear13xx_pcie_probe(struct platform_device *pdev)
2815 {
2816 struct spear13xx_pcie *spear13xx_pcie;
2817 struct pcie_port *pp;
2818 @@ -371,7 +371,7 @@ static const struct of_device_id spear13xx_pcie_of_match[] = {
2819 };
2820 MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match);
2821
2822 -static struct platform_driver spear13xx_pcie_driver __initdata = {
2823 +static struct platform_driver spear13xx_pcie_driver = {
2824 .probe = spear13xx_pcie_probe,
2825 .driver = {
2826 .name = "spear-pcie",
2827 diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
2828 index 7d48ecae6695..788db48dbbad 100644
2829 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c
2830 +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
2831 @@ -286,11 +286,12 @@ int cpci_configure_slot(struct slot *slot)
2832 }
2833 parent = slot->dev->bus;
2834
2835 - list_for_each_entry(dev, &parent->devices, bus_list)
2836 + list_for_each_entry(dev, &parent->devices, bus_list) {
2837 if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
2838 continue;
2839 if (pci_is_bridge(dev))
2840 pci_hp_add_bridge(dev);
2841 + }
2842
2843
2844 pci_assign_unassigned_bridge_resources(parent->self);
2845 diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
2846 index 6ebf8edc5f3c..09499548d42a 100644
2847 --- a/drivers/pci/pci-acpi.c
2848 +++ b/drivers/pci/pci-acpi.c
2849 @@ -248,6 +248,9 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
2850 acpi_handle handle, phandle;
2851 struct pci_bus *pbus;
2852
2853 + if (acpi_pci_disabled)
2854 + return -ENODEV;
2855 +
2856 handle = NULL;
2857 for (pbus = dev->bus; pbus; pbus = pbus->parent) {
2858 handle = acpi_pci_get_bridge_handle(pbus);
2859 diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
2860 index c6849d9e86ce..167fe411ce2e 100644
2861 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c
2862 +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
2863 @@ -132,16 +132,8 @@ static const char *aer_agent_string[] = {
2864 static void __print_tlp_header(struct pci_dev *dev,
2865 struct aer_header_log_regs *t)
2866 {
2867 - unsigned char *tlp = (unsigned char *)&t;
2868 -
2869 - dev_err(&dev->dev, " TLP Header:"
2870 - " %02x%02x%02x%02x %02x%02x%02x%02x"
2871 - " %02x%02x%02x%02x %02x%02x%02x%02x\n",
2872 - *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
2873 - *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
2874 - *(tlp + 11), *(tlp + 10), *(tlp + 9),
2875 - *(tlp + 8), *(tlp + 15), *(tlp + 14),
2876 - *(tlp + 13), *(tlp + 12));
2877 + dev_err(&dev->dev, " TLP Header: %08x %08x %08x %08x\n",
2878 + t->dw0, t->dw1, t->dw2, t->dw3);
2879 }
2880
2881 static void __aer_print_error(struct pci_dev *dev,
2882 diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
2883 index 15a8998bd161..be4586b788d1 100644
2884 --- a/drivers/scsi/be2iscsi/be_main.c
2885 +++ b/drivers/scsi/be2iscsi/be_main.c
2886 @@ -5735,9 +5735,9 @@ free_port:
2887 hba_free:
2888 if (phba->msix_enabled)
2889 pci_disable_msix(phba->pcidev);
2890 - iscsi_host_remove(phba->shost);
2891 pci_dev_put(phba->pcidev);
2892 iscsi_host_free(phba->shost);
2893 + pci_set_drvdata(pcidev, NULL);
2894 disable_pci:
2895 pci_disable_device(pcidev);
2896 return ret;
2897 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2898 index 17fb0518c9c1..b1ab509d0e05 100644
2899 --- a/drivers/scsi/scsi_lib.c
2900 +++ b/drivers/scsi/scsi_lib.c
2901 @@ -1299,9 +1299,11 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
2902 "rejecting I/O to dead device\n");
2903 ret = BLKPREP_KILL;
2904 break;
2905 - case SDEV_QUIESCE:
2906 case SDEV_BLOCK:
2907 case SDEV_CREATED_BLOCK:
2908 + ret = BLKPREP_DEFER;
2909 + break;
2910 + case SDEV_QUIESCE:
2911 /*
2912 * If the devices is blocked we defer normal commands.
2913 */
2914 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2915 index 6446490854cb..2eb6fa8e645e 100644
2916 --- a/drivers/target/iscsi/iscsi_target.c
2917 +++ b/drivers/target/iscsi/iscsi_target.c
2918 @@ -1184,7 +1184,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2919 * traditional iSCSI block I/O.
2920 */
2921 if (iscsit_allocate_iovecs(cmd) < 0) {
2922 - return iscsit_add_reject_cmd(cmd,
2923 + return iscsit_reject_cmd(cmd,
2924 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2925 }
2926 immed_data = cmd->immediate_data;
2927 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
2928 index 47ca0f3b8c85..72824659f628 100644
2929 --- a/drivers/tty/n_tty.c
2930 +++ b/drivers/tty/n_tty.c
2931 @@ -247,8 +247,6 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
2932
2933 static void n_tty_check_throttle(struct tty_struct *tty)
2934 {
2935 - if (tty->driver->type == TTY_DRIVER_TYPE_PTY)
2936 - return;
2937 /*
2938 * Check the remaining room for the input canonicalization
2939 * mode. We don't want to throttle the driver if we're in
2940 @@ -1512,23 +1510,6 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
2941 n_tty_receive_char_flagged(tty, c, flag);
2942 }
2943
2944 -/**
2945 - * n_tty_receive_buf - data receive
2946 - * @tty: terminal device
2947 - * @cp: buffer
2948 - * @fp: flag buffer
2949 - * @count: characters
2950 - *
2951 - * Called by the terminal driver when a block of characters has
2952 - * been received. This function must be called from soft contexts
2953 - * not from interrupt context. The driver is responsible for making
2954 - * calls one at a time and in order (or using flush_to_ldisc)
2955 - *
2956 - * n_tty_receive_buf()/producer path:
2957 - * claims non-exclusive termios_rwsem
2958 - * publishes read_head and canon_head
2959 - */
2960 -
2961 static void
2962 n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
2963 char *fp, int count)
2964 @@ -1684,24 +1665,85 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
2965 }
2966 }
2967
2968 +/**
2969 + * n_tty_receive_buf_common - process input
2970 + * @tty: device to receive input
2971 + * @cp: input chars
2972 + * @fp: flags for each char (if NULL, all chars are TTY_NORMAL)
2973 + * @count: number of input chars in @cp
2974 + *
2975 + * Called by the terminal driver when a block of characters has
2976 + * been received. This function must be called from soft contexts
2977 + * not from interrupt context. The driver is responsible for making
2978 + * calls one at a time and in order (or using flush_to_ldisc)
2979 + *
2980 + * Returns the # of input chars from @cp which were processed.
2981 + *
2982 + * In canonical mode, the maximum line length is 4096 chars (including
2983 + * the line termination char); lines longer than 4096 chars are
2984 + * truncated. After 4095 chars, input data is still processed but
2985 + * not stored. Overflow processing ensures the tty can always
2986 + * receive more input until at least one line can be read.
2987 + *
2988 + * In non-canonical mode, the read buffer will only accept 4095 chars;
2989 + * this provides the necessary space for a newline char if the input
2990 + * mode is switched to canonical.
2991 + *
2992 + * Note it is possible for the read buffer to _contain_ 4096 chars
2993 + * in non-canonical mode: the read buffer could already contain the
2994 + * maximum canon line of 4096 chars when the mode is switched to
2995 + * non-canonical.
2996 + *
2997 + * n_tty_receive_buf()/producer path:
2998 + * claims non-exclusive termios_rwsem
2999 + * publishes commit_head or canon_head
3000 + */
3001 static int
3002 n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
3003 char *fp, int count, int flow)
3004 {
3005 struct n_tty_data *ldata = tty->disc_data;
3006 - int room, n, rcvd = 0;
3007 + int room, n, rcvd = 0, overflow;
3008
3009 down_read(&tty->termios_rwsem);
3010
3011 while (1) {
3012 - room = receive_room(tty);
3013 + /*
3014 + * When PARMRK is set, each input char may take up to 3 chars
3015 + * in the read buf; reduce the buffer space avail by 3x
3016 + *
3017 + * If we are doing input canonicalization, and there are no
3018 + * pending newlines, let characters through without limit, so
3019 + * that erase characters will be handled. Other excess
3020 + * characters will be beeped.
3021 + *
3022 + * paired with store in *_copy_from_read_buf() -- guarantees
3023 + * the consumer has loaded the data in read_buf up to the new
3024 + * read_tail (so this producer will not overwrite unread data)
3025 + */
3026 + size_t tail = ldata->read_tail;
3027 +
3028 + room = N_TTY_BUF_SIZE - (ldata->read_head - tail);
3029 + if (I_PARMRK(tty))
3030 + room = (room + 2) / 3;
3031 + room--;
3032 + if (room <= 0) {
3033 + overflow = ldata->icanon && ldata->canon_head == tail;
3034 + if (overflow && room < 0)
3035 + ldata->read_head--;
3036 + room = overflow;
3037 + ldata->no_room = flow && !room;
3038 + } else
3039 + overflow = 0;
3040 +
3041 n = min(count, room);
3042 - if (!n) {
3043 - if (flow && !room)
3044 - ldata->no_room = 1;
3045 + if (!n)
3046 break;
3047 - }
3048 - __receive_buf(tty, cp, fp, n);
3049 +
3050 + /* ignore parity errors if handling overflow */
3051 + if (!overflow || !fp || *fp != TTY_PARITY)
3052 + __receive_buf(tty, cp, fp, n);
3053 +
3054 cp += n;
3055 if (fp)
3056 fp += n;
3057 @@ -1710,7 +1752,17 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
3058 }
3059
3060 tty->receive_room = room;
3061 - n_tty_check_throttle(tty);
3062 +
3063 + /* Unthrottle if handling overflow on pty */
3064 + if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
3065 + if (overflow) {
3066 + tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
3067 + tty_unthrottle_safe(tty);
3068 + __tty_set_flow_change(tty, 0);
3069 + }
3070 + } else
3071 + n_tty_check_throttle(tty);
3072 +
3073 up_read(&tty->termios_rwsem);
3074
3075 return rcvd;
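
The new room computation reserves for PARMRK's worst-case three-byte expansion and always keeps one slot spare so a newline still fits if the tty switches to canonical mode. Just that arithmetic, extracted (N_TTY_BUF_SIZE is 4096 in this kernel):

#include <stdio.h>

#define N_TTY_BUF_SIZE  4096

static int receive_room(int head, int tail, int parmrk)
{
        int room = N_TTY_BUF_SIZE - (head - tail);

        if (parmrk)
                room = (room + 2) / 3;  /* each char may expand to 3 bytes */
        return room - 1;                /* keep one slot for a newline */
}

int main(void)
{
        printf("%d\n", receive_room(0, 0, 0));    /* 4095 */
        printf("%d\n", receive_room(0, 0, 1));    /* 1365 with PARMRK */
        printf("%d\n", receive_room(4095, 0, 0)); /* 0: overflow handling */
        return 0;
}
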
3076 diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
3077 index eb9bc7e1dbaa..1883478d6a8d 100644
3078 --- a/drivers/tty/serial/fsl_lpuart.c
3079 +++ b/drivers/tty/serial/fsl_lpuart.c
3080 @@ -910,6 +910,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
3081 writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
3082 sport->port.membase + UARTPFIFO);
3083
3084 + /* explicitly clear RDRF */
3085 + readb(sport->port.membase + UARTSR1);
3086 +
3087 /* flush Tx and Rx FIFO */
3088 writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
3089 sport->port.membase + UARTCFIFO);
3090 @@ -1095,6 +1098,8 @@ static int lpuart_startup(struct uart_port *port)
3091 sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) &
3092 UARTPFIFO_FIFOSIZE_MASK) + 1);
3093
3094 + sport->port.fifosize = sport->txfifo_size;
3095 +
3096 sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
3097 UARTPFIFO_FIFOSIZE_MASK) + 1);
3098
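
Two small fixes here: reading UARTSR1 before flushing the FIFOs clears a stale receive-data flag (per the driver's own comment), and the TX FIFO depth is now propagated into port.fifosize so the serial core sizes its writes correctly. The depth is decoded from a 3-bit field f as 2^(f+1) datawords; a sketch of that decode (the field offset and mask values are assumptions for illustration):

    #include <stdint.h>

    /* Field placement is an assumption for illustration. */
    #define UARTPFIFO_TXSIZE_OFF    4
    #define UARTPFIFO_FIFOSIZE_MASK 0x7

    /* An encoded field value f means a FIFO of 2^(f+1) datawords. */
    static unsigned int lpuart_tx_fifo_size(uint8_t pfifo)
    {
        return 1u << (((pfifo >> UARTPFIFO_TXSIZE_OFF) &
                       UARTPFIFO_FIFOSIZE_MASK) + 1);
    }

So an encoded 0 is a 2-entry FIFO and an encoded 7 is a 256-entry FIFO.
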
3099 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
3100 index 388cfd83b6b6..7ff97c39c8b4 100644
3101 --- a/drivers/usb/host/xhci-hub.c
3102 +++ b/drivers/usb/host/xhci-hub.c
3103 @@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
3104 status = PORT_PLC;
3105 port_change_bit = "link state";
3106 break;
3107 + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
3108 + status = PORT_CEC;
3109 + port_change_bit = "config error";
3110 + break;
3111 default:
3112 /* Should never happen */
3113 return;
3114 @@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
3115 status |= USB_PORT_STAT_C_LINK_STATE << 16;
3116 if ((raw_port_status & PORT_WRC))
3117 status |= USB_PORT_STAT_C_BH_RESET << 16;
3118 + if ((raw_port_status & PORT_CEC))
3119 + status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
3120 }
3121
3122 if (hcd->speed != HCD_USB3) {
3123 @@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
3124 case USB_PORT_FEAT_C_OVER_CURRENT:
3125 case USB_PORT_FEAT_C_ENABLE:
3126 case USB_PORT_FEAT_C_PORT_LINK_STATE:
3127 + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
3128 xhci_clear_port_change_bit(xhci, wValue, wIndex,
3129 port_array[wIndex], temp);
3130 break;
3131 @@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
3132 */
3133 status = bus_state->resuming_ports;
3134
3135 - mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
3136 + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
3137
3138 spin_lock_irqsave(&xhci->lock, flags);
3139 /* For each port, did anything change? If so, set that bit in buf. */
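
Three coordinated changes teach the root-hub emulation about the config-error change bit: report PORT_CEC in wPortChange, accept the matching ClearPortFeature, and include it in the status-change mask. Without all three, a set CEC bit is never acknowledged and keeps re-signalling. A sketch of the status translation (controller bit positions here are illustrative; the wPortChange values follow the hub spec):

    #include <stdint.h>

    /* Controller-side change bits (positions illustrative). */
    #define PORT_CSC        (1u << 17)      /* connect status change */
    #define PORT_CEC        (1u << 23)      /* config error change */

    /* Hub-spec wPortChange bits. */
    #define USB_PORT_STAT_C_CONNECTION      0x0001
    #define USB_PORT_STAT_C_CONFIG_ERROR    0x0080

    static uint16_t to_wPortChange(uint32_t raw)
    {
        uint16_t change = 0;

        if (raw & PORT_CSC)
            change |= USB_PORT_STAT_C_CONNECTION;
        /* The missing case: an unreported CEC bit can never be
         * cleared by the hub driver, so it stays asserted. */
        if (raw & PORT_CEC)
            change |= USB_PORT_STAT_C_CONFIG_ERROR;
        return change;
    }
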
3140 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3141 index fd53c9ebd662..2af32e26fafc 100644
3142 --- a/drivers/usb/host/xhci-pci.c
3143 +++ b/drivers/usb/host/xhci-pci.c
3144 @@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3145 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
3146 xhci->quirks |= XHCI_LPM_SUPPORT;
3147 xhci->quirks |= XHCI_INTEL_HOST;
3148 + xhci->quirks |= XHCI_AVOID_BEI;
3149 }
3150 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3151 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
3152 @@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3153 * PPT chipsets.
3154 */
3155 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
3156 - xhci->quirks |= XHCI_AVOID_BEI;
3157 }
3158 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3159 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
3160 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
3161 index 3086dec0ef53..8eb68a31cab6 100644
3162 --- a/drivers/usb/serial/ftdi_sio.c
3163 +++ b/drivers/usb/serial/ftdi_sio.c
3164 @@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = {
3165 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
3166 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
3167 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
3168 + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
3169 /*
3170 * ELV devices:
3171 */
3172 @@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
3173 {
3174 struct usb_device *udev = serial->dev;
3175
3176 - if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
3177 - (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
3178 + if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
3179 + return ftdi_jtag_probe(serial);
3180 +
3181 + if (udev->product &&
3182 + (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
3183 + !strcmp(udev->product, "SNAP Connect E10")))
3184 return ftdi_jtag_probe(serial);
3185
3186 return 0;
3187 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
3188 index 56b1b55c4751..4e4f46f3c89c 100644
3189 --- a/drivers/usb/serial/ftdi_sio_ids.h
3190 +++ b/drivers/usb/serial/ftdi_sio_ids.h
3191 @@ -561,6 +561,12 @@
3192 */
3193 #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
3194
3195 +/*
3196 + * Synapse Wireless product ids (FTDI_VID)
3197 + * http://www.synapse-wireless.com
3198 + */
3199 +#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
3200 +
3201
3202 /********************************/
3203 /** third-party VID/PID combos **/
3204 diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
3205 index 904ab353ecf2..c44b911937e8 100644
3206 --- a/drivers/usb/serial/generic.c
3207 +++ b/drivers/usb/serial/generic.c
3208 @@ -374,7 +374,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
3209 __func__, urb->status);
3210 return;
3211 default:
3212 - dev_err(&port->dev, "%s - nonzero urb status: %d\n",
3213 + dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
3214 __func__, urb->status);
3215 goto resubmit;
3216 }
3217 diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
3218 index 742d827f876c..04a217af29b7 100644
3219 --- a/drivers/usb/serial/keyspan_pda.c
3220 +++ b/drivers/usb/serial/keyspan_pda.c
3221 @@ -61,6 +61,7 @@ struct keyspan_pda_private {
3222 /* For Xircom PGSDB9 and older Entregra version of the same device */
3223 #define XIRCOM_VENDOR_ID 0x085a
3224 #define XIRCOM_FAKE_ID 0x8027
3225 +#define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */
3226 #define ENTREGRA_VENDOR_ID 0x1645
3227 #define ENTREGRA_FAKE_ID 0x8093
3228
3229 @@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = {
3230 #endif
3231 #ifdef XIRCOM
3232 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
3233 + { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
3234 { USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) },
3235 #endif
3236 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
3237 @@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = {
3238 #ifdef XIRCOM
3239 static const struct usb_device_id id_table_fake_xircom[] = {
3240 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
3241 + { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
3242 { USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) },
3243 { }
3244 };
3245 diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
3246 index 3860d02729dc..a325814341b7 100644
3247 --- a/drivers/xen/balloon.c
3248 +++ b/drivers/xen/balloon.c
3249 @@ -230,6 +230,29 @@ static enum bp_state reserve_additional_memory(long credit)
3250 balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
3251 nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
3252
3253 +#ifdef CONFIG_XEN_HAVE_PVMMU
3254 + /*
3255 + * add_memory() will build page tables for the new memory, so
3256 + * the p2m must contain invalid entries so that the correct
3257 + * non-present PTEs will be written.
3258 + *
3259 + * If a failure occurs, the original (identity) p2m entries
3260 + * are not restored since this region is now known not to
3261 + * conflict with any devices.
3262 + */
3263 + if (!xen_feature(XENFEAT_auto_translated_physmap)) {
3264 + unsigned long pfn, i;
3265 +
3266 + pfn = PFN_DOWN(hotplug_start_paddr);
3267 + for (i = 0; i < balloon_hotplug; i++) {
3268 + if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
3269 + pr_warn("set_phys_to_machine() failed, no memory added\n");
3270 + return BP_ECANCELED;
3271 + }
3272 + }
3273 + }
3274 +#endif
3275 +
3276 rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
3277
3278 if (rc) {
3279 diff --git a/fs/aio.c b/fs/aio.c
3280 index 14b93159ef83..58caa7e5d81c 100644
3281 --- a/fs/aio.c
3282 +++ b/fs/aio.c
3283 @@ -715,6 +715,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
3284 err_cleanup:
3285 aio_nr_sub(ctx->max_reqs);
3286 err_ctx:
3287 + atomic_set(&ctx->dead, 1);
3288 + if (ctx->mmap_size)
3289 + vm_munmap(ctx->mmap_base, ctx->mmap_size);
3290 aio_free_ring(ctx);
3291 err:
3292 mutex_unlock(&ctx->ring_lock);
3293 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3294 index 8b40b35e5e0e..3b68c75eccea 100644
3295 --- a/fs/btrfs/tree-log.c
3296 +++ b/fs/btrfs/tree-log.c
3297 @@ -1266,21 +1266,13 @@ out:
3298 }
3299
3300 static int insert_orphan_item(struct btrfs_trans_handle *trans,
3301 - struct btrfs_root *root, u64 offset)
3302 + struct btrfs_root *root, u64 ino)
3303 {
3304 int ret;
3305 - struct btrfs_path *path;
3306 -
3307 - path = btrfs_alloc_path();
3308 - if (!path)
3309 - return -ENOMEM;
3310
3311 - ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID,
3312 - offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
3313 - if (ret > 0)
3314 - ret = btrfs_insert_orphan_item(trans, root, offset);
3315 -
3316 - btrfs_free_path(path);
3317 + ret = btrfs_insert_orphan_item(trans, root, ino);
3318 + if (ret == -EEXIST)
3319 + ret = 0;
3320
3321 return ret;
3322 }
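
The rewritten helper replaces a find-then-insert pair with a single insert that tolerates -EEXIST. That drops a path allocation and, more importantly, is race-free: nothing can slip in between a lookup and the insert. The shape of the pattern, reduced to its essentials (item_insert() is a hypothetical stand-in):

    #include <errno.h>

    /* Hypothetical primitive: 0 on success, -EEXIST if the key is
     * already present, another negative errno on real failure. */
    extern int item_insert(unsigned long long key);

    static int item_insert_idempotent(unsigned long long key)
    {
        int ret = item_insert(key);

        /* "Already there" satisfies the postcondition the caller
         * wants, so it counts as success, not an error. */
        return ret == -EEXIST ? 0 : ret;
    }
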
3323 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
3324 index 9a7b6947874a..9431449a73ab 100644
3325 --- a/fs/cifs/file.c
3326 +++ b/fs/cifs/file.c
3327 @@ -1829,6 +1829,7 @@ refind_writable:
3328 cifsFileInfo_put(inv_file);
3329 spin_lock(&cifs_file_list_lock);
3330 ++refind;
3331 + inv_file = NULL;
3332 goto refind_writable;
3333 }
3334 }
3335 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3336 index c5f521bcdee2..cc93a7ffe8e4 100644
3337 --- a/fs/cifs/smb2ops.c
3338 +++ b/fs/cifs/smb2ops.c
3339 @@ -683,7 +683,8 @@ smb2_clone_range(const unsigned int xid,
3340
3341 /* No need to change MaxChunks since already set to 1 */
3342 chunk_sizes_updated = true;
3343 - }
3344 + } else
3345 + goto cchunk_out;
3346 }
3347
3348 cchunk_out:
3349 diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
3350 index 36b369697a13..5e7af1c69577 100644
3351 --- a/fs/ext4/indirect.c
3352 +++ b/fs/ext4/indirect.c
3353 @@ -1393,10 +1393,7 @@ end_range:
3354 * to free. Everything was covered by the start
3355 * of the range.
3356 */
3357 - return 0;
3358 - } else {
3359 - /* Shared branch grows from an indirect block */
3360 - partial2--;
3361 + goto do_indirects;
3362 }
3363 } else {
3364 /*
3365 @@ -1427,56 +1424,96 @@ end_range:
3366 /* Punch happened within the same level (n == n2) */
3367 partial = ext4_find_shared(inode, n, offsets, chain, &nr);
3368 partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
3369 - /*
3370 - * ext4_find_shared returns Indirect structure which
3371 - * points to the last element which should not be
3372 - * removed by truncate. But this is end of the range
3373 - * in punch_hole so we need to point to the next element
3374 - */
3375 - partial2->p++;
3376 - while ((partial > chain) || (partial2 > chain2)) {
3377 - /* We're at the same block, so we're almost finished */
3378 - if ((partial->bh && partial2->bh) &&
3379 - (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
3380 - if ((partial > chain) && (partial2 > chain2)) {
3381 +
3382 + /* Free top, but only if partial2 isn't its subtree. */
3383 + if (nr) {
3384 + int level = min(partial - chain, partial2 - chain2);
3385 + int i;
3386 + int subtree = 1;
3387 +
3388 + for (i = 0; i <= level; i++) {
3389 + if (offsets[i] != offsets2[i]) {
3390 + subtree = 0;
3391 + break;
3392 + }
3393 + }
3394 +
3395 + if (!subtree) {
3396 + if (partial == chain) {
3397 + /* Shared branch grows from the inode */
3398 + ext4_free_branches(handle, inode, NULL,
3399 + &nr, &nr+1,
3400 + (chain+n-1) - partial);
3401 + *partial->p = 0;
3402 + } else {
3403 + /* Shared branch grows from an indirect block */
3404 + BUFFER_TRACE(partial->bh, "get_write_access");
3405 ext4_free_branches(handle, inode, partial->bh,
3406 - partial->p + 1,
3407 - partial2->p,
3408 + partial->p,
3409 + partial->p+1,
3410 (chain+n-1) - partial);
3411 - BUFFER_TRACE(partial->bh, "call brelse");
3412 - brelse(partial->bh);
3413 - BUFFER_TRACE(partial2->bh, "call brelse");
3414 - brelse(partial2->bh);
3415 }
3416 - return 0;
3417 }
3418 + }
3419 +
3420 + if (!nr2) {
3421 /*
3422 - * Clear the ends of indirect blocks on the shared branch
3423 - * at the start of the range
3424 + * ext4_find_shared returns Indirect structure which
3425 + * points to the last element which should not be
3426 + * removed by truncate. But this is end of the range
3427 + * in punch_hole so we need to point to the next element
3428 */
3429 - if (partial > chain) {
3430 + partial2->p++;
3431 + }
3432 +
3433 + while (partial > chain || partial2 > chain2) {
3434 + int depth = (chain+n-1) - partial;
3435 + int depth2 = (chain2+n2-1) - partial2;
3436 +
3437 + if (partial > chain && partial2 > chain2 &&
3438 + partial->bh->b_blocknr == partial2->bh->b_blocknr) {
3439 + /*
3440 + * We've converged on the same block. Clear the range,
3441 + * then we're done.
3442 + */
3443 ext4_free_branches(handle, inode, partial->bh,
3444 - partial->p + 1,
3445 - (__le32 *)partial->bh->b_data+addr_per_block,
3446 - (chain+n-1) - partial);
3447 + partial->p + 1,
3448 + partial2->p,
3449 + (chain+n-1) - partial);
3450 BUFFER_TRACE(partial->bh, "call brelse");
3451 brelse(partial->bh);
3452 - partial--;
3453 + BUFFER_TRACE(partial2->bh, "call brelse");
3454 + brelse(partial2->bh);
3455 + return 0;
3456 }
3457 +
3458 /*
3459 - * Clear the ends of indirect blocks on the shared branch
3460 - * at the end of the range
3461 + * The start and end partial branches may not be at the same
3462 + * level even though the punch happened within one level. So, we
3463 + * give them a chance to arrive at the same level, then walk
3464 + * them in step with each other until we converge on the same
3465 + * block.
3466 */
3467 - if (partial2 > chain2) {
3468 + if (partial > chain && depth <= depth2) {
3469 + ext4_free_branches(handle, inode, partial->bh,
3470 + partial->p + 1,
3471 + (__le32 *)partial->bh->b_data+addr_per_block,
3472 + (chain+n-1) - partial);
3473 + BUFFER_TRACE(partial->bh, "call brelse");
3474 + brelse(partial->bh);
3475 + partial--;
3476 + }
3477 + if (partial2 > chain2 && depth2 <= depth) {
3478 ext4_free_branches(handle, inode, partial2->bh,
3479 (__le32 *)partial2->bh->b_data,
3480 partial2->p,
3481 - (chain2+n-1) - partial2);
3482 + (chain2+n2-1) - partial2);
3483 BUFFER_TRACE(partial2->bh, "call brelse");
3484 brelse(partial2->bh);
3485 partial2--;
3486 }
3487 }
3488 + return 0;
3489
3490 do_indirects:
3491 /* Kill the remaining (whole) subtrees */
3492 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3493 index 5c9c13ee72f9..4460d45ae138 100644
3494 --- a/fs/nfsd/nfs4state.c
3495 +++ b/fs/nfsd/nfs4state.c
3496 @@ -3226,7 +3226,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3497 } else
3498 nfs4_free_openowner(&oo->oo_owner);
3499 spin_unlock(&clp->cl_lock);
3500 - return oo;
3501 + return ret;
3502 }
3503
3504 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
3505 @@ -5058,7 +5058,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
3506 } else
3507 nfs4_free_lockowner(&lo->lo_owner);
3508 spin_unlock(&clp->cl_lock);
3509 - return lo;
3510 + return ret;
3511 }
3512
3513 static void
3514 diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
3515 index 324dc93ac896..e6e8d6449b47 100644
3516 --- a/fs/ocfs2/file.c
3517 +++ b/fs/ocfs2/file.c
3518 @@ -2374,10 +2374,14 @@ out_dio:
3519 /* buffered aio wouldn't have proper lock coverage today */
3520 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
3521
3522 + if (unlikely(written <= 0))
3523 + goto no_sync;
3524 +
3525 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
3526 ((file->f_flags & O_DIRECT) && !direct_io)) {
3527 - ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
3528 - *ppos + count - 1);
3529 + ret = filemap_fdatawrite_range(file->f_mapping,
3530 + iocb->ki_pos - written,
3531 + iocb->ki_pos - 1);
3532 if (ret < 0)
3533 written = ret;
3534
3535 @@ -2390,10 +2394,12 @@ out_dio:
3536 }
3537
3538 if (!ret)
3539 - ret = filemap_fdatawait_range(file->f_mapping, *ppos,
3540 - *ppos + count - 1);
3541 + ret = filemap_fdatawait_range(file->f_mapping,
3542 + iocb->ki_pos - written,
3543 + iocb->ki_pos - 1);
3544 }
3545
3546 +no_sync:
3547 /*
3548 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
3549 * function pointer which is called when o_direct io completes so that
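
The range arithmetic is the heart of this fix: by the time the sync runs, iocb->ki_pos has already advanced past the write, so the bytes that actually hit the file are [ki_pos - written, ki_pos - 1]. Using the pre-write *ppos with the requested count flushes the wrong (and possibly too large) range after a short write, and the new no_sync early-out skips the flush entirely when nothing was written. A self-checking sketch:

    #include <assert.h>

    struct range { long long start, end; };     /* inclusive bytes */

    /* pos: file position after the write (iocb->ki_pos)
     * written: bytes actually written (must be > 0) */
    static struct range written_range(long long pos, long long written)
    {
        struct range r = { pos - written, pos - 1 };
        return r;
    }

    int main(void)
    {
        /* Short write: 100 bytes requested at offset 0, 40 landed. */
        struct range r = written_range(40, 40);
        assert(r.start == 0 && r.end == 39);    /* not 0..99 */
        return 0;
    }
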
3550 diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
3551 index eb596b419942..b28f0d680cb5 100644
3552 --- a/fs/xfs/xfs_file.c
3553 +++ b/fs/xfs/xfs_file.c
3554 @@ -363,7 +363,8 @@ STATIC int /* error (positive) */
3555 xfs_zero_last_block(
3556 struct xfs_inode *ip,
3557 xfs_fsize_t offset,
3558 - xfs_fsize_t isize)
3559 + xfs_fsize_t isize,
3560 + bool *did_zeroing)
3561 {
3562 struct xfs_mount *mp = ip->i_mount;
3563 xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize);
3564 @@ -391,6 +392,7 @@ xfs_zero_last_block(
3565 zero_len = mp->m_sb.sb_blocksize - zero_offset;
3566 if (isize + zero_len > offset)
3567 zero_len = offset - isize;
3568 + *did_zeroing = true;
3569 return xfs_iozero(ip, isize, zero_len);
3570 }
3571
3572 @@ -409,7 +411,8 @@ int /* error (positive) */
3573 xfs_zero_eof(
3574 struct xfs_inode *ip,
3575 xfs_off_t offset, /* starting I/O offset */
3576 - xfs_fsize_t isize) /* current inode size */
3577 + xfs_fsize_t isize, /* current inode size */
3578 + bool *did_zeroing)
3579 {
3580 struct xfs_mount *mp = ip->i_mount;
3581 xfs_fileoff_t start_zero_fsb;
3582 @@ -431,7 +434,7 @@ xfs_zero_eof(
3583 * We only zero a part of that block so it is handled specially.
3584 */
3585 if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
3586 - error = xfs_zero_last_block(ip, offset, isize);
3587 + error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
3588 if (error)
3589 return error;
3590 }
3591 @@ -491,6 +494,7 @@ xfs_zero_eof(
3592 if (error)
3593 return error;
3594
3595 + *did_zeroing = true;
3596 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
3597 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
3598 }
3599 @@ -529,13 +533,15 @@ restart:
3600 * having to redo all checks before.
3601 */
3602 if (*pos > i_size_read(inode)) {
3603 + bool zero = false;
3604 +
3605 if (*iolock == XFS_IOLOCK_SHARED) {
3606 xfs_rw_iunlock(ip, *iolock);
3607 *iolock = XFS_IOLOCK_EXCL;
3608 xfs_rw_ilock(ip, *iolock);
3609 goto restart;
3610 }
3611 - error = xfs_zero_eof(ip, *pos, i_size_read(inode));
3612 + error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
3613 if (error)
3614 return error;
3615 }
3616 diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
3617 index 9af2882e1f4c..cb6ab0ad91a4 100644
3618 --- a/fs/xfs/xfs_inode.h
3619 +++ b/fs/xfs/xfs_inode.h
3620 @@ -379,8 +379,9 @@ int xfs_droplink(struct xfs_trans *, struct xfs_inode *);
3621 int xfs_bumplink(struct xfs_trans *, struct xfs_inode *);
3622
3623 /* from xfs_file.c */
3624 -int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
3625 -int xfs_iozero(struct xfs_inode *, loff_t, size_t);
3626 +int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
3627 + xfs_fsize_t isize, bool *did_zeroing);
3628 +int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
3629
3630
3631 #define IHOLD(ip) \
3632 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
3633 index ec6dcdc181ee..d2273d243990 100644
3634 --- a/fs/xfs/xfs_iops.c
3635 +++ b/fs/xfs/xfs_iops.c
3636 @@ -744,6 +744,7 @@ xfs_setattr_size(
3637 int error;
3638 uint lock_flags = 0;
3639 uint commit_flags = 0;
3640 + bool did_zeroing = false;
3641
3642 trace_xfs_setattr(ip);
3643
3644 @@ -787,20 +788,16 @@ xfs_setattr_size(
3645 return error;
3646
3647 /*
3648 - * Now we can make the changes. Before we join the inode to the
3649 - * transaction, take care of the part of the truncation that must be
3650 - * done without the inode lock. This needs to be done before joining
3651 - * the inode to the transaction, because the inode cannot be unlocked
3652 - * once it is a part of the transaction.
3653 + * File data changes must be complete before we start the transaction to
3654 + * modify the inode. This needs to be done before joining the inode to
3655 + * the transaction because the inode cannot be unlocked once it is a
3656 + * part of the transaction.
3657 + *
3658 + * Start with zeroing any data block beyond EOF that we may expose on
3659 + * file extension.
3660 */
3661 if (newsize > oldsize) {
3662 - /*
3663 - * Do the first part of growing a file: zero any data in the
3664 - * last block that is beyond the old EOF. We need to do this
3665 - * before the inode is joined to the transaction to modify
3666 - * i_size.
3667 - */
3668 - error = xfs_zero_eof(ip, newsize, oldsize);
3669 + error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
3670 if (error)
3671 return error;
3672 }
3673 @@ -810,23 +807,18 @@ xfs_setattr_size(
3674 * any previous writes that are beyond the on disk EOF and the new
3675 * EOF that have not been written out need to be written here. If we
3676 * do not write the data out, we expose ourselves to the null files
3677 - * problem.
3678 - *
3679 - * Only flush from the on disk size to the smaller of the in memory
3680 - * file size or the new size as that's the range we really care about
3681 - * here and prevents waiting for other data not within the range we
3682 - * care about here.
3683 + * problem. Note that this includes any block zeroing we did above;
3684 + * otherwise those blocks may not be zeroed after a crash.
3685 */
3686 - if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {
3687 + if (newsize > ip->i_d.di_size &&
3688 + (oldsize != ip->i_d.di_size || did_zeroing)) {
3689 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
3690 ip->i_d.di_size, newsize);
3691 if (error)
3692 return error;
3693 }
3694
3695 - /*
3696 - * Wait for all direct I/O to complete.
3697 - */
3698 + /* Now wait for all direct I/O to complete. */
3699 inode_dio_wait(inode);
3700
3701 /*
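
The thread running through these xfs hunks is the did_zeroing out-parameter: xfs_zero_eof() now reports whether it dirtied any pages, and xfs_setattr_size() flushes if either the on-disk size is stale or zeroing happened, since freshly zeroed pages beyond the old EOF exist only in memory until written back. The pattern in miniature (zero_bytes() is a hypothetical stand-in):

    #include <stdbool.h>

    extern void zero_bytes(long long from, long long to);  /* stand-in */

    static int zero_range(long long from, long long to, bool *did_zeroing)
    {
        if (from >= to)
            return 0;           /* nothing to do; flag left untouched */
        zero_bytes(from, to);
        *did_zeroing = true;    /* set only when pages were dirtied */
        return 0;
    }

Note the flag is only ever set, never cleared, so a caller can accumulate it across several helper calls.
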
3702 diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
3703 index 445d59231bc4..c50182a4a949 100644
3704 --- a/include/linux/blk_types.h
3705 +++ b/include/linux/blk_types.h
3706 @@ -181,7 +181,9 @@ enum rq_flag_bits {
3707 __REQ_ELVPRIV, /* elevator private data attached */
3708 __REQ_FAILED, /* set if the request failed */
3709 __REQ_QUIET, /* don't worry about errors */
3710 - __REQ_PREEMPT, /* set for "ide_preempt" requests */
3711 + __REQ_PREEMPT, /* set for "ide_preempt" requests and also
3712 + for requests for which the SCSI "quiesce"
3713 + state must be ignored. */
3714 __REQ_ALLOCED, /* request came from our alloc pool */
3715 __REQ_COPY_USER, /* contains copies of user pages */
3716 __REQ_FLUSH_SEQ, /* request for flush sequence */
3717 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
3718 index d5ad7b1118fc..33063f872ee3 100644
3719 --- a/include/linux/compiler.h
3720 +++ b/include/linux/compiler.h
3721 @@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
3722 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
3723 #endif
3724
3725 +#include <uapi/linux/types.h>
3726 +
3727 +static __always_inline void data_access_exceeds_word_size(void)
3728 +#ifdef __compiletime_warning
3729 +__compiletime_warning("data access exceeds word size and won't be atomic")
3730 +#endif
3731 +;
3732 +
3733 +static __always_inline void data_access_exceeds_word_size(void)
3734 +{
3735 +}
3736 +
3737 +static __always_inline void __read_once_size(volatile void *p, void *res, int size)
3738 +{
3739 + switch (size) {
3740 + case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
3741 + case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
3742 + case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
3743 +#ifdef CONFIG_64BIT
3744 + case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
3745 +#endif
3746 + default:
3747 + barrier();
3748 + __builtin_memcpy((void *)res, (const void *)p, size);
3749 + data_access_exceeds_word_size();
3750 + barrier();
3751 + }
3752 +}
3753 +
3754 +static __always_inline void __write_once_size(volatile void *p, void *res, int size)
3755 +{
3756 + switch (size) {
3757 + case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
3758 + case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
3759 + case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
3760 +#ifdef CONFIG_64BIT
3761 + case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
3762 +#endif
3763 + default:
3764 + barrier();
3765 + __builtin_memcpy((void *)p, (const void *)res, size);
3766 + data_access_exceeds_word_size();
3767 + barrier();
3768 + }
3769 +}
3770 +
3771 +/*
3772 + * Prevent the compiler from merging or refetching reads or writes. The
3773 + * compiler is also forbidden from reordering successive instances of
3774 + * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
3775 + * compiler is aware of some particular ordering. One way to make the
3776 + * compiler aware of ordering is to put the two invocations of READ_ONCE,
3777 + * WRITE_ONCE or ACCESS_ONCE() in different C statements.
3778 + *
3779 + * In contrast to ACCESS_ONCE these two macros will also work on aggregate
3780 + * data types like structs or unions. If the size of the accessed data
3781 + * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
3782 + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
3783 + * compile-time warning.
3784 + *
3785 + * Their two major use cases are: (1) Mediating communication between
3786 + * process-level code and irq/NMI handlers, all running on the same CPU,
3787 + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
3788 + * mutilate accesses that either do not require ordering or that interact
3789 + * with an explicit memory barrier or atomic instruction that provides the
3790 + * required ordering.
3791 + */
3792 +
3793 +#define READ_ONCE(x) \
3794 + ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
3795 +
3796 +#define WRITE_ONCE(x, val) \
3797 + ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
3798 +
3799 #endif /* __KERNEL__ */
3800
3801 #endif /* __ASSEMBLY__ */
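
A canonical use of the new macros is a flag shared between process context and an interrupt handler: the compiler may neither cache the load in a register across the loop nor tear the store. A kernel-style sketch (the flag and both functions are hypothetical):

    static int data_ready;              /* shared with the IRQ handler */

    void my_irq_handler(void)
    {
        WRITE_ONCE(data_ready, 1);      /* single, untorn store */
    }

    int wait_for_data(void)
    {
        while (!READ_ONCE(data_ready))  /* re-reads memory each pass */
            cpu_relax();
        return 0;
    }

These macros only constrain the compiler; if the ordering of other data matters, they still need to be paired with the appropriate smp_*mb() barriers or atomics, as the comment block above says.
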
3802 diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
3803 index 25e0df6155a4..575b7166cb08 100644
3804 --- a/include/linux/cpuidle.h
3805 +++ b/include/linux/cpuidle.h
3806 @@ -69,7 +69,6 @@ struct cpuidle_device {
3807 unsigned int cpu;
3808
3809 int last_residency;
3810 - int state_count;
3811 struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
3812 struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
3813 struct cpuidle_driver_kobj *kobj_driver;
3814 diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
3815 index d69f0577a319..3a3c4fadcc64 100644
3816 --- a/include/linux/if_vlan.h
3817 +++ b/include/linux/if_vlan.h
3818 @@ -320,8 +320,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
3819 }
3820
3821 /**
3822 - * __vlan_put_tag - regular VLAN tag inserting
3823 + * vlan_insert_tag_set_proto - regular VLAN tag inserting
3824 * @skb: skbuff to tag
3825 + * @vlan_proto: VLAN encapsulation protocol
3826 * @vlan_tci: VLAN TCI to insert
3827 *
3828 * Inserts the VLAN tag into @skb as part of the payload
3829 @@ -330,8 +331,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
3830 * Following the skb_unshare() example, in case of error, the calling function
3831 * doesn't have to worry about freeing the original skb.
3832 */
3833 -static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
3834 - __be16 vlan_proto, u16 vlan_tci)
3835 +static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
3836 + __be16 vlan_proto,
3837 + u16 vlan_tci)
3838 {
3839 skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
3840 if (skb)
3841 @@ -339,6 +341,40 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
3842 return skb;
3843 }
3844
3845 +/**
3846 + * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
3847 + * @skb: skbuff to tag
3848 + *
3849 + * Pushes the VLAN tag from @skb->vlan_tci into the payload.
3850 + *
3851 + * Following the skb_unshare() example, in case of error, the calling function
3852 + * doesn't have to worry about freeing the original skb.
3853 + */
3854 +static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
3855 +{
3856 + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
3857 + vlan_tx_tag_get(skb));
3858 + if (likely(skb))
3859 + skb->vlan_tci = 0;
3860 + return skb;
3861 +}
3862 +/**
3863 + * vlan_hwaccel_push_inside - pushes vlan tag to the payload
3864 + * @skb: skbuff to tag
3865 + *
3866 + * Checks whether a tag is present in @skb->vlan_tci and, if it is, pushes
3867 + * the VLAN tag from @skb->vlan_tci into the payload.
3868 + *
3869 + * Following the skb_unshare() example, in case of error, the calling function
3870 + * doesn't have to worry about freeing the original skb.
3871 + */
3872 +static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
3873 +{
3874 + if (vlan_tx_tag_present(skb))
3875 + skb = __vlan_hwaccel_push_inside(skb);
3876 + return skb;
3877 +}
3878 +
3879 /**
3880 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
3881 * @skb: skbuff to tag
3882 @@ -357,24 +393,6 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
3883 }
3884
3885 /**
3886 - * vlan_put_tag - inserts VLAN tag according to device features
3887 - * @skb: skbuff to tag
3888 - * @vlan_tci: VLAN TCI to insert
3889 - *
3890 - * Assumes skb->dev is the target that will xmit this frame.
3891 - * Returns a VLAN tagged skb.
3892 - */
3893 -static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb,
3894 - __be16 vlan_proto, u16 vlan_tci)
3895 -{
3896 - if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) {
3897 - return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
3898 - } else {
3899 - return __vlan_put_tag(skb, vlan_proto, vlan_tci);
3900 - }
3901 -}
3902 -
3903 -/**
3904 * __vlan_get_tag - get the VLAN ID that is part of the payload
3905 * @skb: skbuff to query
3906 * @vlan_tci: buffer to store value
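
The two push_inside helpers collapse the recurring transmit-path dance, "tag present in vlan_tci, device cannot offload it, so fold the tag into the packet data and clear vlan_tci", into one call with skb_unshare()-style error handling. The callers converted later in this patch all reduce to the shape of net/core/dev.c's validate_xmit_vlan():

    /* Caller only checks for NULL; on error the skb is already freed,
     * exactly as with skb_unshare(). */
    static struct sk_buff *xmit_prepare(struct sk_buff *skb,
                                        netdev_features_t features)
    {
        if (vlan_tx_tag_present(skb) &&
            !vlan_hw_offload_capable(features, skb->vlan_proto))
            skb = __vlan_hwaccel_push_inside(skb);
        return skb;         /* NULL means the packet was dropped */
    }
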
3907 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3908 index 22339b4b1c8c..c3fd34da6c08 100644
3909 --- a/include/linux/netdevice.h
3910 +++ b/include/linux/netdevice.h
3911 @@ -2122,6 +2122,12 @@ void netdev_freemem(struct net_device *dev);
3912 void synchronize_net(void);
3913 int init_dummy_netdev(struct net_device *dev);
3914
3915 +DECLARE_PER_CPU(int, xmit_recursion);
3916 +static inline int dev_recursion_level(void)
3917 +{
3918 + return this_cpu_read(xmit_recursion);
3919 +}
3920 +
3921 struct net_device *dev_get_by_index(struct net *net, int ifindex);
3922 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
3923 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
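
dev_recursion_level() exposes the per-CPU xmit_recursion counter that net/core/dev.c bumps around transmissions (the DEFINE_PER_CPU and export appear later in this patch). Protocol code can then detect "transmitting from inside a transmit"; see the ip6_skb_dst_mtu() and sk_mc_loop() changes below, which stop dereferencing socket state owned by an outer layer. A simplified kernel-style sketch of the counting side (do_hard_xmit() is a stand-in, not the real entry point):

    struct sk_buff;
    extern int do_hard_xmit(struct sk_buff *skb);   /* hypothetical */

    DEFINE_PER_CPU(int, xmit_recursion);
    #define RECURSION_LIMIT 10

    static int xmit_one(struct sk_buff *skb)
    {
        int rc;

        if (this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
            return -ELOOP;  /* stacked devices looped back on us */

        this_cpu_inc(xmit_recursion);
        rc = do_hard_xmit(skb);         /* may re-enter xmit_one() */
        this_cpu_dec(xmit_recursion);
        return rc;
    }
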
3924 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
3925 index c0c2bce6b0b7..d9d7e7e56352 100644
3926 --- a/include/linux/rmap.h
3927 +++ b/include/linux/rmap.h
3928 @@ -37,6 +37,16 @@ struct anon_vma {
3929 atomic_t refcount;
3930
3931 /*
3932 + * Count of child anon_vmas and VMAs which point to this anon_vma.
3933 + *
3934 + * This counter is used when deciding whether to reuse an anon_vma
3935 + * instead of forking a new one. See the comments in anon_vma_clone().
3936 + */
3937 + unsigned degree;
3938 +
3939 + struct anon_vma *parent; /* Parent of this anon_vma */
3940 +
3941 + /*
3942 * NOTE: the LSB of the rb_root.rb_node is set by
3943 * mm_take_all_locks() _after_ taking the above lock. So the
3944 * rb_root must only be read/written after taking the above lock
3945 diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
3946 index d9a4905e01d0..6e0ce8c7b8cb 100644
3947 --- a/include/linux/usb/usbnet.h
3948 +++ b/include/linux/usb/usbnet.h
3949 @@ -227,9 +227,23 @@ struct skb_data { /* skb->cb is one of these */
3950 struct urb *urb;
3951 struct usbnet *dev;
3952 enum skb_state state;
3953 - size_t length;
3954 + long length;
3955 + unsigned long packets;
3956 };
3957
3958 +/* Drivers that set FLAG_MULTI_PACKET must call this in their
3959 + * tx_fixup method before returning an skb.
3960 + */
3961 +static inline void
3962 +usbnet_set_skb_tx_stats(struct sk_buff *skb,
3963 + unsigned long packets, long bytes_delta)
3964 +{
3965 + struct skb_data *entry = (struct skb_data *) skb->cb;
3966 +
3967 + entry->packets = packets;
3968 + entry->length = bytes_delta;
3969 +}
3970 +
3971 extern int usbnet_open(struct net_device *net);
3972 extern int usbnet_stop(struct net_device *net);
3973 extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
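
Two things change in skb_data: a packet count appears (one URB can now carry several aggregated frames), and length turns from size_t into a signed long so it can hold a byte delta rather than a plain length; the unsigned version is what let the tx_bytes statistic run backwards. How an aggregating tx_fixup might report its batch (the names, framing, and delta convention here are illustrative assumptions, not a real driver):

    /* Hypothetical FLAG_MULTI_PACKET tx_fixup that has just merged n
     * queued frames into one framed URB skb. */
    static struct sk_buff *my_tx_fixup(struct usbnet *dev,
                                       struct sk_buff *skb, gfp_t flags)
    {
        unsigned long n = 3;        /* frames carried by this skb */
        long payload = 3 * 1514;    /* their bytes before framing */

        /* ... build the aggregate frame in skb ... */

        /* n packets; the signed (possibly negative) delta corrects
         * the byte accounting for framing overhead and padding. */
        usbnet_set_skb_tx_stats(skb, n, payload - skb->len);
        return skb;
    }
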
3974 diff --git a/include/net/ip.h b/include/net/ip.h
3975 index 09cf5aebb283..c0c26c3deeb5 100644
3976 --- a/include/net/ip.h
3977 +++ b/include/net/ip.h
3978 @@ -453,22 +453,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
3979
3980 #endif
3981
3982 -static inline int sk_mc_loop(struct sock *sk)
3983 -{
3984 - if (!sk)
3985 - return 1;
3986 - switch (sk->sk_family) {
3987 - case AF_INET:
3988 - return inet_sk(sk)->mc_loop;
3989 -#if IS_ENABLED(CONFIG_IPV6)
3990 - case AF_INET6:
3991 - return inet6_sk(sk)->mc_loop;
3992 -#endif
3993 - }
3994 - WARN_ON(1);
3995 - return 1;
3996 -}
3997 -
3998 bool ip_call_ra_chain(struct sk_buff *skb);
3999
4000 /*
4001 diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
4002 index 1d09b46c1e48..eda131d179d9 100644
4003 --- a/include/net/ip6_route.h
4004 +++ b/include/net/ip6_route.h
4005 @@ -174,7 +174,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
4006
4007 static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
4008 {
4009 - struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
4010 + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
4011 + inet6_sk(skb->sk) : NULL;
4012
4013 return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
4014 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
4015 diff --git a/include/net/sock.h b/include/net/sock.h
4016 index 7db3db112baa..c8146ed9e66a 100644
4017 --- a/include/net/sock.h
4018 +++ b/include/net/sock.h
4019 @@ -1806,6 +1806,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
4020
4021 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
4022
4023 +bool sk_mc_loop(struct sock *sk);
4024 +
4025 static inline bool sk_can_gso(const struct sock *sk)
4026 {
4027 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
4028 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
4029 index 9f81818f2941..d8dcc80472c1 100644
4030 --- a/kernel/bpf/verifier.c
4031 +++ b/kernel/bpf/verifier.c
4032 @@ -1324,7 +1324,8 @@ peek_stack:
4033 /* tell verifier to check for equivalent states
4034 * after every call and jump
4035 */
4036 - env->explored_states[t + 1] = STATE_LIST_MARK;
4037 + if (t + 1 < insn_cnt)
4038 + env->explored_states[t + 1] = STATE_LIST_MARK;
4039 } else {
4040 /* conditional jump with two edges */
4041 ret = push_insn(t, t + 1, FALLTHROUGH, env);
4042 diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
4043 index 791a61892bb5..7325e35403c3 100644
4044 --- a/kernel/power/snapshot.c
4045 +++ b/kernel/power/snapshot.c
4046 @@ -954,25 +954,6 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
4047 }
4048 }
4049
4050 -static bool is_nosave_page(unsigned long pfn)
4051 -{
4052 - struct nosave_region *region;
4053 -
4054 - list_for_each_entry(region, &nosave_regions, list) {
4055 - if (pfn >= region->start_pfn && pfn < region->end_pfn) {
4056 - pr_err("PM: %#010llx in e820 nosave region: "
4057 - "[mem %#010llx-%#010llx]\n",
4058 - (unsigned long long) pfn << PAGE_SHIFT,
4059 - (unsigned long long) region->start_pfn << PAGE_SHIFT,
4060 - ((unsigned long long) region->end_pfn << PAGE_SHIFT)
4061 - - 1);
4062 - return true;
4063 - }
4064 - }
4065 -
4066 - return false;
4067 -}
4068 -
4069 /**
4070 * create_basic_memory_bitmaps - create bitmaps needed for marking page
4071 * frames that should not be saved and free page frames. The pointers
4072 @@ -2038,7 +2019,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
4073 do {
4074 pfn = memory_bm_next_pfn(bm);
4075 if (likely(pfn != BM_END_OF_MAP)) {
4076 - if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
4077 + if (likely(pfn_valid(pfn)))
4078 swsusp_set_page_free(pfn_to_page(pfn));
4079 else
4080 return -EFAULT;
4081 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4082 index 9f5ed5e70eaa..b794bde3f5e1 100644
4083 --- a/kernel/sched/core.c
4084 +++ b/kernel/sched/core.c
4085 @@ -3097,6 +3097,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4086 } else {
4087 if (dl_prio(oldprio))
4088 p->dl.dl_boosted = 0;
4089 + if (rt_prio(oldprio))
4090 + p->rt.timeout = 0;
4091 p->sched_class = &fair_sched_class;
4092 }
4093
4094 diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
4095 index eb682d5c697c..6aac4beedbbe 100644
4096 --- a/kernel/time/tick-broadcast-hrtimer.c
4097 +++ b/kernel/time/tick-broadcast-hrtimer.c
4098 @@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
4099 */
4100 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
4101 {
4102 + int bc_moved;
4103 /*
4104 * We try to cancel the timer first. If the callback is on
4105 * flight on some other cpu then we let it handle it. If we
4106 @@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
4107 * restart the timer because we are in the callback, but we
4108 * can set the expiry time and let the callback return
4109 * HRTIMER_RESTART.
4110 + *
4111 + * Since we are in the idle loop at this point and because
4112 + * hrtimer_{start/cancel} functions call into tracing,
4113 + * calls to these functions must be bound within RCU_NONIDLE.
4114 */
4115 - if (hrtimer_try_to_cancel(&bctimer) >= 0) {
4116 - hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
4117 + RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
4118 + !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
4119 + 0);
4120 + if (bc_moved) {
4121 /* Bind the "device" to the cpu */
4122 bc->bound_on = smp_processor_id();
4123 } else if (bc->bound_on == smp_processor_id()) {
4124 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
4125 index 8639f6b28746..3415e7ad3973 100644
4126 --- a/mm/memory-failure.c
4127 +++ b/mm/memory-failure.c
4128 @@ -1659,8 +1659,6 @@ static int __soft_offline_page(struct page *page, int flags)
4129 * setting PG_hwpoison.
4130 */
4131 if (!is_free_buddy_page(page))
4132 - lru_add_drain_all();
4133 - if (!is_free_buddy_page(page))
4134 drain_all_pages();
4135 SetPageHWPoison(page);
4136 if (!is_free_buddy_page(page))
4137 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4138 index 1bf4807cb21e..8c71654e261f 100644
4139 --- a/mm/memory_hotplug.c
4140 +++ b/mm/memory_hotplug.c
4141 @@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
4142 return NULL;
4143
4144 arch_refresh_nodedata(nid, pgdat);
4145 + } else {
4146 + /* Reset the nr_zones and classzone_idx to 0 before reuse */
4147 + pgdat->nr_zones = 0;
4148 + pgdat->classzone_idx = 0;
4149 }
4150
4151 /* we can use NODE_DATA(nid) from here */
4152 @@ -1977,15 +1981,6 @@ void try_offline_node(int nid)
4153 if (is_vmalloc_addr(zone->wait_table))
4154 vfree(zone->wait_table);
4155 }
4156 -
4157 - /*
4158 - * Since there is no way to guarentee the address of pgdat/zone is not
4159 - * on stack of any kernel threads or used by other kernel objects
4160 - * without reference counting or other symchronizing method, do not
4161 - * reset node_data and free pgdat here. Just reset it to 0 and reuse
4162 - * the memory when the node is online again.
4163 - */
4164 - memset(pgdat, 0, sizeof(*pgdat));
4165 }
4166 EXPORT_SYMBOL(try_offline_node);
4167
4168 diff --git a/mm/mmap.c b/mm/mmap.c
4169 index 3c83bec2274c..f88b4f940327 100644
4170 --- a/mm/mmap.c
4171 +++ b/mm/mmap.c
4172 @@ -778,10 +778,10 @@ again: remove_next = 1 + (end > next->vm_end);
4173 if (exporter && exporter->anon_vma && !importer->anon_vma) {
4174 int error;
4175
4176 + importer->anon_vma = exporter->anon_vma;
4177 error = anon_vma_clone(importer, exporter);
4178 if (error)
4179 return error;
4180 - importer->anon_vma = exporter->anon_vma;
4181 }
4182 }
4183
4184 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
4185 index 437174a2aaa3..c8abd208432d 100644
4186 --- a/mm/page-writeback.c
4187 +++ b/mm/page-writeback.c
4188 @@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
4189 * bw * elapsed + write_bandwidth * (period - elapsed)
4190 * write_bandwidth = ---------------------------------------------------
4191 * period
4192 + *
4193 + * @written may have decreased due to account_page_redirty().
4194 + * Avoid underflowing @bw calculation.
4195 */
4196 - bw = written - bdi->written_stamp;
4197 + bw = written - min(written, bdi->written_stamp);
4198 bw *= HZ;
4199 if (unlikely(elapsed > period)) {
4200 do_div(bw, elapsed);
4201 @@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
4202 unsigned long now)
4203 {
4204 static DEFINE_SPINLOCK(dirty_lock);
4205 - static unsigned long update_time;
4206 + static unsigned long update_time = INITIAL_JIFFIES;
4207
4208 /*
4209 * check locklessly first to optimize away locking for the most time
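
Both page-writeback hunks harden unsigned time/counter arithmetic. The written counter can fall behind written_stamp after account_page_redirty(), and "written - stamp" on unsigned types wraps to a huge bogus bandwidth; clamping with min() makes the delta saturate at zero instead. (The INITIAL_JIFFIES change is the same family of bug: a static 0 is wrong when jiffies deliberately starts near wraparound.) A self-checking sketch of the clamp:

    #include <assert.h>

    /* Unsigned subtraction wraps, so clamp instead of going negative. */
    static unsigned long safe_delta(unsigned long now, unsigned long stamp)
    {
        return now - (now < stamp ? now : stamp);   /* now - min() */
    }

    int main(void)
    {
        assert(safe_delta(100, 40) == 60);  /* normal forward case */
        assert(safe_delta(40, 100) == 0);   /* counter went backwards:
                                             * 0, not ULONG_MAX - 59 */
        return 0;
    }
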
4210 diff --git a/mm/page_isolation.c b/mm/page_isolation.c
4211 index c8778f7e208e..ec66134fb2a5 100644
4212 --- a/mm/page_isolation.c
4213 +++ b/mm/page_isolation.c
4214 @@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
4215
4216 if (!is_migrate_isolate_page(buddy)) {
4217 __isolate_free_page(page, order);
4218 + kernel_map_pages(page, (1 << order), 1);
4219 set_page_refcounted(page);
4220 isolated_page = page;
4221 }
4222 diff --git a/mm/rmap.c b/mm/rmap.c
4223 index 3e4c7213210c..5fc824b7311a 100644
4224 --- a/mm/rmap.c
4225 +++ b/mm/rmap.c
4226 @@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
4227 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
4228 if (anon_vma) {
4229 atomic_set(&anon_vma->refcount, 1);
4230 + anon_vma->degree = 1; /* Reference for first vma */
4231 + anon_vma->parent = anon_vma;
4232 /*
4233 * Initialise the anon_vma root to point to itself. If called
4234 * from fork, the root will be reset to the parents anon_vma.
4235 @@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
4236 if (likely(!vma->anon_vma)) {
4237 vma->anon_vma = anon_vma;
4238 anon_vma_chain_link(vma, avc, anon_vma);
4239 + /* vma reference or self-parent link for new root */
4240 + anon_vma->degree++;
4241 allocated = NULL;
4242 avc = NULL;
4243 }
4244 @@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
4245 /*
4246 * Attach the anon_vmas from src to dst.
4247 * Returns 0 on success, -ENOMEM on failure.
4248 + *
4249 + * If dst->anon_vma is NULL, this function tries to find and reuse an
4250 + * existing anon_vma which has no vmas and only one child anon_vma. This
4251 + * prevents the anon_vma hierarchy from degrading into an endless linear
4252 + * chain under a constantly forking task. On the other hand, an anon_vma
4253 + * with more than one child is not reused even if it has no live vma, so
4254 + * an rmap walker has a good chance of avoiding a scan of the whole
4255 + * hierarchy when it searches for where a page is mapped.
4256 */
4257 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
4258 {
4259 @@ -256,11 +268,32 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
4260 anon_vma = pavc->anon_vma;
4261 root = lock_anon_vma_root(root, anon_vma);
4262 anon_vma_chain_link(dst, avc, anon_vma);
4263 +
4264 + /*
4265 + * Reuse an existing anon_vma if its degree is lower than two,
4266 + * which means it has no vma and only one anon_vma child.
4267 + *
4268 + * Do not choose the parent anon_vma, otherwise the first child
4269 + * will always reuse it. The root anon_vma is never reused:
4270 + * it has a self-parent reference and at least one child.
4271 + */
4272 + if (!dst->anon_vma && anon_vma != src->anon_vma &&
4273 + anon_vma->degree < 2)
4274 + dst->anon_vma = anon_vma;
4275 }
4276 + if (dst->anon_vma)
4277 + dst->anon_vma->degree++;
4278 unlock_anon_vma_root(root);
4279 return 0;
4280
4281 enomem_failure:
4282 + /*
4283 + * dst->anon_vma is dropped here otherwise its degree can be incorrectly
4284 + * decremented in unlink_anon_vmas().
4285 + * We can safely do this because callers of anon_vma_clone() don't care
4286 + * about dst->anon_vma if anon_vma_clone() failed.
4287 + */
4288 + dst->anon_vma = NULL;
4289 unlink_anon_vmas(dst);
4290 return -ENOMEM;
4291 }
4292 @@ -280,6 +313,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
4293 if (!pvma->anon_vma)
4294 return 0;
4295
4296 + /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
4297 + vma->anon_vma = NULL;
4298 +
4299 /*
4300 * First, attach the new VMA to the parent VMA's anon_vmas,
4301 * so rmap can find non-COWed pages in child processes.
4302 @@ -288,6 +324,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
4303 if (error)
4304 return error;
4305
4306 + /* An existing anon_vma has been reused, all done then. */
4307 + if (vma->anon_vma)
4308 + return 0;
4309 +
4310 /* Then add our own anon_vma. */
4311 anon_vma = anon_vma_alloc();
4312 if (!anon_vma)
4313 @@ -301,6 +341,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
4314 * lock any of the anon_vmas in this anon_vma tree.
4315 */
4316 anon_vma->root = pvma->anon_vma->root;
4317 + anon_vma->parent = pvma->anon_vma;
4318 /*
4319 * With refcounts, an anon_vma can stay around longer than the
4320 * process it belongs to. The root anon_vma needs to be pinned until
4321 @@ -311,6 +352,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
4322 vma->anon_vma = anon_vma;
4323 anon_vma_lock_write(anon_vma);
4324 anon_vma_chain_link(vma, avc, anon_vma);
4325 + anon_vma->parent->degree++;
4326 anon_vma_unlock_write(anon_vma);
4327
4328 return 0;
4329 @@ -341,12 +383,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
4330 * Leave empty anon_vmas on the list - we'll need
4331 * to free them outside the lock.
4332 */
4333 - if (RB_EMPTY_ROOT(&anon_vma->rb_root))
4334 + if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
4335 + anon_vma->parent->degree--;
4336 continue;
4337 + }
4338
4339 list_del(&avc->same_vma);
4340 anon_vma_chain_free(avc);
4341 }
4342 + if (vma->anon_vma)
4343 + vma->anon_vma->degree--;
4344 unlock_anon_vma_root(root);
4345
4346 /*
4347 @@ -357,6 +403,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
4348 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
4349 struct anon_vma *anon_vma = avc->anon_vma;
4350
4351 + BUG_ON(anon_vma->degree);
4352 put_anon_vma(anon_vma);
4353
4354 list_del(&avc->same_vma);
4355 diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
4356 index 150048fb99b0..97b8ddf57363 100644
4357 --- a/net/bridge/br_vlan.c
4358 +++ b/net/bridge/br_vlan.c
4359 @@ -199,8 +199,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
4360 if (skb->vlan_proto != proto) {
4361 /* Protocol-mismatch, empty out vlan_tci for new tag */
4362 skb_push(skb, ETH_HLEN);
4363 - skb = __vlan_put_tag(skb, skb->vlan_proto,
4364 - vlan_tx_tag_get(skb));
4365 + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
4366 + vlan_tx_tag_get(skb));
4367 if (unlikely(!skb))
4368 return false;
4369
4370 diff --git a/net/core/dev.c b/net/core/dev.c
4371 index 5db3a3f96198..5cdbc1bd9783 100644
4372 --- a/net/core/dev.c
4373 +++ b/net/core/dev.c
4374 @@ -2663,12 +2663,8 @@ static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
4375 netdev_features_t features)
4376 {
4377 if (vlan_tx_tag_present(skb) &&
4378 - !vlan_hw_offload_capable(features, skb->vlan_proto)) {
4379 - skb = __vlan_put_tag(skb, skb->vlan_proto,
4380 - vlan_tx_tag_get(skb));
4381 - if (skb)
4382 - skb->vlan_tci = 0;
4383 - }
4384 + !vlan_hw_offload_capable(features, skb->vlan_proto))
4385 + skb = __vlan_hwaccel_push_inside(skb);
4386 return skb;
4387 }
4388
4389 @@ -2857,7 +2853,9 @@ static void skb_update_prio(struct sk_buff *skb)
4390 #define skb_update_prio(skb)
4391 #endif
4392
4393 -static DEFINE_PER_CPU(int, xmit_recursion);
4394 +DEFINE_PER_CPU(int, xmit_recursion);
4395 +EXPORT_SYMBOL(xmit_recursion);
4396 +
4397 #define RECURSION_LIMIT 10
4398
4399 /**
4400 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
4401 index e6645b4f330a..e0ad5d16c9c5 100644
4402 --- a/net/core/netpoll.c
4403 +++ b/net/core/netpoll.c
4404 @@ -79,8 +79,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
4405
4406 if (vlan_tx_tag_present(skb) &&
4407 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
4408 - skb = __vlan_put_tag(skb, skb->vlan_proto,
4409 - vlan_tx_tag_get(skb));
4410 + skb = __vlan_hwaccel_push_inside(skb);
4411 if (unlikely(!skb)) {
4412 /* This is actually a packet drop, but we
4413 * don't want the code that calls this
4414 @@ -88,7 +87,6 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
4415 */
4416 goto out;
4417 }
4418 - skb->vlan_tci = 0;
4419 }
4420
4421 status = netdev_start_xmit(skb, dev, txq, false);
4422 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
4423 index 79589ae84a5d..17fd8dca921e 100644
4424 --- a/net/core/skbuff.c
4425 +++ b/net/core/skbuff.c
4426 @@ -4033,18 +4033,20 @@ EXPORT_SYMBOL(skb_try_coalesce);
4427 */
4428 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4429 {
4430 - if (xnet)
4431 - skb_orphan(skb);
4432 skb->tstamp.tv64 = 0;
4433 skb->pkt_type = PACKET_HOST;
4434 skb->skb_iif = 0;
4435 skb->ignore_df = 0;
4436 skb_dst_drop(skb);
4437 - skb->mark = 0;
4438 - skb_init_secmark(skb);
4439 secpath_reset(skb);
4440 nf_reset(skb);
4441 nf_reset_trace(skb);
4442 +
4443 + if (!xnet)
4444 + return;
4445 +
4446 + skb_orphan(skb);
4447 + skb->mark = 0;
4448 }
4449 EXPORT_SYMBOL_GPL(skb_scrub_packet);
4450
4451 diff --git a/net/core/sock.c b/net/core/sock.c
4452 index 15e0c67b1069..852acbc52f96 100644
4453 --- a/net/core/sock.c
4454 +++ b/net/core/sock.c
4455 @@ -651,6 +651,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
4456 sock_reset_flag(sk, bit);
4457 }
4458
4459 +bool sk_mc_loop(struct sock *sk)
4460 +{
4461 + if (dev_recursion_level())
4462 + return false;
4463 + if (!sk)
4464 + return true;
4465 + switch (sk->sk_family) {
4466 + case AF_INET:
4467 + return inet_sk(sk)->mc_loop;
4468 +#if IS_ENABLED(CONFIG_IPV6)
4469 + case AF_INET6:
4470 + return inet6_sk(sk)->mc_loop;
4471 +#endif
4472 + }
4473 + WARN_ON(1);
4474 + return true;
4475 +}
4476 +EXPORT_SYMBOL(sk_mc_loop);
4477 +
4478 /*
4479 * This is meant for all protocols to use and covers goings on
4480 * at the socket level. Everything here is generic.
4481 diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
4482 index 2caa6ad965a6..d5423e33d32b 100644
4483 --- a/net/ipv4/geneve.c
4484 +++ b/net/ipv4/geneve.c
4485 @@ -121,8 +121,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
4486 int min_headroom;
4487 int err;
4488
4489 - skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
4490 -
4491 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
4492 + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
4493 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
4494 @@ -131,15 +129,13 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
4495 if (unlikely(err))
4496 return err;
4497
4498 - if (vlan_tx_tag_present(skb)) {
4499 - if (unlikely(!__vlan_put_tag(skb,
4500 - skb->vlan_proto,
4501 - vlan_tx_tag_get(skb)))) {
4502 - err = -ENOMEM;
4503 - return err;
4504 - }
4505 - skb->vlan_tci = 0;
4506 - }
4507 + skb = vlan_hwaccel_push_inside(skb);
4508 + if (unlikely(!skb))
4509 + return -ENOMEM;
4510 +
4511 + skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
4512 + if (IS_ERR(skb))
4513 + return PTR_ERR(skb);
4514
4515 gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
4516 geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
4517 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
4518 index d107ee246a1d..6f46cde58e54 100644
4519 --- a/net/ipv4/tcp_input.c
4520 +++ b/net/ipv4/tcp_input.c
4521 @@ -3103,10 +3103,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
4522 if (!first_ackt.v64)
4523 first_ackt = last_ackt;
4524
4525 - if (!(sacked & TCPCB_SACKED_ACKED))
4526 + if (!(sacked & TCPCB_SACKED_ACKED)) {
4527 reord = min(pkts_acked, reord);
4528 - if (!after(scb->end_seq, tp->high_seq))
4529 - flag |= FLAG_ORIG_SACK_ACKED;
4530 + if (!after(scb->end_seq, tp->high_seq))
4531 + flag |= FLAG_ORIG_SACK_ACKED;
4532 + }
4533 }
4534
4535 if (sacked & TCPCB_SACKED_ACKED)
4536 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
4537 index 944ce5edbfb7..a5fdfe9fa542 100644
4538 --- a/net/ipv4/tcp_ipv4.c
4539 +++ b/net/ipv4/tcp_ipv4.c
4540 @@ -1514,7 +1514,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
4541 skb->sk = sk;
4542 skb->destructor = sock_edemux;
4543 if (sk->sk_state != TCP_TIME_WAIT) {
4544 - struct dst_entry *dst = sk->sk_rx_dst;
4545 + struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
4546
4547 if (dst)
4548 dst = dst_check(dst, 0);
4549 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
4550 index 022ecbc9322d..32dcb4e05b6b 100644
4551 --- a/net/ipv4/tcp_output.c
4552 +++ b/net/ipv4/tcp_output.c
4553 @@ -2895,6 +2895,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
4554 }
4555 #endif
4556
4557 + /* Do not fool tcpdump (if any), clean our debris */
4558 + skb->tstamp.tv64 = 0;
4559 return skb;
4560 }
4561 EXPORT_SYMBOL(tcp_make_synack);
4562 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4563 index 51add023b723..7b5cb003ee22 100644
4564 --- a/net/ipv6/ip6_output.c
4565 +++ b/net/ipv6/ip6_output.c
4566 @@ -555,7 +555,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
4567 {
4568 struct sk_buff *frag;
4569 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
4570 - struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
4571 + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
4572 + inet6_sk(skb->sk) : NULL;
4573 struct ipv6hdr *tmp_hdr;
4574 struct frag_hdr *fh;
4575 unsigned int mtu, hlen, left, len;
4576 diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
4577 index 4cb45c1079a2..a46c50423aec 100644
4578 --- a/net/ipv6/ndisc.c
4579 +++ b/net/ipv6/ndisc.c
4580 @@ -1215,7 +1215,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
4581 if (rt)
4582 rt6_set_expires(rt, jiffies + (HZ * lifetime));
4583 if (ra_msg->icmph.icmp6_hop_limit) {
4584 - in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
4585 + /* Only set hop_limit on the interface if it is higher than
4586 + * the current hop_limit.
4587 + */
4588 + if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
4589 + in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
4590 + } else {
4591 + ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
4592 + }
4593 if (rt)
4594 dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
4595 ra_msg->icmph.icmp6_hop_limit);
4596 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
4597 index c1136022d8d9..79fe58510ee8 100644
4598 --- a/net/ipv6/tcp_ipv6.c
4599 +++ b/net/ipv6/tcp_ipv6.c
4600 @@ -1407,6 +1407,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
4601 TCP_SKB_CB(skb)->sacked = 0;
4602 }
4603
4604 +static void tcp_v6_restore_cb(struct sk_buff *skb)
4605 +{
4606 + /* We need to move the header back to the beginning if xfrm6_policy_check()
4607 + * and tcp_v6_fill_cb() are going to be called again.
4608 + */
4609 + memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
4610 + sizeof(struct inet6_skb_parm));
4611 +}
4612 +
4613 static int tcp_v6_rcv(struct sk_buff *skb)
4614 {
4615 const struct tcphdr *th;
4616 @@ -1539,6 +1548,7 @@ do_time_wait:
4617 inet_twsk_deschedule(tw, &tcp_death_row);
4618 inet_twsk_put(tw);
4619 sk = sk2;
4620 + tcp_v6_restore_cb(skb);
4621 goto process;
4622 }
4623 /* Fall through to ACK */
4624 @@ -1547,6 +1557,7 @@ do_time_wait:
4625 tcp_v6_timewait_ack(sk, skb);
4626 break;
4627 case TCP_TW_RST:
4628 + tcp_v6_restore_cb(skb);
4629 goto no_tcp_socket;
4630 case TCP_TW_SUCCESS:
4631 ;
4632 @@ -1581,7 +1592,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
4633 skb->sk = sk;
4634 skb->destructor = sock_edemux;
4635 if (sk->sk_state != TCP_TIME_WAIT) {
4636 - struct dst_entry *dst = sk->sk_rx_dst;
4637 + struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
4638
4639 if (dst)
4640 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
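
The new tcp_v6_restore_cb() exists because skb->cb is shared storage. Since the 3.18 TCP_SKB_CB layout rework, tcp_v6_fill_cb() parks the IPv6 control-block data in a header union at the end of struct tcp_skb_cb, while IP6CB() always means "inet6_skb_parm at cb[0]". The two do_time_wait paths patched above feed the skb back through xfrm6_policy_check() and tcp_v6_fill_cb(), so the IPv6 fields must first be moved back to offset 0; memmove() rather than memcpy() because source and destination can overlap inside the same 48-byte cb[] array. A compressed sketch of the layout (fields elided):

	#define IP6CB(skb)	((struct inet6_skb_parm *)((skb)->cb))

	struct tcp_skb_cb {
		__u32	seq;
		__u32	end_seq;
		/* ... */
		union {				/* no longer at cb[0] */
			struct inet_skb_parm	h4;
			struct inet6_skb_parm	h6;	/* saved IPv6 parm */
		} header;
	};
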
4641 diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
4642 index a48bad468880..7702978a4c99 100644
4643 --- a/net/mac80211/agg-rx.c
4644 +++ b/net/mac80211/agg-rx.c
4645 @@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
4646 container_of(h, struct tid_ampdu_rx, rcu_head);
4647 int i;
4648
4649 - del_timer_sync(&tid_rx->reorder_timer);
4650 -
4651 for (i = 0; i < tid_rx->buf_size; i++)
4652 __skb_queue_purge(&tid_rx->reorder_buf[i]);
4653 kfree(tid_rx->reorder_buf);
4654 @@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
4655
4656 del_timer_sync(&tid_rx->session_timer);
4657
4658 + /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
4659 + spin_lock_bh(&tid_rx->reorder_lock);
4660 + tid_rx->removed = true;
4661 + spin_unlock_bh(&tid_rx->reorder_lock);
4662 + del_timer_sync(&tid_rx->reorder_timer);
4663 +
4664 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
4665 }
4666
4667 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4668 index bb77d6d7258a..631d59f540d1 100644
4669 --- a/net/mac80211/rx.c
4670 +++ b/net/mac80211/rx.c
4671 @@ -808,9 +808,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
4672
4673 set_release_timer:
4674
4675 - mod_timer(&tid_agg_rx->reorder_timer,
4676 - tid_agg_rx->reorder_time[j] + 1 +
4677 - HT_RX_REORDER_BUF_TIMEOUT);
4678 + if (!tid_agg_rx->removed)
4679 + mod_timer(&tid_agg_rx->reorder_timer,
4680 + tid_agg_rx->reorder_time[j] + 1 +
4681 + HT_RX_REORDER_BUF_TIMEOUT);
4682 } else {
4683 del_timer(&tid_agg_rx->reorder_timer);
4684 }
4685 diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
4686 index bcda2ac7d844..bdba4ca5e76a 100644
4687 --- a/net/mac80211/sta_info.h
4688 +++ b/net/mac80211/sta_info.h
4689 @@ -170,6 +170,7 @@ struct tid_ampdu_tx {
4690 * @reorder_lock: serializes access to reorder buffer, see below.
4691  * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_num
4692 * and ssn.
4693 + * @removed: this session is removed (but might have been found due to RCU)
4694 *
4695 * This structure's lifetime is managed by RCU, assignments to
4696 * the array holding it must hold the aggregation mutex.
4697 @@ -194,6 +195,7 @@ struct tid_ampdu_rx {
4698 u16 timeout;
4699 u8 dialog_token;
4700 bool auto_seq;
4701 + bool removed;
4702 };
4703
4704 /**
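
Taken together, the three mac80211 hunks above close a race with a self-rearming timer: del_timer_sync() alone cannot stop a timer whose re-arm path (the RX reorder release) may fire again after the sync returns, which is why the old del_timer_sync() in the RCU free callback was insufficient. The fix is the standard "set a flag under the re-arm lock, then sync" pattern; a generic sketch with hypothetical names (sess, TIMEOUT):

	/* teardown side */
	spin_lock_bh(&sess->lock);
	sess->removed = true;		/* re-arm path will now decline */
	spin_unlock_bh(&sess->lock);
	del_timer_sync(&sess->timer);	/* waits out any running handler */

	/* the path that used to re-arm unconditionally */
	spin_lock_bh(&sess->lock);
	if (!sess->removed)
		mod_timer(&sess->timer, jiffies + TIMEOUT);
	spin_unlock_bh(&sess->lock);
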
4705 diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
4706 index 8c4229b11c34..4107eae4f452 100644
4707 --- a/net/openvswitch/actions.c
4708 +++ b/net/openvswitch/actions.c
4709 @@ -184,7 +184,9 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
4710 /* push down current VLAN tag */
4711 current_tag = vlan_tx_tag_get(skb);
4712
4713 - if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
4714 + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
4715 + current_tag);
4716 + if (!skb)
4717 return -ENOMEM;
4718
4719 if (skb->ip_summed == CHECKSUM_COMPLETE)
4720 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
4721 index 68ccddb5e2c4..28213dff723d 100644
4722 --- a/net/openvswitch/datapath.c
4723 +++ b/net/openvswitch/datapath.c
4724 @@ -423,11 +423,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
4725 if (!nskb)
4726 return -ENOMEM;
4727
4728 - nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
4729 + nskb = __vlan_hwaccel_push_inside(nskb);
4730 if (!nskb)
4731 return -ENOMEM;
4732
4733 - nskb->vlan_tci = 0;
4734 skb = nskb;
4735 }
4736
4737 diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
4738 index 108b82da2fd9..e88fa34b0041 100644
4739 --- a/net/openvswitch/vport-gre.c
4740 +++ b/net/openvswitch/vport-gre.c
4741 @@ -172,14 +172,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
4742 goto err_free_rt;
4743 }
4744
4745 - if (vlan_tx_tag_present(skb)) {
4746 - if (unlikely(!__vlan_put_tag(skb,
4747 - skb->vlan_proto,
4748 - vlan_tx_tag_get(skb)))) {
4749 - err = -ENOMEM;
4750 - goto err_free_rt;
4751 - }
4752 - skb->vlan_tci = 0;
4753 + skb = vlan_hwaccel_push_inside(skb);
4754 + if (unlikely(!skb)) {
4755 + err = -ENOMEM;
4756 + goto err_free_rt;
4757 }
4758
4759 /* Push Tunnel header. */
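
All three Open vSwitch hunks above replace open-coded __vlan_put_tag() plus manual vlan_tci clearing with the vlan_insert_tag_set_proto() / __vlan_hwaccel_push_inside() helpers. Besides being shorter, the helpers fix the calling convention that piecemeal code tends to get wrong: they consume the skb on allocation failure and clear skb->vlan_tci themselves. A hedged usage sketch:

	/* On failure the helper has already freed the skb, so the caller
	 * must only report the error, never free or reuse the old pointer.
	 */
	skb = vlan_hwaccel_push_inside(skb);	/* no-op without an offloaded tag */
	if (unlikely(!skb))
		return -ENOMEM;			/* skb already consumed */
	/* here the tag sits in the payload and skb->vlan_tci is 0 */
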
4760 diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
4761 index 33db1ad4fd10..138949a31eab 100644
4762 --- a/security/selinux/selinuxfs.c
4763 +++ b/security/selinux/selinuxfs.c
4764 @@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
4765 goto out;
4766
4767 /* No partial writes. */
4768 - length = EINVAL;
4769 + length = -EINVAL;
4770 if (*ppos != 0)
4771 goto out;
4772
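
The one-character selinuxfs change is an errno sign bug: kernel code reports failure as a negative errno, and a write() handler that returns the positive constant EINVAL (22) instead tells the VFS that 22 bytes were written successfully. The convention, sketched with an illustrative handler:

	/* .write handlers: positive return = bytes consumed,
	 * negative errno = failure.  Bare EINVAL would silently
	 * count as a 22-byte partial write.
	 */
	static ssize_t demo_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
	{
		if (*ppos != 0)
			return -EINVAL;		/* correct: negative errno */
		/* ... */
		return count;			/* success: bytes written */
	}
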
4773 diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
4774 index 70faa3a32526..0215a9194d88 100644
4775 --- a/sound/firewire/bebob/bebob_maudio.c
4776 +++ b/sound/firewire/bebob/bebob_maudio.c
4777 @@ -96,10 +96,10 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
4778 struct fw_device *device = fw_parent_device(unit);
4779 int err, rcode;
4780 u64 date;
4781 - __be32 cues[3] = {
4782 - MAUDIO_BOOTLOADER_CUE1,
4783 - MAUDIO_BOOTLOADER_CUE2,
4784 - MAUDIO_BOOTLOADER_CUE3
4785 + __le32 cues[3] = {
4786 + cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
4787 + cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
4788 + cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
4789 };
4790
4791 /* check date of software used to build */
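
The bebob change is an endianness fix: the M-Audio bootloader cue words go out on the wire in little-endian byte order, so declaring them __be32 produced the wrong bytes on big-endian hosts. Typing the array __le32 and filling it via cpu_to_le32() pins one on-wire layout on every host and lets sparse's __bitwise checks flag any host-endian mixing. The general shape, with illustrative values:

	/* cpu_to_le32() is a no-op on LE hosts and a byte swap on BE hosts,
	 * so the transmitted bytes are identical either way; building with
	 * "make C=1 CF=-D__CHECK_ENDIAN__" warns if a plain u32 leaks in.
	 */
	__le32 wire[2] = {
		cpu_to_le32(0x01000000),	/* illustrative, not the real cues */
		cpu_to_le32(0x00001101),
	};
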
4792 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4793 index 8375bc424e2d..1783a3332984 100644
4794 --- a/sound/pci/hda/patch_realtek.c
4795 +++ b/sound/pci/hda/patch_realtek.c
4796 @@ -392,7 +392,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
4797 {
4798 /* We currently only handle front, HP */
4799 static hda_nid_t pins[] = {
4800 - 0x0f, 0x10, 0x14, 0x15, 0
4801 + 0x0f, 0x10, 0x14, 0x15, 0x17, 0
4802 };
4803 hda_nid_t *p;
4804 for (p = pins; *p; p++)
4805 @@ -2908,6 +2908,8 @@ static void alc283_init(struct hda_codec *codec)
4806
4807 if (!hp_pin)
4808 return;
4809 +
4810 + msleep(30);
4811 hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
4812
4813 /* Index 0x43 Direct Drive HP AMP LPM Control 1 */
4814 @@ -4255,6 +4257,7 @@ enum {
4815 ALC269_FIXUP_QUANTA_MUTE,
4816 ALC269_FIXUP_LIFEBOOK,
4817 ALC269_FIXUP_LIFEBOOK_EXTMIC,
4818 + ALC269_FIXUP_LIFEBOOK_HP_PIN,
4819 ALC269_FIXUP_AMIC,
4820 ALC269_FIXUP_DMIC,
4821 ALC269VB_FIXUP_AMIC,
4822 @@ -4405,6 +4408,13 @@ static const struct hda_fixup alc269_fixups[] = {
4823 { }
4824 },
4825 },
4826 + [ALC269_FIXUP_LIFEBOOK_HP_PIN] = {
4827 + .type = HDA_FIXUP_PINS,
4828 + .v.pins = (const struct hda_pintbl[]) {
4829 + { 0x21, 0x0221102f }, /* HP out */
4830 + { }
4831 + },
4832 + },
4833 [ALC269_FIXUP_AMIC] = {
4834 .type = HDA_FIXUP_PINS,
4835 .v.pins = (const struct hda_pintbl[]) {
4836 @@ -4875,6 +4885,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4837 SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
4838 SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
4839 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
4840 + SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
4841 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
4842 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
4843 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
4844 @@ -4901,6 +4912,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4845 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
4846 SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
4847 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4848 + SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
4849 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4850 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
4851 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
4852 diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
4853 index 8c9bf4b7aaf0..524366f9d32d 100644
4854 --- a/sound/usb/mixer_quirks.c
4855 +++ b/sound/usb/mixer_quirks.c
4856 @@ -178,6 +178,7 @@ static const struct rc_config {
4857 { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */
4858 { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
4859 { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
4860 + { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
4861 { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
4862 };
4863
4864 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4865 index cfbe0e7d1c45..272fee82f89e 100644
4866 --- a/virt/kvm/kvm_main.c
4867 +++ b/virt/kvm/kvm_main.c
4868 @@ -478,7 +478,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
4869 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
4870
4871 r = -ENOMEM;
4872 - kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
4873 + kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
4874 if (!kvm->memslots)
4875 goto out_err_no_srcu;
4876
4877 @@ -529,7 +529,7 @@ out_err_no_srcu:
4878 out_err_no_disable:
4879 for (i = 0; i < KVM_NR_BUSES; i++)
4880 kfree(kvm->buses[i]);
4881 - kfree(kvm->memslots);
4882 + kvfree(kvm->memslots);
4883 kvm_arch_free_vm(kvm);
4884 return ERR_PTR(r);
4885 }
4886 @@ -585,7 +585,7 @@ static void kvm_free_physmem(struct kvm *kvm)
4887 kvm_for_each_memslot(memslot, slots)
4888 kvm_free_physmem_slot(kvm, memslot, NULL);
4889
4890 - kfree(kvm->memslots);
4891 + kvfree(kvm->memslots);
4892 }
4893
4894 static void kvm_destroy_devices(struct kvm *kvm)
4895 @@ -867,10 +867,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
4896 }
4897
4898 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
4899 - slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
4900 - GFP_KERNEL);
4901 + slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
4902 if (!slots)
4903 goto out_free;
4904 + memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
4905 +
4906 slot = id_to_memslot(slots, mem->slot);
4907 slot->flags |= KVM_MEMSLOT_INVALID;
4908
4909 @@ -900,10 +901,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
4910 * will get overwritten by update_memslots anyway.
4911 */
4912 if (!slots) {
4913 - slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
4914 - GFP_KERNEL);
4915 + slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
4916 if (!slots)
4917 goto out_free;
4918 + memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
4919 }
4920
4921 /* actual memory is freed via old in kvm_free_physmem_slot below */
4922 @@ -917,7 +918,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
4923 kvm_arch_commit_memory_region(kvm, mem, &old, change);
4924
4925 kvm_free_physmem_slot(kvm, &old, &new);
4926 - kfree(old_memslots);
4927 + kvfree(old_memslots);
4928
4929 /*
4930 * IOMMU mapping: New slots need to be mapped. Old slots need to be
4931 @@ -936,7 +937,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
4932 return 0;
4933
4934 out_slots:
4935 - kfree(slots);
4936 + kvfree(slots);
4937 out_free:
4938 kvm_free_physmem_slot(kvm, &new, &old);
4939 out:
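
The kvm_main.c hunks above swap kzalloc()/kmemdup()/kfree() for kvm_kvzalloc()/kvfree(): struct kvm_memslots had grown large enough that a physically contiguous slab allocation can fail under memory fragmentation, so the helper falls back to vmalloc() for big sizes and kvfree() picks the matching release path. That is also why the kmemdup() calls become allocate-then-memcpy; there is no vmalloc-aware kmemdup(). A simplified sketch of the helper pair used here:

	void *kvm_kvzalloc(unsigned long size)
	{
		if (size > PAGE_SIZE)
			return vzalloc(size);		/* virtually contiguous */
		return kzalloc(size, GFP_KERNEL);	/* small: slab is cheaper */
	}

	void kvfree(const void *addr)
	{
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
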