Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0147-5.4.48-all-fixes.patch



Revision 3528
Thu Jun 25 11:14:59 2020 UTC by niro
File size: 323276 bytes
-linux-5.4.48
1 diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
2 index b6a7e7397b8b..b944fe067188 100644
3 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
4 +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
5 @@ -16,6 +16,9 @@ Required properties:
6 Documentation/devicetree/bindings/graph.txt. This port should be connected
7 to the input port of an attached HDMI or LVDS encoder chip.
8
9 +Optional properties:
10 +- pinctrl-names: Contain "default" and "sleep".
11 +
12 Example:
13
14 dpi0: dpi@1401d000 {
15 @@ -26,6 +29,9 @@ dpi0: dpi@1401d000 {
16 <&mmsys CLK_MM_DPI_ENGINE>,
17 <&apmixedsys CLK_APMIXED_TVDPLL>;
18 clock-names = "pixel", "engine", "pll";
19 + pinctrl-names = "default", "sleep";
20 + pinctrl-0 = <&dpi_pin_func>;
21 + pinctrl-1 = <&dpi_pin_idle>;
22
23 port {
24 dpi0_out: endpoint {
25 diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt
26 index 4833904d32a5..a18e996fa54b 100644
27 --- a/Documentation/virt/kvm/api.txt
28 +++ b/Documentation/virt/kvm/api.txt
29 @@ -4444,9 +4444,11 @@ EOI was received.
30 #define KVM_EXIT_HYPERV_SYNIC 1
31 #define KVM_EXIT_HYPERV_HCALL 2
32 __u32 type;
33 + __u32 pad1;
34 union {
35 struct {
36 __u32 msr;
37 + __u32 pad2;
38 __u64 control;
39 __u64 evt_page;
40 __u64 msg_page;
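Note on the padding fields above: the union that follows contains __u64 members, so compilers align it to 8 bytes, and without explicit padding a 32-bit compat userspace (where u64 may only be 4-byte aligned) could lay the struct out differently than a 64-bit kernel. A minimal, self-contained sketch of the idea — field names abbreviated, not the real kvm_run layout:

    /* Sketch: explicit padding pins the union at offset 8 and 'control'
     * at offset 16 on every ABI, instead of relying on implicit padding. */
    #include <stdint.h>
    #include <stddef.h>

    struct hyperv_exit_sketch {
        uint32_t type;
        uint32_t pad1;          /* was compiler-inserted; now explicit */
        union {
            struct {
                uint32_t msr;
                uint32_t pad2;  /* keeps 'control' 8-byte aligned */
                uint64_t control;
            } synic;
        } u;
    };

    _Static_assert(offsetof(struct hyperv_exit_sketch, u) == 8, "union at 8");
    _Static_assert(offsetof(struct hyperv_exit_sketch, u.synic.control) == 16,
                   "same offset on 32-bit and 64-bit ABIs");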
41 diff --git a/Makefile b/Makefile
42 index 1da2944b842e..fee4101b5d22 100644
43 --- a/Makefile
44 +++ b/Makefile
45 @@ -1,7 +1,7 @@
46 # SPDX-License-Identifier: GPL-2.0
47 VERSION = 5
48 PATCHLEVEL = 4
49 -SUBLEVEL = 47
50 +SUBLEVEL = 48
51 EXTRAVERSION =
52 NAME = Kleptomaniac Octopus
53
54 @@ -587,12 +587,8 @@ KBUILD_MODULES :=
55 KBUILD_BUILTIN := 1
56
57 # If we have only "make modules", don't compile built-in objects.
58 -# When we're building modules with modversions, we need to consider
59 -# the built-in objects during the descend as well, in order to
60 -# make sure the checksums are up to date before we record them.
61 -
62 ifeq ($(MAKECMDGOALS),modules)
63 - KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
64 + KBUILD_BUILTIN :=
65 endif
66
67 # If we have "make <whatever> modules", compile modules
68 @@ -1282,6 +1278,13 @@ ifdef CONFIG_MODULES
69
70 all: modules
71
72 +# When we're building modules with modversions, we need to consider
73 +# the built-in objects during the descend as well, in order to
74 +# make sure the checksums are up to date before we record them.
75 +ifdef CONFIG_MODVERSIONS
76 + KBUILD_BUILTIN := 1
77 +endif
78 +
79 # Build modules
80 #
81 # A module can be listed more than once in obj-m resulting in
82 diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
83 index af2c0063dc75..b771bf1b5352 100644
84 --- a/arch/alpha/include/asm/io.h
85 +++ b/arch/alpha/include/asm/io.h
86 @@ -322,14 +322,18 @@ static inline int __is_mmio(const volatile void __iomem *addr)
87 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
88 extern inline unsigned int ioread8(void __iomem *addr)
89 {
90 - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
91 + unsigned int ret;
92 + mb();
93 + ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
94 mb();
95 return ret;
96 }
97
98 extern inline unsigned int ioread16(void __iomem *addr)
99 {
100 - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
101 + unsigned int ret;
102 + mb();
103 + ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
104 mb();
105 return ret;
106 }
107 @@ -370,7 +374,9 @@ extern inline void outw(u16 b, unsigned long port)
108 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
109 extern inline unsigned int ioread32(void __iomem *addr)
110 {
111 - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
112 + unsigned int ret;
113 + mb();
114 + ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
115 mb();
116 return ret;
117 }
118 @@ -415,14 +421,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
119
120 extern inline u8 readb(const volatile void __iomem *addr)
121 {
122 - u8 ret = __raw_readb(addr);
123 + u8 ret;
124 + mb();
125 + ret = __raw_readb(addr);
126 mb();
127 return ret;
128 }
129
130 extern inline u16 readw(const volatile void __iomem *addr)
131 {
132 - u16 ret = __raw_readw(addr);
133 + u16 ret;
134 + mb();
135 + ret = __raw_readw(addr);
136 mb();
137 return ret;
138 }
139 @@ -463,14 +473,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
140
141 extern inline u32 readl(const volatile void __iomem *addr)
142 {
143 - u32 ret = __raw_readl(addr);
144 + u32 ret;
145 + mb();
146 + ret = __raw_readl(addr);
147 mb();
148 return ret;
149 }
150
151 extern inline u64 readq(const volatile void __iomem *addr)
152 {
153 - u64 ret = __raw_readq(addr);
154 + u64 ret;
155 + mb();
156 + ret = __raw_readq(addr);
157 mb();
158 return ret;
159 }
160 @@ -499,14 +513,44 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
161 #define outb_p outb
162 #define outw_p outw
163 #define outl_p outl
164 -#define readb_relaxed(addr) __raw_readb(addr)
165 -#define readw_relaxed(addr) __raw_readw(addr)
166 -#define readl_relaxed(addr) __raw_readl(addr)
167 -#define readq_relaxed(addr) __raw_readq(addr)
168 -#define writeb_relaxed(b, addr) __raw_writeb(b, addr)
169 -#define writew_relaxed(b, addr) __raw_writew(b, addr)
170 -#define writel_relaxed(b, addr) __raw_writel(b, addr)
171 -#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
172 +
173 +extern u8 readb_relaxed(const volatile void __iomem *addr);
174 +extern u16 readw_relaxed(const volatile void __iomem *addr);
175 +extern u32 readl_relaxed(const volatile void __iomem *addr);
176 +extern u64 readq_relaxed(const volatile void __iomem *addr);
177 +
178 +#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
179 +extern inline u8 readb_relaxed(const volatile void __iomem *addr)
180 +{
181 + mb();
182 + return __raw_readb(addr);
183 +}
184 +
185 +extern inline u16 readw_relaxed(const volatile void __iomem *addr)
186 +{
187 + mb();
188 + return __raw_readw(addr);
189 +}
190 +#endif
191 +
192 +#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
193 +extern inline u32 readl_relaxed(const volatile void __iomem *addr)
194 +{
195 + mb();
196 + return __raw_readl(addr);
197 +}
198 +
199 +extern inline u64 readq_relaxed(const volatile void __iomem *addr)
200 +{
201 + mb();
202 + return __raw_readq(addr);
203 +}
204 +#endif
205 +
206 +#define writeb_relaxed writeb
207 +#define writew_relaxed writew
208 +#define writel_relaxed writel
209 +#define writeq_relaxed writeq
210
211 /*
212 * String version of IO memory access ops:
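The Alpha changes above all follow one pattern: a memory barrier is now issued before the MMIO read as well as after it, so readX()/ioreadX() can no longer be reordered ahead of earlier memory accesses (for example, a DMA descriptor write), while the readX_relaxed() variants keep only the leading barrier and the writeX_relaxed() macros fall back to the fully ordered writeX(). A compilable stand-alone sketch of the read-side contract (the mb() stub stands in for the kernel's barrier):

    #include <stdint.h>

    #define mb() __sync_synchronize()   /* stand-in for the kernel's mb() */

    static inline uint32_t sketch_readl(const volatile void *addr)
    {
        uint32_t ret;
        mb();   /* new: order the read after earlier memory accesses */
        ret = *(const volatile uint32_t *)addr;
        mb();   /* as before: order it before later accesses */
        return ret;
    }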
213 diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c
214 index c025a3e5e357..938de13adfbf 100644
215 --- a/arch/alpha/kernel/io.c
216 +++ b/arch/alpha/kernel/io.c
217 @@ -16,21 +16,27 @@
218 unsigned int
219 ioread8(void __iomem *addr)
220 {
221 - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
222 + unsigned int ret;
223 + mb();
224 + ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
225 mb();
226 return ret;
227 }
228
229 unsigned int ioread16(void __iomem *addr)
230 {
231 - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
232 + unsigned int ret;
233 + mb();
234 + ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
235 mb();
236 return ret;
237 }
238
239 unsigned int ioread32(void __iomem *addr)
240 {
241 - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
242 + unsigned int ret;
243 + mb();
244 + ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
245 mb();
246 return ret;
247 }
248 @@ -148,28 +154,36 @@ EXPORT_SYMBOL(__raw_writeq);
249
250 u8 readb(const volatile void __iomem *addr)
251 {
252 - u8 ret = __raw_readb(addr);
253 + u8 ret;
254 + mb();
255 + ret = __raw_readb(addr);
256 mb();
257 return ret;
258 }
259
260 u16 readw(const volatile void __iomem *addr)
261 {
262 - u16 ret = __raw_readw(addr);
263 + u16 ret;
264 + mb();
265 + ret = __raw_readw(addr);
266 mb();
267 return ret;
268 }
269
270 u32 readl(const volatile void __iomem *addr)
271 {
272 - u32 ret = __raw_readl(addr);
273 + u32 ret;
274 + mb();
275 + ret = __raw_readl(addr);
276 mb();
277 return ret;
278 }
279
280 u64 readq(const volatile void __iomem *addr)
281 {
282 - u64 ret = __raw_readq(addr);
283 + u64 ret;
284 + mb();
285 + ret = __raw_readq(addr);
286 mb();
287 return ret;
288 }
289 @@ -207,6 +221,38 @@ EXPORT_SYMBOL(writew);
290 EXPORT_SYMBOL(writel);
291 EXPORT_SYMBOL(writeq);
292
293 +/*
294 + * The _relaxed functions must be ordered w.r.t. each other, but they don't
295 + * have to be ordered w.r.t. other memory accesses.
296 + */
297 +u8 readb_relaxed(const volatile void __iomem *addr)
298 +{
299 + mb();
300 + return __raw_readb(addr);
301 +}
302 +
303 +u16 readw_relaxed(const volatile void __iomem *addr)
304 +{
305 + mb();
306 + return __raw_readw(addr);
307 +}
308 +
309 +u32 readl_relaxed(const volatile void __iomem *addr)
310 +{
311 + mb();
312 + return __raw_readl(addr);
313 +}
314 +
315 +u64 readq_relaxed(const volatile void __iomem *addr)
316 +{
317 + mb();
318 + return __raw_readq(addr);
319 +}
320 +
321 +EXPORT_SYMBOL(readb_relaxed);
322 +EXPORT_SYMBOL(readw_relaxed);
323 +EXPORT_SYMBOL(readl_relaxed);
324 +EXPORT_SYMBOL(readq_relaxed);
325
326 /*
327 * Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
328 diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
329 index 1333a68b9373..b8db77b7f5d8 100644
330 --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
331 +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
332 @@ -40,7 +40,7 @@
333
334 ahb {
335 usb0: gadget@300000 {
336 - atmel,vbus-gpio = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>;
337 + atmel,vbus-gpio = <&pioA PIN_PB11 GPIO_ACTIVE_HIGH>;
338 pinctrl-names = "default";
339 pinctrl-0 = <&pinctrl_usba_vbus>;
340 status = "okay";
341 diff --git a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
342 index ce87d2ff27aa..4b9c4cab0314 100644
343 --- a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
344 +++ b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
345 @@ -68,7 +68,7 @@
346
347 i2c_cm36651: i2c-gpio-2 {
348 compatible = "i2c-gpio";
349 - gpios = <&gpf0 0 GPIO_ACTIVE_LOW>, <&gpf0 1 GPIO_ACTIVE_LOW>;
350 + gpios = <&gpf0 0 GPIO_ACTIVE_HIGH>, <&gpf0 1 GPIO_ACTIVE_HIGH>;
351 i2c-gpio,delay-us = <2>;
352 #address-cells = <1>;
353 #size-cells = <0>;
354 diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
355 index 8ff70b856334..d419b77201f7 100644
356 --- a/arch/arm/boot/dts/s5pv210-aries.dtsi
357 +++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
358 @@ -454,6 +454,7 @@
359 pinctrl-names = "default";
360 cap-sd-highspeed;
361 cap-mmc-highspeed;
362 + keep-power-in-suspend;
363
364 mmc-pwrseq = <&wifi_pwrseq>;
365 non-removable;
366 diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
367 index e512e606eabd..5ea3421fa1e8 100644
368 --- a/arch/arm/mach-tegra/tegra.c
369 +++ b/arch/arm/mach-tegra/tegra.c
370 @@ -106,8 +106,8 @@ static const char * const tegra_dt_board_compat[] = {
371 };
372
373 DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)")
374 - .l2c_aux_val = 0x3c400001,
375 - .l2c_aux_mask = 0xc20fc3fe,
376 + .l2c_aux_val = 0x3c400000,
377 + .l2c_aux_mask = 0xc20fc3ff,
378 .smp = smp_ops(tegra_smp_ops),
379 .map_io = tegra_map_common_io,
380 .init_early = tegra_init_early,
381 diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
382 index 5461d589a1e2..60ac7c5999a9 100644
383 --- a/arch/arm/mm/proc-macros.S
384 +++ b/arch/arm/mm/proc-macros.S
385 @@ -5,6 +5,7 @@
386 * VMA_VM_FLAGS
387 * VM_EXEC
388 */
389 +#include <linux/const.h>
390 #include <asm/asm-offsets.h>
391 #include <asm/thread_info.h>
392
393 @@ -30,7 +31,7 @@
394 * act_mm - get current->active_mm
395 */
396 .macro act_mm, rd
397 - bic \rd, sp, #8128
398 + bic \rd, sp, #(THREAD_SIZE - 1) & ~63
399 bic \rd, \rd, #63
400 ldr \rd, [\rd, #TI_TASK]
401 .if (TSK_ACTIVE_MM > IMM12_MASK)
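The act_mm fix above replaces the hard-coded immediate 8128 (0x1fc0, correct only for 8 KiB stacks) with a mask derived from THREAD_SIZE; the constant stays split across two BIC instructions because an ARM data-processing immediate cannot encode THREAD_SIZE - 1 in one instruction. In C, the two BICs compute the equivalent of the sketch below (THREAD_SIZE value is illustrative, the real one is config-dependent):

    #include <stdint.h>

    #define THREAD_SIZE 8192UL   /* illustrative example value */

    /* Round sp down to the base of the combined stack/thread_info area. */
    static uintptr_t thread_info_base(uintptr_t sp)
    {
        uintptr_t r = sp & ~((THREAD_SIZE - 1) & ~63UL); /* bic #(THREAD_SIZE-1)&~63 */
        return r & ~63UL;                                /* bic #63 */
    }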
402 diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
403 index 665c78e0665a..3e7dda6f1ab1 100644
404 --- a/arch/arm64/include/asm/cacheflush.h
405 +++ b/arch/arm64/include/asm/cacheflush.h
406 @@ -79,7 +79,7 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
407 * IPI all online CPUs so that they undergo a context synchronization
408 * event and are forced to refetch the new instructions.
409 */
410 -#ifdef CONFIG_KGDB
411 +
412 /*
413 * KGDB performs cache maintenance with interrupts disabled, so we
414 * will deadlock trying to IPI the secondary CPUs. In theory, we can
415 @@ -89,9 +89,9 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
416 * the patching operation, so we don't need extra IPIs here anyway.
417 * In which case, add a KGDB-specific bodge and return early.
418 */
419 - if (kgdb_connected && irqs_disabled())
420 + if (in_dbg_master())
421 return;
422 -#endif
423 +
424 kick_all_cpus_sync();
425 }
426
427 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
428 index 13ebe2bad79f..41dd4b1f0ccb 100644
429 --- a/arch/arm64/include/asm/pgtable.h
430 +++ b/arch/arm64/include/asm/pgtable.h
431 @@ -456,6 +456,7 @@ extern pgd_t init_pg_dir[PTRS_PER_PGD];
432 extern pgd_t init_pg_end[];
433 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
434 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
435 +extern pgd_t idmap_pg_end[];
436 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
437
438 extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
439 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
440 index 989b1944cb71..bdb5ec341900 100644
441 --- a/arch/arm64/kernel/head.S
442 +++ b/arch/arm64/kernel/head.S
443 @@ -393,13 +393,19 @@ __create_page_tables:
444
445 /*
446 * Since the page tables have been populated with non-cacheable
447 - * accesses (MMU disabled), invalidate the idmap and swapper page
448 - * tables again to remove any speculatively loaded cache lines.
449 + * accesses (MMU disabled), invalidate those tables again to
450 + * remove any speculatively loaded cache lines.
451 */
452 + dmb sy
453 +
454 adrp x0, idmap_pg_dir
455 + adrp x1, idmap_pg_end
456 + sub x1, x1, x0
457 + bl __inval_dcache_area
458 +
459 + adrp x0, init_pg_dir
460 adrp x1, init_pg_end
461 sub x1, x1, x0
462 - dmb sy
463 bl __inval_dcache_area
464
465 ret x28
466 diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
467 index d801a7094076..a612da533ea2 100644
468 --- a/arch/arm64/kernel/insn.c
469 +++ b/arch/arm64/kernel/insn.c
470 @@ -1508,16 +1508,10 @@ static u32 aarch64_encode_immediate(u64 imm,
471 u32 insn)
472 {
473 unsigned int immr, imms, n, ones, ror, esz, tmp;
474 - u64 mask = ~0UL;
475 -
476 - /* Can't encode full zeroes or full ones */
477 - if (!imm || !~imm)
478 - return AARCH64_BREAK_FAULT;
479 + u64 mask;
480
481 switch (variant) {
482 case AARCH64_INSN_VARIANT_32BIT:
483 - if (upper_32_bits(imm))
484 - return AARCH64_BREAK_FAULT;
485 esz = 32;
486 break;
487 case AARCH64_INSN_VARIANT_64BIT:
488 @@ -1529,6 +1523,12 @@ static u32 aarch64_encode_immediate(u64 imm,
489 return AARCH64_BREAK_FAULT;
490 }
491
492 + mask = GENMASK(esz - 1, 0);
493 +
494 + /* Can't encode full zeroes, full ones, or value wider than the mask */
495 + if (!imm || imm == mask || imm & ~mask)
496 + return AARCH64_BREAK_FAULT;
497 +
498 /*
499 * Inverse of Replicate(). Try to spot a repeating pattern
500 * with a pow2 stride.
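The reordering above closes a validation hole: the old code rejected all-zeroes/all-ones against the full 64-bit value before the element size was known, so for the 32-bit variant an immediate of 0xffffffff (all ones at esz = 32) was not rejected and could be mis-encoded. Checking after esz is chosen, against a width-sized mask, also subsumes the old upper_32_bits() test. The corrected validation in isolation:

    #include <stdint.h>
    #include <stdbool.h>

    #define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    static bool logic_imm_encodable(uint64_t imm, unsigned int esz) /* esz: 32 or 64 */
    {
        uint64_t mask = GENMASK_ULL(esz - 1, 0);

        /* Can't encode full zeroes, full ones, or bits beyond the width. */
        return imm && imm != mask && !(imm & ~mask);
    }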
501 diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
502 index aa76f7259668..e1af25dbc57e 100644
503 --- a/arch/arm64/kernel/vmlinux.lds.S
504 +++ b/arch/arm64/kernel/vmlinux.lds.S
505 @@ -142,6 +142,7 @@ SECTIONS
506 . = ALIGN(PAGE_SIZE);
507 idmap_pg_dir = .;
508 . += IDMAP_DIR_SIZE;
509 + idmap_pg_end = .;
510
511 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
512 tramp_pg_dir = .;
513 diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h
514 index de1470c4d829..1149251ea58d 100644
515 --- a/arch/m68k/include/asm/mac_via.h
516 +++ b/arch/m68k/include/asm/mac_via.h
517 @@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping;
518
519 struct irq_desc;
520
521 +extern void via_l2_flush(int writeback);
522 extern void via_register_interrupts(void);
523 extern void via_irq_enable(int);
524 extern void via_irq_disable(int);
525 diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
526 index 611f73bfc87c..d0126ab01360 100644
527 --- a/arch/m68k/mac/config.c
528 +++ b/arch/m68k/mac/config.c
529 @@ -59,7 +59,6 @@ extern void iop_preinit(void);
530 extern void iop_init(void);
531 extern void via_init(void);
532 extern void via_init_clock(irq_handler_t func);
533 -extern void via_flush_cache(void);
534 extern void oss_init(void);
535 extern void psc_init(void);
536 extern void baboon_init(void);
537 @@ -130,21 +129,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record)
538 return unknown;
539 }
540
541 -/*
542 - * Flip into 24bit mode for an instant - flushes the L2 cache card. We
543 - * have to disable interrupts for this. Our IRQ handlers will crap
544 - * themselves if they take an IRQ in 24bit mode!
545 - */
546 -
547 -static void mac_cache_card_flush(int writeback)
548 -{
549 - unsigned long flags;
550 -
551 - local_irq_save(flags);
552 - via_flush_cache();
553 - local_irq_restore(flags);
554 -}
555 -
556 void __init config_mac(void)
557 {
558 if (!MACH_IS_MAC)
559 @@ -175,9 +159,8 @@ void __init config_mac(void)
560 * not.
561 */
562
563 - if (macintosh_config->ident == MAC_MODEL_IICI
564 - || macintosh_config->ident == MAC_MODEL_IIFX)
565 - mach_l2_flush = mac_cache_card_flush;
566 + if (macintosh_config->ident == MAC_MODEL_IICI)
567 + mach_l2_flush = via_l2_flush;
568 }
569
570
571 diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
572 index 3c2cfcb74982..1f0fad2a98a0 100644
573 --- a/arch/m68k/mac/via.c
574 +++ b/arch/m68k/mac/via.c
575 @@ -294,10 +294,14 @@ void via_debug_dump(void)
576 * the system into 24-bit mode for an instant.
577 */
578
579 -void via_flush_cache(void)
580 +void via_l2_flush(int writeback)
581 {
582 + unsigned long flags;
583 +
584 + local_irq_save(flags);
585 via2[gBufB] &= ~VIA2B_vMode32;
586 via2[gBufB] |= VIA2B_vMode32;
587 + local_irq_restore(flags);
588 }
589
590 /*
591 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
592 index cdc09b71febe..5403a91ce098 100644
593 --- a/arch/mips/Makefile
594 +++ b/arch/mips/Makefile
595 @@ -285,12 +285,23 @@ ifdef CONFIG_64BIT
596 endif
597 endif
598
599 +# When linking a 32-bit executable the LLVM linker cannot cope with a
600 +# 32-bit load address that has been sign-extended to 64 bits. Simply
601 +# remove the upper 32 bits then, as it is safe to do so with other
602 +# linkers.
603 +ifdef CONFIG_64BIT
604 + load-ld = $(load-y)
605 +else
606 + load-ld = $(subst 0xffffffff,0x,$(load-y))
607 +endif
608 +
609 KBUILD_AFLAGS += $(cflags-y)
610 KBUILD_CFLAGS += $(cflags-y)
611 -KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
612 +KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld)
613 KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
614
615 bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
616 + LINKER_LOAD_ADDRESS=$(load-ld) \
617 VMLINUX_ENTRY_ADDRESS=$(entry-y) \
618 PLATFORM="$(platform-y)" \
619 ITS_INPUTS="$(its-y)"
620 diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
621 index d859f079b771..378cbfb31ee7 100644
622 --- a/arch/mips/boot/compressed/Makefile
623 +++ b/arch/mips/boot/compressed/Makefile
624 @@ -90,7 +90,7 @@ ifneq ($(zload-y),)
625 VMLINUZ_LOAD_ADDRESS := $(zload-y)
626 else
627 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
628 - $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
629 + $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS))
630 endif
631 UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS)
632
633 diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
634 index 90ee0084d786..e41f4841cb4d 100644
635 --- a/arch/mips/configs/loongson3_defconfig
636 +++ b/arch/mips/configs/loongson3_defconfig
637 @@ -231,7 +231,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
638 CONFIG_MEDIA_USB_SUPPORT=y
639 CONFIG_USB_VIDEO_CLASS=m
640 CONFIG_DRM=y
641 -CONFIG_DRM_RADEON=y
642 +CONFIG_DRM_RADEON=m
643 CONFIG_FB_RADEON=y
644 CONFIG_LCD_CLASS_DEVICE=y
645 CONFIG_LCD_PLATFORM=m
646 diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
647 index 983a6a7f43a1..3e26b0c7391b 100644
648 --- a/arch/mips/include/asm/cpu-features.h
649 +++ b/arch/mips/include/asm/cpu-features.h
650 @@ -288,10 +288,12 @@
651 # define cpu_has_mips32r6 __isa_ge_or_flag(6, MIPS_CPU_ISA_M32R6)
652 #endif
653 #ifndef cpu_has_mips64r1
654 -# define cpu_has_mips64r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1)
655 +# define cpu_has_mips64r1 (cpu_has_64bits && \
656 + __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1))
657 #endif
658 #ifndef cpu_has_mips64r2
659 -# define cpu_has_mips64r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2)
660 +# define cpu_has_mips64r2 (cpu_has_64bits && \
661 + __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2))
662 #endif
663 #ifndef cpu_has_mips64r6
664 # define cpu_has_mips64r6 __isa_ge_and_flag(6, MIPS_CPU_ISA_M64R6)
665 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
666 index bdbdc19a2b8f..3afdb39d092a 100644
667 --- a/arch/mips/include/asm/mipsregs.h
668 +++ b/arch/mips/include/asm/mipsregs.h
669 @@ -750,7 +750,7 @@
670
671 /* MAAR bit definitions */
672 #define MIPS_MAAR_VH (_U64CAST_(1) << 63)
673 -#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
674 +#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12)
675 #define MIPS_MAAR_ADDR_SHIFT 12
676 #define MIPS_MAAR_S (_ULCAST_(1) << 1)
677 #define MIPS_MAAR_VL (_ULCAST_(1) << 0)
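The MIPS_MAAR_ADDR fix matters on 32-bit kernels: the old expression scaled with BITS_PER_LONG, so a MIPS32 kernel computed a 32-bit mask and truncated the upper physical-address bits, while GENMASK_ULL(55, 12) covers PA bits 55..12 regardless of word size. A quick check of both expressions:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT_ULL(n)        (1ULL << (n))
    #define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        unsigned int bits_per_long = 32;  /* the broken case: a 32-bit kernel */
        uint64_t old = (BIT_ULL(bits_per_long - 12) - 1) << 12;

        printf("old: 0x%016llx\n", (unsigned long long)old);                 /* 0x00000000fffff000 */
        printf("new: 0x%016llx\n", (unsigned long long)GENMASK_ULL(55, 12)); /* 0x00fffffffffff000 */
        return 0;
    }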
678 diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
679 index efde27c99414..9c5f8a5d097f 100644
680 --- a/arch/mips/kernel/genex.S
681 +++ b/arch/mips/kernel/genex.S
682 @@ -474,20 +474,20 @@ NESTED(nmi_handler, PT_SIZE, sp)
683 .endm
684
685 .macro __build_clear_fpe
686 + CLI
687 + TRACE_IRQS_OFF
688 .set push
689 /* gas fails to assemble cfc1 for some archs (octeon).*/ \
690 .set mips1
691 SET_HARDFLOAT
692 cfc1 a1, fcr31
693 .set pop
694 - CLI
695 - TRACE_IRQS_OFF
696 .endm
697
698 .macro __build_clear_msa_fpe
699 - _cfcmsa a1, MSA_CSR
700 CLI
701 TRACE_IRQS_OFF
702 + _cfcmsa a1, MSA_CSR
703 .endm
704
705 .macro __build_clear_ade
706 diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
707 index e5ea3db23d6b..a9eab83d9148 100644
708 --- a/arch/mips/kernel/mips-cm.c
709 +++ b/arch/mips/kernel/mips-cm.c
710 @@ -119,9 +119,9 @@ static char *cm2_causes[32] = {
711 "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
712 "0x08", "0x09", "0x0a", "0x0b",
713 "0x0c", "0x0d", "0x0e", "0x0f",
714 - "0x10", "0x11", "0x12", "0x13",
715 - "0x14", "0x15", "0x16", "INTVN_WR_ERR",
716 - "INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
717 + "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13",
718 + "0x14", "0x15", "0x16", "0x17",
719 + "0x18", "0x19", "0x1a", "0x1b",
720 "0x1c", "0x1d", "0x1e", "0x1f"
721 };
722
723 diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
724 index 5eec13b8d222..7b06e6ee6817 100644
725 --- a/arch/mips/kernel/setup.c
726 +++ b/arch/mips/kernel/setup.c
727 @@ -653,7 +653,17 @@ static void __init arch_mem_init(char **cmdline_p)
728 crashk_res.end - crashk_res.start + 1);
729 #endif
730 device_tree_init();
731 +
732 + /*
733 + * In order to reduce the possibility of kernel panic when failed to
734 + * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
735 + * low memory as small as possible before plat_swiotlb_setup(), so
736 + * make sparse_init() using top-down allocation.
737 + */
738 + memblock_set_bottom_up(false);
739 sparse_init();
740 + memblock_set_bottom_up(true);
741 +
742 plat_swiotlb_setup();
743
744 dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
745 diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
746 index 37e9413a393d..caa01457dce6 100644
747 --- a/arch/mips/kernel/time.c
748 +++ b/arch/mips/kernel/time.c
749 @@ -18,12 +18,82 @@
750 #include <linux/smp.h>
751 #include <linux/spinlock.h>
752 #include <linux/export.h>
753 +#include <linux/cpufreq.h>
754 +#include <linux/delay.h>
755
756 #include <asm/cpu-features.h>
757 #include <asm/cpu-type.h>
758 #include <asm/div64.h>
759 #include <asm/time.h>
760
761 +#ifdef CONFIG_CPU_FREQ
762 +
763 +static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
764 +static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
765 +static unsigned long glb_lpj_ref;
766 +static unsigned long glb_lpj_ref_freq;
767 +
768 +static int cpufreq_callback(struct notifier_block *nb,
769 + unsigned long val, void *data)
770 +{
771 + struct cpufreq_freqs *freq = data;
772 + struct cpumask *cpus = freq->policy->cpus;
773 + unsigned long lpj;
774 + int cpu;
775 +
776 + /*
777 + * Skip lpj numbers adjustment if the CPU-freq transition is safe for
778 + * the loops delay. (Is this possible?)
779 + */
780 + if (freq->flags & CPUFREQ_CONST_LOOPS)
781 + return NOTIFY_OK;
782 +
783 + /* Save the initial values of the lpjes for future scaling. */
784 + if (!glb_lpj_ref) {
785 + glb_lpj_ref = boot_cpu_data.udelay_val;
786 + glb_lpj_ref_freq = freq->old;
787 +
788 + for_each_online_cpu(cpu) {
789 + per_cpu(pcp_lpj_ref, cpu) =
790 + cpu_data[cpu].udelay_val;
791 + per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
792 + }
793 + }
794 +
795 + /*
796 + * Adjust global lpj variable and per-CPU udelay_val number in
797 + * accordance with the new CPU frequency.
798 + */
799 + if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
800 + (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
801 + loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
802 + glb_lpj_ref_freq,
803 + freq->new);
804 +
805 + for_each_cpu(cpu, cpus) {
806 + lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
807 + per_cpu(pcp_lpj_ref_freq, cpu),
808 + freq->new);
809 + cpu_data[cpu].udelay_val = (unsigned int)lpj;
810 + }
811 + }
812 +
813 + return NOTIFY_OK;
814 +}
815 +
816 +static struct notifier_block cpufreq_notifier = {
817 + .notifier_call = cpufreq_callback,
818 +};
819 +
820 +static int __init register_cpufreq_notifier(void)
821 +{
822 + return cpufreq_register_notifier(&cpufreq_notifier,
823 + CPUFREQ_TRANSITION_NOTIFIER);
824 +}
825 +core_initcall(register_cpufreq_notifier);
826 +
827 +#endif /* CONFIG_CPU_FREQ */
828 +
829 /*
830 * forward reference
831 */
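The new notifier above brings MIPS in line with architectures that rescale the delay-loop calibration on frequency transitions: udelay() busy-loops for counts derived from loops_per_jiffy/udelay_val, so the counts must track the clock as lpj_new = lpj_ref * f_new / f_ref, applied pre-change when the clock rises and post-change when it falls so delays never come up short in between. cpufreq_scale() computes that proportion; a worked example of the arithmetic (reference values invented for illustration):

    #include <stdio.h>

    /* Same proportion cpufreq_scale() computes: ref * new_freq / ref_freq. */
    static unsigned long scale_lpj(unsigned long ref_lpj,
                                   unsigned long ref_khz, unsigned long new_khz)
    {
        return (unsigned long)((unsigned long long)ref_lpj * new_khz / ref_khz);
    }

    int main(void)
    {
        /* Calibrated at 1.0 GHz with lpj 4980736, reclocked to 1.5 GHz: */
        printf("%lu\n", scale_lpj(4980736UL, 1000000UL, 1500000UL)); /* 7471104 */
        return 0;
    }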
832 diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
833 index 33ee0d18fb0a..eb9d7af93836 100644
834 --- a/arch/mips/kernel/vmlinux.lds.S
835 +++ b/arch/mips/kernel/vmlinux.lds.S
836 @@ -50,7 +50,7 @@ SECTIONS
837 /* . = 0xa800000000300000; */
838 . = 0xffffffff80300000;
839 #endif
840 - . = VMLINUX_LOAD_ADDRESS;
841 + . = LINKER_LOAD_ADDRESS;
842 /* read-only */
843 _text = .; /* Text and read-only data */
844 .text : {
845 diff --git a/arch/mips/tools/elf-entry.c b/arch/mips/tools/elf-entry.c
846 index adde79ce7fc0..dbd14ff05b4c 100644
847 --- a/arch/mips/tools/elf-entry.c
848 +++ b/arch/mips/tools/elf-entry.c
849 @@ -51,11 +51,14 @@ int main(int argc, const char *argv[])
850 nread = fread(&hdr, 1, sizeof(hdr), file);
851 if (nread != sizeof(hdr)) {
852 perror("Unable to read input file");
853 + fclose(file);
854 return EXIT_FAILURE;
855 }
856
857 - if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG))
858 + if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) {
859 + fclose(file);
860 die("Input is not an ELF\n");
861 + }
862
863 switch (hdr.ehdr32.e_ident[EI_CLASS]) {
864 case ELFCLASS32:
865 @@ -67,6 +70,7 @@ int main(int argc, const char *argv[])
866 entry = be32toh(hdr.ehdr32.e_entry);
867 break;
868 default:
869 + fclose(file);
870 die("Invalid ELF encoding\n");
871 }
872
873 @@ -83,14 +87,17 @@ int main(int argc, const char *argv[])
874 entry = be64toh(hdr.ehdr64.e_entry);
875 break;
876 default:
877 + fclose(file);
878 die("Invalid ELF encoding\n");
879 }
880 break;
881
882 default:
883 + fclose(file);
884 die("Invalid ELF class\n");
885 }
886
887 printf("0x%016" PRIx64 "\n", entry);
888 + fclose(file);
889 return EXIT_SUCCESS;
890 }
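The elf-entry.c hunks plug file-handle leaks by adding an fclose() ahead of every early exit. With this many exit paths, the usual C idiom is a single cleanup label; a sketch of that alternative shape (not what the patch does — presumably the backport keeps the existing structure to stay minimal):

    #include <stdio.h>
    #include <stdlib.h>

    static int check_magic(const char *path)
    {
        int ret = EXIT_FAILURE;
        unsigned char ident[4];
        FILE *file = fopen(path, "rb");

        if (!file)
            return EXIT_FAILURE;
        if (fread(ident, 1, sizeof(ident), file) != sizeof(ident))
            goto out;                 /* every error path funnels here */
        if (ident[0] == 0x7f && ident[1] == 'E' &&
            ident[2] == 'L' && ident[3] == 'F')
            ret = EXIT_SUCCESS;
    out:
        fclose(file);                 /* closed exactly once */
        return ret;
    }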
891 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
892 index 3dc5aecdd853..44431dc06982 100644
893 --- a/arch/powerpc/Kconfig
894 +++ b/arch/powerpc/Kconfig
895 @@ -171,7 +171,7 @@ config PPC
896 select HAVE_ARCH_AUDITSYSCALL
897 select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
898 select HAVE_ARCH_JUMP_LABEL
899 - select HAVE_ARCH_KASAN if PPC32
900 + select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
901 select HAVE_ARCH_KGDB
902 select HAVE_ARCH_MMAP_RND_BITS
903 select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
904 diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
905 index 1a2c80e8be84..6eb311eb818b 100644
906 --- a/arch/powerpc/include/asm/book3s/32/kup.h
907 +++ b/arch/powerpc/include/asm/book3s/32/kup.h
908 @@ -2,6 +2,7 @@
909 #ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
910 #define _ASM_POWERPC_BOOK3S_32_KUP_H
911
912 +#include <asm/bug.h>
913 #include <asm/book3s/32/mmu-hash.h>
914
915 #ifdef __ASSEMBLY__
916 @@ -75,7 +76,7 @@
917
918 .macro kuap_check current, gpr
919 #ifdef CONFIG_PPC_KUAP_DEBUG
920 - lwz \gpr, KUAP(thread)
921 + lwz \gpr, THREAD + KUAP(\current)
922 999: twnei \gpr, 0
923 EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
924 #endif
925 diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
926 index c814a2b55389..8d61c8f3fec4 100644
927 --- a/arch/powerpc/include/asm/fadump-internal.h
928 +++ b/arch/powerpc/include/asm/fadump-internal.h
929 @@ -64,12 +64,14 @@ struct fadump_memory_range {
930 };
931
932 /* fadump memory ranges info */
933 +#define RNG_NAME_SZ 16
934 struct fadump_mrange_info {
935 - char name[16];
936 + char name[RNG_NAME_SZ];
937 struct fadump_memory_range *mem_ranges;
938 u32 mem_ranges_sz;
939 u32 mem_range_cnt;
940 u32 max_mem_ranges;
941 + bool is_static;
942 };
943
944 /* Platform specific callback functions */
945 diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
946 index 296e51c2f066..b68eeff77806 100644
947 --- a/arch/powerpc/include/asm/kasan.h
948 +++ b/arch/powerpc/include/asm/kasan.h
949 @@ -23,17 +23,13 @@
950
951 #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
952
953 -#define KASAN_SHADOW_END 0UL
954 -
955 -#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
956 +#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT))
957
958 #ifdef CONFIG_KASAN
959 void kasan_early_init(void);
960 -void kasan_mmu_init(void);
961 void kasan_init(void);
962 #else
963 static inline void kasan_init(void) { }
964 -static inline void kasan_mmu_init(void) { }
965 #endif
966
967 #endif /* __ASSEMBLY */
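The new KASAN_SHADOW_END replaces the old placeholder of 0UL (which only worked via unsigned wraparound in the SIZE computation) with a value derived from the shadow scaling itself. Evaluated in 32-bit unsigned arithmetic, -(-start >> shift) equals 2^32 - ((2^32 - start) >> shift): the gap from END up to the wrap point is exactly (2^32 - START) >> shift, the amount of shadow needed for everything above START. A quick numeric check (the start value is illustrative, not the real PPC32 layout):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t start = 0xf8000000u, shift = 3;
        uint32_t a = -(-start >> shift);
        uint32_t b = (uint32_t)(0x100000000ull -
                                ((0x100000000ull - start) >> shift));

        printf("0x%08x 0x%08x\n", a, b);  /* both print 0xff000000 */
        return 0;
    }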
968 diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
969 index 05606025a131..3551f11accf0 100644
970 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c
971 +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
972 @@ -346,6 +346,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f)
973 {
974 u64 lpcr;
975
976 + /*
977 + * Linux relies on FSCR[DSCR] being clear, so that we can take the
978 + * facility unavailable interrupt and track the task's usage of DSCR.
979 + * See facility_unavailable_exception().
980 + * Clear the bit here so that feat_enable() doesn't set it.
981 + */
982 + f->fscr_bit_nr = -1;
983 +
984 feat_enable(f);
985
986 lpcr = mfspr(SPRN_LPCR);
987 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
988 index ed59855430b9..9b522152d8f0 100644
989 --- a/arch/powerpc/kernel/fadump.c
990 +++ b/arch/powerpc/kernel/fadump.c
991 @@ -38,8 +38,17 @@ static void __init fadump_reserve_crash_area(u64 base);
992
993 #ifndef CONFIG_PRESERVE_FA_DUMP
994 static DEFINE_MUTEX(fadump_mutex);
995 -struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 };
996 -struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 };
997 +struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
998 +
999 +#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */
1000 +#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \
1001 + sizeof(struct fadump_memory_range))
1002 +static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
1003 +struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
1004 + RESERVED_RNGS_SZ, 0,
1005 + RESERVED_RNGS_CNT, true };
1006 +
1007 +static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
1008
1009 #ifdef CONFIG_CMA
1010 static struct cma *fadump_cma;
1011 @@ -108,6 +117,11 @@ static int __init fadump_cma_init(void) { return 1; }
1012 int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
1013 int depth, void *data)
1014 {
1015 + if (depth == 0) {
1016 + early_init_dt_scan_reserved_ranges(node);
1017 + return 0;
1018 + }
1019 +
1020 if (depth != 1)
1021 return 0;
1022
1023 @@ -429,10 +443,72 @@ static int __init fadump_get_boot_mem_regions(void)
1024 return ret;
1025 }
1026
1027 +/*
1028 + * Returns true, if the given range overlaps with reserved memory ranges
1029 + * starting at idx. Also, updates idx to index of overlapping memory range
1030 + * with the given memory range.
1031 + * False, otherwise.
1032 + */
1033 +static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
1034 +{
1035 + bool ret = false;
1036 + int i;
1037 +
1038 + for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
1039 + u64 rbase = reserved_mrange_info.mem_ranges[i].base;
1040 + u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
1041 +
1042 + if (end <= rbase)
1043 + break;
1044 +
1045 + if ((end > rbase) && (base < rend)) {
1046 + *idx = i;
1047 + ret = true;
1048 + break;
1049 + }
1050 + }
1051 +
1052 + return ret;
1053 +}
1054 +
1055 +/*
1056 + * Locate a suitable memory area to reserve memory for FADump. While at it,
1057 + * lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
1058 + */
1059 +static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
1060 +{
1061 + struct fadump_memory_range *mrngs;
1062 + phys_addr_t mstart, mend;
1063 + int idx = 0;
1064 + u64 i, ret = 0;
1065 +
1066 + mrngs = reserved_mrange_info.mem_ranges;
1067 + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
1068 + &mstart, &mend, NULL) {
1069 + pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
1070 + i, mstart, mend, base);
1071 +
1072 + if (mstart > base)
1073 + base = PAGE_ALIGN(mstart);
1074 +
1075 + while ((mend > base) && ((mend - base) >= size)) {
1076 + if (!overlaps_reserved_ranges(base, base+size, &idx)) {
1077 + ret = base;
1078 + goto out;
1079 + }
1080 +
1081 + base = mrngs[idx].base + mrngs[idx].size;
1082 + base = PAGE_ALIGN(base);
1083 + }
1084 + }
1085 +
1086 +out:
1087 + return ret;
1088 +}
1089 +
1090 int __init fadump_reserve_mem(void)
1091 {
1092 - u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE;
1093 - bool is_memblock_bottom_up = memblock_bottom_up();
1094 + u64 base, size, mem_boundary, bootmem_min;
1095 int ret = 1;
1096
1097 if (!fw_dump.fadump_enabled)
1098 @@ -453,9 +529,9 @@ int __init fadump_reserve_mem(void)
1099 PAGE_ALIGN(fadump_calculate_reserve_size());
1100 #ifdef CONFIG_CMA
1101 if (!fw_dump.nocma) {
1102 - align = FADUMP_CMA_ALIGNMENT;
1103 fw_dump.boot_memory_size =
1104 - ALIGN(fw_dump.boot_memory_size, align);
1105 + ALIGN(fw_dump.boot_memory_size,
1106 + FADUMP_CMA_ALIGNMENT);
1107 }
1108 #endif
1109
1110 @@ -523,13 +599,9 @@ int __init fadump_reserve_mem(void)
1111 * Reserve memory at an offset closer to bottom of the RAM to
1112 * minimize the impact of memory hot-remove operation.
1113 */
1114 - memblock_set_bottom_up(true);
1115 - base = memblock_find_in_range(base, mem_boundary, size, align);
1116 + base = fadump_locate_reserve_mem(base, size);
1117
1118 - /* Restore the previous allocation mode */
1119 - memblock_set_bottom_up(is_memblock_bottom_up);
1120 -
1121 - if (!base) {
1122 + if (!base || (base + size > mem_boundary)) {
1123 pr_err("Failed to find memory chunk for reservation!\n");
1124 goto error_out;
1125 }
1126 @@ -726,10 +798,14 @@ void fadump_free_cpu_notes_buf(void)
1127
1128 static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
1129 {
1130 + if (mrange_info->is_static) {
1131 + mrange_info->mem_range_cnt = 0;
1132 + return;
1133 + }
1134 +
1135 kfree(mrange_info->mem_ranges);
1136 - mrange_info->mem_ranges = NULL;
1137 - mrange_info->mem_ranges_sz = 0;
1138 - mrange_info->max_mem_ranges = 0;
1139 + memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
1140 + (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
1141 }
1142
1143 /*
1144 @@ -786,6 +862,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
1145 if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
1146 int ret;
1147
1148 + if (mrange_info->is_static) {
1149 + pr_err("Reached array size limit for %s memory ranges\n",
1150 + mrange_info->name);
1151 + return -ENOSPC;
1152 + }
1153 +
1154 ret = fadump_alloc_mem_ranges(mrange_info);
1155 if (ret)
1156 return ret;
1157 @@ -1202,20 +1284,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
1158 * Scan reserved-ranges to consider them while reserving/releasing
1159 * memory for FADump.
1160 */
1161 -static inline int fadump_scan_reserved_mem_ranges(void)
1162 +static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
1163 {
1164 - struct device_node *root;
1165 const __be32 *prop;
1166 int len, ret = -1;
1167 unsigned long i;
1168
1169 - root = of_find_node_by_path("/");
1170 - if (!root)
1171 - return ret;
1172 + /* reserved-ranges already scanned */
1173 + if (reserved_mrange_info.mem_range_cnt != 0)
1174 + return;
1175
1176 - prop = of_get_property(root, "reserved-ranges", &len);
1177 + prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
1178 if (!prop)
1179 - return ret;
1180 + return;
1181
1182 /*
1183 * Each reserved range is an (address,size) pair, 2 cells each,
1184 @@ -1237,7 +1318,8 @@ static inline int fadump_scan_reserved_mem_ranges(void)
1185 }
1186 }
1187
1188 - return ret;
1189 + /* Compact reserved ranges */
1190 + sort_and_merge_mem_ranges(&reserved_mrange_info);
1191 }
1192
1193 /*
1194 @@ -1251,32 +1333,21 @@ static void fadump_release_memory(u64 begin, u64 end)
1195 u64 ra_start, ra_end, tstart;
1196 int i, ret;
1197
1198 - fadump_scan_reserved_mem_ranges();
1199 -
1200 ra_start = fw_dump.reserve_dump_area_start;
1201 ra_end = ra_start + fw_dump.reserve_dump_area_size;
1202
1203 /*
1204 - * Add reserved dump area to reserved ranges list
1205 - * and exclude all these ranges while releasing memory.
1206 + * If reserved ranges array limit is hit, overwrite the last reserved
1207 + * memory range with reserved dump area to ensure it is excluded from
1208 + * the memory being released (reused for next FADump registration).
1209 */
1210 - ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
1211 - if (ret != 0) {
1212 - /*
1213 - * Not enough memory to setup reserved ranges but the system is
1214 - * running shortage of memory. So, release all the memory except
1215 - * Reserved dump area (reused for next fadump registration).
1216 - */
1217 - if (begin < ra_end && end > ra_start) {
1218 - if (begin < ra_start)
1219 - fadump_release_reserved_area(begin, ra_start);
1220 - if (end > ra_end)
1221 - fadump_release_reserved_area(ra_end, end);
1222 - } else
1223 - fadump_release_reserved_area(begin, end);
1224 + if (reserved_mrange_info.mem_range_cnt ==
1225 + reserved_mrange_info.max_mem_ranges)
1226 + reserved_mrange_info.mem_range_cnt--;
1227
1228 + ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
1229 + if (ret != 0)
1230 return;
1231 - }
1232
1233 /* Get the reserved ranges list in order first. */
1234 sort_and_merge_mem_ranges(&reserved_mrange_info);
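The broader change in the fadump hunks: reserved-ranges are now scanned from the flattened device tree (at depth 0) very early in boot, before any allocator is available, so the reserved ranges table switches from kmalloc'd growth to a fixed static array (flagged is_static), and hitting the array limit reports -ENOSPC instead of reallocating. The underlying pattern, as a stand-alone sketch (entry count chosen for illustration):

    #include <errno.h>

    struct range { unsigned long long base, size; };

    #define NR_STATIC_RANGES 128
    static struct range static_rngs[NR_STATIC_RANGES];
    static unsigned int nr_rngs;

    /* Safe to call before the page/slab allocators exist. */
    static int add_range(unsigned long long base, unsigned long long size)
    {
        if (nr_rngs == NR_STATIC_RANGES)
            return -ENOSPC;   /* fixed capacity: report, don't realloc */
        static_rngs[nr_rngs].base = base;
        static_rngs[nr_rngs].size = size;
        nr_rngs++;
        return 0;
    }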
1235 diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
1236 index 6620f37abe73..e13e96e665e0 100644
1237 --- a/arch/powerpc/kernel/prom.c
1238 +++ b/arch/powerpc/kernel/prom.c
1239 @@ -685,6 +685,23 @@ static void __init tm_init(void)
1240 static void tm_init(void) { }
1241 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1242
1243 +#ifdef CONFIG_PPC64
1244 +static void __init save_fscr_to_task(void)
1245 +{
1246 + /*
1247 + * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
1248 + * have configured via the device tree features or via __init_FSCR().
1249 + * That value will then be propagated to pid 1 (init) and all future
1250 + * processes.
1251 + */
1252 + if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
1253 + init_task.thread.fscr = mfspr(SPRN_FSCR);
1254 +}
1255 +#else
1256 +static inline void save_fscr_to_task(void) {};
1257 +#endif
1258 +
1259 +
1260 void __init early_init_devtree(void *params)
1261 {
1262 phys_addr_t limit;
1263 @@ -773,6 +790,8 @@ void __init early_init_devtree(void *params)
1264 BUG();
1265 }
1266
1267 + save_fscr_to_task();
1268 +
1269 #if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
1270 /* We'll later wait for secondaries to check in; there are
1271 * NCPUS-1 non-boot CPUs :-)
1272 diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
1273 index b04896a88d79..68f7446193d1 100644
1274 --- a/arch/powerpc/mm/init_32.c
1275 +++ b/arch/powerpc/mm/init_32.c
1276 @@ -175,8 +175,6 @@ void __init MMU_init(void)
1277 btext_unmap();
1278 #endif
1279
1280 - kasan_mmu_init();
1281 -
1282 setup_kup();
1283
1284 /* Shortly after that, the entire linear mapping will be available */
1285 diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
1286 index 1cfe57b51d7e..b01d4b72eccf 100644
1287 --- a/arch/powerpc/mm/kasan/kasan_init_32.c
1288 +++ b/arch/powerpc/mm/kasan/kasan_init_32.c
1289 @@ -129,7 +129,7 @@ static void __init kasan_remap_early_shadow_ro(void)
1290 flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
1291 }
1292
1293 -void __init kasan_mmu_init(void)
1294 +static void __init kasan_mmu_init(void)
1295 {
1296 int ret;
1297 struct memblock_region *reg;
1298 @@ -156,6 +156,8 @@ void __init kasan_mmu_init(void)
1299
1300 void __init kasan_init(void)
1301 {
1302 + kasan_mmu_init();
1303 +
1304 kasan_remap_early_shadow_ro();
1305
1306 clear_page(kasan_early_shadow_page);
1307 diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
1308 index 784cae9f5697..da9f722d9f16 100644
1309 --- a/arch/powerpc/mm/pgtable_32.c
1310 +++ b/arch/powerpc/mm/pgtable_32.c
1311 @@ -207,7 +207,7 @@ void mark_initmem_nx(void)
1312 unsigned long numpages = PFN_UP((unsigned long)_einittext) -
1313 PFN_DOWN((unsigned long)_sinittext);
1314
1315 - if (v_block_mapped((unsigned long)_stext + 1))
1316 + if (v_block_mapped((unsigned long)_sinittext))
1317 mmu_mark_initmem_nx();
1318 else
1319 change_page_attr(page, numpages, PAGE_KERNEL);
1320 @@ -219,7 +219,7 @@ void mark_rodata_ro(void)
1321 struct page *page;
1322 unsigned long numpages;
1323
1324 - if (v_block_mapped((unsigned long)_sinittext)) {
1325 + if (v_block_mapped((unsigned long)_stext + 1)) {
1326 mmu_mark_rodata_ro();
1327 ptdump_check_wx();
1328 return;
1329 diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
1330 index c0f950a3f4e1..f4a4dfb191e7 100644
1331 --- a/arch/powerpc/platforms/cell/spufs/file.c
1332 +++ b/arch/powerpc/platforms/cell/spufs/file.c
1333 @@ -1978,8 +1978,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1334 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1335 size_t len, loff_t *pos)
1336 {
1337 - int ret;
1338 struct spu_context *ctx = file->private_data;
1339 + u32 stat, data;
1340 + int ret;
1341
1342 if (!access_ok(buf, len))
1343 return -EFAULT;
1344 @@ -1988,11 +1989,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1345 if (ret)
1346 return ret;
1347 spin_lock(&ctx->csa.register_lock);
1348 - ret = __spufs_mbox_info_read(ctx, buf, len, pos);
1349 + stat = ctx->csa.prob.mb_stat_R;
1350 + data = ctx->csa.prob.pu_mb_R;
1351 spin_unlock(&ctx->csa.register_lock);
1352 spu_release_saved(ctx);
1353
1354 - return ret;
1355 + /* EOF if there's no entry in the mbox */
1356 + if (!(stat & 0x0000ff))
1357 + return 0;
1358 +
1359 + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
1360 }
1361
1362 static const struct file_operations spufs_mbox_info_fops = {
1363 @@ -2019,6 +2025,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1364 size_t len, loff_t *pos)
1365 {
1366 struct spu_context *ctx = file->private_data;
1367 + u32 stat, data;
1368 int ret;
1369
1370 if (!access_ok(buf, len))
1371 @@ -2028,11 +2035,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1372 if (ret)
1373 return ret;
1374 spin_lock(&ctx->csa.register_lock);
1375 - ret = __spufs_ibox_info_read(ctx, buf, len, pos);
1376 + stat = ctx->csa.prob.mb_stat_R;
1377 + data = ctx->csa.priv2.puint_mb_R;
1378 spin_unlock(&ctx->csa.register_lock);
1379 spu_release_saved(ctx);
1380
1381 - return ret;
1382 + /* EOF if there's no entry in the ibox */
1383 + if (!(stat & 0xff0000))
1384 + return 0;
1385 +
1386 + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
1387 }
1388
1389 static const struct file_operations spufs_ibox_info_fops = {
1390 @@ -2041,6 +2053,11 @@ static const struct file_operations spufs_ibox_info_fops = {
1391 .llseek = generic_file_llseek,
1392 };
1393
1394 +static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
1395 +{
1396 + return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
1397 +}
1398 +
1399 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1400 char __user *buf, size_t len, loff_t *pos)
1401 {
1402 @@ -2049,7 +2066,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1403 u32 wbox_stat;
1404
1405 wbox_stat = ctx->csa.prob.mb_stat_R;
1406 - cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1407 + cnt = spufs_wbox_info_cnt(ctx);
1408 for (i = 0; i < cnt; i++) {
1409 data[i] = ctx->csa.spu_mailbox_data[i];
1410 }
1411 @@ -2062,7 +2079,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1412 size_t len, loff_t *pos)
1413 {
1414 struct spu_context *ctx = file->private_data;
1415 - int ret;
1416 + u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
1417 + int ret, count;
1418
1419 if (!access_ok(buf, len))
1420 return -EFAULT;
1421 @@ -2071,11 +2089,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1422 if (ret)
1423 return ret;
1424 spin_lock(&ctx->csa.register_lock);
1425 - ret = __spufs_wbox_info_read(ctx, buf, len, pos);
1426 + count = spufs_wbox_info_cnt(ctx);
1427 + memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
1428 spin_unlock(&ctx->csa.register_lock);
1429 spu_release_saved(ctx);
1430
1431 - return ret;
1432 + return simple_read_from_buffer(buf, len, pos, &data,
1433 + count * sizeof(u32));
1434 }
1435
1436 static const struct file_operations spufs_wbox_info_fops = {
1437 @@ -2084,27 +2104,33 @@ static const struct file_operations spufs_wbox_info_fops = {
1438 .llseek = generic_file_llseek,
1439 };
1440
1441 -static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1442 - char __user *buf, size_t len, loff_t *pos)
1443 +static void spufs_get_dma_info(struct spu_context *ctx,
1444 + struct spu_dma_info *info)
1445 {
1446 - struct spu_dma_info info;
1447 - struct mfc_cq_sr *qp, *spuqp;
1448 int i;
1449
1450 - info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1451 - info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1452 - info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1453 - info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1454 - info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1455 + info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1456 + info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1457 + info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
1458 + info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1459 + info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1460 for (i = 0; i < 16; i++) {
1461 - qp = &info.dma_info_command_data[i];
1462 - spuqp = &ctx->csa.priv2.spuq[i];
1463 + struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
1464 + struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
1465
1466 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1467 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1468 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1469 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1470 }
1471 +}
1472 +
1473 +static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1474 + char __user *buf, size_t len, loff_t *pos)
1475 +{
1476 + struct spu_dma_info info;
1477 +
1478 + spufs_get_dma_info(ctx, &info);
1479
1480 return simple_read_from_buffer(buf, len, pos, &info,
1481 sizeof info);
1482 @@ -2114,6 +2140,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1483 size_t len, loff_t *pos)
1484 {
1485 struct spu_context *ctx = file->private_data;
1486 + struct spu_dma_info info;
1487 int ret;
1488
1489 if (!access_ok(buf, len))
1490 @@ -2123,11 +2150,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1491 if (ret)
1492 return ret;
1493 spin_lock(&ctx->csa.register_lock);
1494 - ret = __spufs_dma_info_read(ctx, buf, len, pos);
1495 + spufs_get_dma_info(ctx, &info);
1496 spin_unlock(&ctx->csa.register_lock);
1497 spu_release_saved(ctx);
1498
1499 - return ret;
1500 + return simple_read_from_buffer(buf, len, pos, &info,
1501 + sizeof(info));
1502 }
1503
1504 static const struct file_operations spufs_dma_info_fops = {
1505 @@ -2136,13 +2164,31 @@ static const struct file_operations spufs_dma_info_fops = {
1506 .llseek = no_llseek,
1507 };
1508
1509 +static void spufs_get_proxydma_info(struct spu_context *ctx,
1510 + struct spu_proxydma_info *info)
1511 +{
1512 + int i;
1513 +
1514 + info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1515 + info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1516 + info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1517 +
1518 + for (i = 0; i < 8; i++) {
1519 + struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
1520 + struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
1521 +
1522 + qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1523 + qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1524 + qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1525 + qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1526 + }
1527 +}
1528 +
1529 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1530 char __user *buf, size_t len, loff_t *pos)
1531 {
1532 struct spu_proxydma_info info;
1533 - struct mfc_cq_sr *qp, *puqp;
1534 int ret = sizeof info;
1535 - int i;
1536
1537 if (len < ret)
1538 return -EINVAL;
1539 @@ -2150,18 +2196,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1540 if (!access_ok(buf, len))
1541 return -EFAULT;
1542
1543 - info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1544 - info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1545 - info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1546 - for (i = 0; i < 8; i++) {
1547 - qp = &info.proxydma_info_command_data[i];
1548 - puqp = &ctx->csa.priv2.puq[i];
1549 -
1550 - qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1551 - qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1552 - qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1553 - qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1554 - }
1555 + spufs_get_proxydma_info(ctx, &info);
1556
1557 return simple_read_from_buffer(buf, len, pos, &info,
1558 sizeof info);
1559 @@ -2171,17 +2206,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
1560 size_t len, loff_t *pos)
1561 {
1562 struct spu_context *ctx = file->private_data;
1563 + struct spu_proxydma_info info;
1564 int ret;
1565
1566 ret = spu_acquire_saved(ctx);
1567 if (ret)
1568 return ret;
1569 spin_lock(&ctx->csa.register_lock);
1570 - ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
1571 + spufs_get_proxydma_info(ctx, &info);
1572 spin_unlock(&ctx->csa.register_lock);
1573 spu_release_saved(ctx);
1574
1575 - return ret;
1576 + return simple_read_from_buffer(buf, len, pos, &info,
1577 + sizeof(info));
1578 }
1579
1580 static const struct file_operations spufs_proxydma_info_fops = {
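All of the spufs conversions above share one motivation: the old __spufs_*_info_read() helpers called simple_read_from_buffer() — which can fault and sleep — while holding ctx->csa.register_lock, a spinlock. Each handler now snapshots the saved state into a local buffer under the lock and performs the userspace copy only after unlocking. The shape of the fix, as kernel-style pseudocode (the context type and snapshot helper are hypothetical stand-ins):

    static ssize_t info_read_sketch(struct file *file, char __user *buf,
                                    size_t len, loff_t *pos)
    {
        struct ctx *ctx = file->private_data;   /* hypothetical type */
        struct info_snapshot info;              /* plain local copy */

        spin_lock(&ctx->lock);
        snapshot_registers(ctx, &info);         /* no sleeping, no faults */
        spin_unlock(&ctx->lock);

        /* May fault on the user buffer -- legal now the lock is dropped. */
        return simple_read_from_buffer(buf, len, pos, &info, sizeof(info));
    }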
1581 diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
1582 index 13e251699346..b2ba3e95bda7 100644
1583 --- a/arch/powerpc/platforms/powernv/smp.c
1584 +++ b/arch/powerpc/platforms/powernv/smp.c
1585 @@ -167,7 +167,6 @@ static void pnv_smp_cpu_kill_self(void)
1586 /* Standard hot unplug procedure */
1587
1588 idle_task_exit();
1589 - current->active_mm = NULL; /* for sanity */
1590 cpu = smp_processor_id();
1591 DBG("CPU%d offline\n", cpu);
1592 generic_set_cpu_dead(cpu);
1593 diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
1594 index 16b50afe7b52..60f7205ebe40 100644
1595 --- a/arch/sparc/kernel/ptrace_32.c
1596 +++ b/arch/sparc/kernel/ptrace_32.c
1597 @@ -46,82 +46,79 @@ enum sparc_regset {
1598 REGSET_FP,
1599 };
1600
1601 +static int regwindow32_get(struct task_struct *target,
1602 + const struct pt_regs *regs,
1603 + u32 *uregs)
1604 +{
1605 + unsigned long reg_window = regs->u_regs[UREG_I6];
1606 + int size = 16 * sizeof(u32);
1607 +
1608 + if (target == current) {
1609 + if (copy_from_user(uregs, (void __user *)reg_window, size))
1610 + return -EFAULT;
1611 + } else {
1612 + if (access_process_vm(target, reg_window, uregs, size,
1613 + FOLL_FORCE) != size)
1614 + return -EFAULT;
1615 + }
1616 + return 0;
1617 +}
1618 +
1619 +static int regwindow32_set(struct task_struct *target,
1620 + const struct pt_regs *regs,
1621 + u32 *uregs)
1622 +{
1623 + unsigned long reg_window = regs->u_regs[UREG_I6];
1624 + int size = 16 * sizeof(u32);
1625 +
1626 + if (target == current) {
1627 + if (copy_to_user((void __user *)reg_window, uregs, size))
1628 + return -EFAULT;
1629 + } else {
1630 + if (access_process_vm(target, reg_window, uregs, size,
1631 + FOLL_FORCE | FOLL_WRITE) != size)
1632 + return -EFAULT;
1633 + }
1634 + return 0;
1635 +}
1636 +
1637 static int genregs32_get(struct task_struct *target,
1638 const struct user_regset *regset,
1639 unsigned int pos, unsigned int count,
1640 void *kbuf, void __user *ubuf)
1641 {
1642 const struct pt_regs *regs = target->thread.kregs;
1643 - unsigned long __user *reg_window;
1644 - unsigned long *k = kbuf;
1645 - unsigned long __user *u = ubuf;
1646 - unsigned long reg;
1647 + u32 uregs[16];
1648 + int ret;
1649
1650 if (target == current)
1651 flush_user_windows();
1652
1653 - pos /= sizeof(reg);
1654 - count /= sizeof(reg);
1655 -
1656 - if (kbuf) {
1657 - for (; count > 0 && pos < 16; count--)
1658 - *k++ = regs->u_regs[pos++];
1659 -
1660 - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
1661 - reg_window -= 16;
1662 - for (; count > 0 && pos < 32; count--) {
1663 - if (get_user(*k++, &reg_window[pos++]))
1664 - return -EFAULT;
1665 - }
1666 - } else {
1667 - for (; count > 0 && pos < 16; count--) {
1668 - if (put_user(regs->u_regs[pos++], u++))
1669 - return -EFAULT;
1670 - }
1671 -
1672 - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
1673 - reg_window -= 16;
1674 - for (; count > 0 && pos < 32; count--) {
1675 - if (get_user(reg, &reg_window[pos++]) ||
1676 - put_user(reg, u++))
1677 - return -EFAULT;
1678 - }
1679 - }
1680 - while (count > 0) {
1681 - switch (pos) {
1682 - case 32: /* PSR */
1683 - reg = regs->psr;
1684 - break;
1685 - case 33: /* PC */
1686 - reg = regs->pc;
1687 - break;
1688 - case 34: /* NPC */
1689 - reg = regs->npc;
1690 - break;
1691 - case 35: /* Y */
1692 - reg = regs->y;
1693 - break;
1694 - case 36: /* WIM */
1695 - case 37: /* TBR */
1696 - reg = 0;
1697 - break;
1698 - default:
1699 - goto finish;
1700 - }
1701 + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1702 + regs->u_regs,
1703 + 0, 16 * sizeof(u32));
1704 + if (ret || !count)
1705 + return ret;
1706
1707 - if (kbuf)
1708 - *k++ = reg;
1709 - else if (put_user(reg, u++))
1710 + if (pos < 32 * sizeof(u32)) {
1711 + if (regwindow32_get(target, regs, uregs))
1712 return -EFAULT;
1713 - pos++;
1714 - count--;
1715 + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1716 + uregs,
1717 + 16 * sizeof(u32), 32 * sizeof(u32));
1718 + if (ret || !count)
1719 + return ret;
1720 }
1721 -finish:
1722 - pos *= sizeof(reg);
1723 - count *= sizeof(reg);
1724
1725 - return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
1726 - 38 * sizeof(reg), -1);
1727 + uregs[0] = regs->psr;
1728 + uregs[1] = regs->pc;
1729 + uregs[2] = regs->npc;
1730 + uregs[3] = regs->y;
1731 + uregs[4] = 0; /* WIM */
1732 + uregs[5] = 0; /* TBR */
1733 + return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1734 + uregs,
1735 + 32 * sizeof(u32), 38 * sizeof(u32));
1736 }
1737
1738 static int genregs32_set(struct task_struct *target,
1739 @@ -130,82 +127,53 @@ static int genregs32_set(struct task_struct *target,
1740 const void *kbuf, const void __user *ubuf)
1741 {
1742 struct pt_regs *regs = target->thread.kregs;
1743 - unsigned long __user *reg_window;
1744 - const unsigned long *k = kbuf;
1745 - const unsigned long __user *u = ubuf;
1746 - unsigned long reg;
1747 + u32 uregs[16];
1748 + u32 psr;
1749 + int ret;
1750
1751 if (target == current)
1752 flush_user_windows();
1753
1754 - pos /= sizeof(reg);
1755 - count /= sizeof(reg);
1756 -
1757 - if (kbuf) {
1758 - for (; count > 0 && pos < 16; count--)
1759 - regs->u_regs[pos++] = *k++;
1760 -
1761 - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
1762 - reg_window -= 16;
1763 - for (; count > 0 && pos < 32; count--) {
1764 - if (put_user(*k++, &reg_window[pos++]))
1765 - return -EFAULT;
1766 - }
1767 - } else {
1768 - for (; count > 0 && pos < 16; count--) {
1769 - if (get_user(reg, u++))
1770 - return -EFAULT;
1771 - regs->u_regs[pos++] = reg;
1772 - }
1773 -
1774 - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
1775 - reg_window -= 16;
1776 - for (; count > 0 && pos < 32; count--) {
1777 - if (get_user(reg, u++) ||
1778 - put_user(reg, &reg_window[pos++]))
1779 - return -EFAULT;
1780 - }
1781 - }
1782 - while (count > 0) {
1783 - unsigned long psr;
1784 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1785 + regs->u_regs,
1786 + 0, 16 * sizeof(u32));
1787 + if (ret || !count)
1788 + return ret;
1789
1790 - if (kbuf)
1791 - reg = *k++;
1792 - else if (get_user(reg, u++))
1793 + if (pos < 32 * sizeof(u32)) {
1794 + if (regwindow32_get(target, regs, uregs))
1795 return -EFAULT;
1796 -
1797 - switch (pos) {
1798 - case 32: /* PSR */
1799 - psr = regs->psr;
1800 - psr &= ~(PSR_ICC | PSR_SYSCALL);
1801 - psr |= (reg & (PSR_ICC | PSR_SYSCALL));
1802 - regs->psr = psr;
1803 - break;
1804 - case 33: /* PC */
1805 - regs->pc = reg;
1806 - break;
1807 - case 34: /* NPC */
1808 - regs->npc = reg;
1809 - break;
1810 - case 35: /* Y */
1811 - regs->y = reg;
1812 - break;
1813 - case 36: /* WIM */
1814 - case 37: /* TBR */
1815 - break;
1816 - default:
1817 - goto finish;
1818 - }
1819 -
1820 - pos++;
1821 - count--;
1822 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1823 + uregs,
1824 + 16 * sizeof(u32), 32 * sizeof(u32));
1825 + if (ret)
1826 + return ret;
1827 + if (regwindow32_set(target, regs, uregs))
1828 + return -EFAULT;
1829 + if (!count)
1830 + return 0;
1831 }
1832 -finish:
1833 - pos *= sizeof(reg);
1834 - count *= sizeof(reg);
1835 -
1836 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1837 + &psr,
1838 + 32 * sizeof(u32), 33 * sizeof(u32));
1839 + if (ret)
1840 + return ret;
1841 + regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
1842 + (psr & (PSR_ICC | PSR_SYSCALL));
1843 + if (!count)
1844 + return 0;
1845 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1846 + &regs->pc,
1847 + 33 * sizeof(u32), 34 * sizeof(u32));
1848 + if (ret || !count)
1849 + return ret;
1850 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1851 + &regs->y,
1852 + 34 * sizeof(u32), 35 * sizeof(u32));
1853 + if (ret || !count)
1854 + return ret;
1855 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
1856 - 38 * sizeof(reg), -1);
1857 + 35 * sizeof(u32), 38 * sizeof(u32));
1858 }
1859
1860 static int fpregs32_get(struct task_struct *target,
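
The regwindow32_get()/regwindow32_set() helpers introduced above pick the access method by target: copy_from_user()/copy_to_user() when the tracee is the current task, access_process_vm() otherwise, and treat a short transfer as -EFAULT. A minimal userspace sketch of the cross-task branch, using process_vm_readv(); the pid and remote address are hypothetical command-line inputs, and the caller needs ptrace permission over the target:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	int main(int argc, char **argv)
	{
		uint32_t uregs[16];	/* one 16-word register window */
		struct iovec local = {
			.iov_base = uregs, .iov_len = sizeof(uregs)
		};
		struct iovec remote;
		pid_t pid;

		if (argc != 3) {
			fprintf(stderr, "usage: %s <pid> <hex-addr>\n", argv[0]);
			return 1;
		}
		pid = (pid_t)strtol(argv[1], NULL, 0);
		remote.iov_base = (void *)(uintptr_t)strtoull(argv[2], NULL, 16);
		remote.iov_len = sizeof(uregs);

		/* like the helper: anything short of a full window is an error */
		if (process_vm_readv(pid, &local, 1, &remote, 1, 0) !=
		    (ssize_t)sizeof(uregs)) {
			perror("process_vm_readv");
			return 1;
		}
		for (int i = 0; i < 16; i++)
			printf("reg[%d] = 0x%08x\n", i, uregs[i]);
		return 0;
	}
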
1861 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
1862 index c9d41a96468f..3f5930bfab06 100644
1863 --- a/arch/sparc/kernel/ptrace_64.c
1864 +++ b/arch/sparc/kernel/ptrace_64.c
1865 @@ -572,19 +572,13 @@ static int genregs32_get(struct task_struct *target,
1866 for (; count > 0 && pos < 32; count--) {
1867 if (access_process_vm(target,
1868 (unsigned long)
1869 - &reg_window[pos],
1870 + &reg_window[pos++],
1871 &reg, sizeof(reg),
1872 FOLL_FORCE)
1873 != sizeof(reg))
1874 return -EFAULT;
1875 - if (access_process_vm(target,
1876 - (unsigned long) u,
1877 - &reg, sizeof(reg),
1878 - FOLL_FORCE | FOLL_WRITE)
1879 - != sizeof(reg))
1880 + if (put_user(reg, u++))
1881 return -EFAULT;
1882 - pos++;
1883 - u++;
1884 }
1885 }
1886 }
1887 @@ -684,12 +678,7 @@ static int genregs32_set(struct task_struct *target,
1888 }
1889 } else {
1890 for (; count > 0 && pos < 32; count--) {
1891 - if (access_process_vm(target,
1892 - (unsigned long)
1893 - u,
1894 - &reg, sizeof(reg),
1895 - FOLL_FORCE)
1896 - != sizeof(reg))
1897 + if (get_user(reg, u++))
1898 return -EFAULT;
1899 if (access_process_vm(target,
1900 (unsigned long)
1901 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
1902 index 70ffce98c568..d7c0fcc1dbf9 100644
1903 --- a/arch/x86/boot/compressed/head_32.S
1904 +++ b/arch/x86/boot/compressed/head_32.S
1905 @@ -49,16 +49,17 @@
1906 * Position Independent Executable (PIE) so that linker won't optimize
1907 * R_386_GOT32X relocation to its fixed symbol address. Older
1908 * linkers generate R_386_32 relocations against locally defined symbols,
1909 - * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less
1910 + * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less
1911 * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
1912 * R_386_32 relocations when relocating the kernel. To generate
1913 - * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
1914 + * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as
1915 * hidden:
1916 */
1917 .hidden _bss
1918 .hidden _ebss
1919 .hidden _got
1920 .hidden _egot
1921 + .hidden _end
1922
1923 __HEAD
1924 ENTRY(startup_32)
1925 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
1926 index 07d2002da642..50c9eeb36f0d 100644
1927 --- a/arch/x86/boot/compressed/head_64.S
1928 +++ b/arch/x86/boot/compressed/head_64.S
1929 @@ -42,6 +42,7 @@
1930 .hidden _ebss
1931 .hidden _got
1932 .hidden _egot
1933 + .hidden _end
1934
1935 __HEAD
1936 .code32
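
Both hunks above extend the same workaround to _end. A small C sketch of what the .hidden directive changes, using the equivalent symbol-visibility attribute; the symbol name is invented, and the relocation types named in the comment are taken from the patch's own description:

	/*
	 * C analogue of the ".hidden _end" directive added above; the
	 * symbol name is hypothetical. Build as 32-bit PIE and inspect
	 * the relocations, e.g.:
	 *
	 *	gcc -m32 -fpie -c hidden_demo.c && readelf -r hidden_demo.o
	 *
	 * With default visibility, older toolchains emit R_386_32/GOT
	 * relocations for the reference; marked hidden, the final link
	 * can use an R_386_RELATIVE fixup, which the kernel's early
	 * relocation code handles.
	 */
	extern char _demo_end[] __attribute__((visibility("hidden")));

	unsigned long demo_end_address(void)
	{
		return (unsigned long)_demo_end;
	}
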
1937 diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
1938 index 27c47d183f4b..8b58d6975d5d 100644
1939 --- a/arch/x86/include/asm/smap.h
1940 +++ b/arch/x86/include/asm/smap.h
1941 @@ -57,8 +57,10 @@ static __always_inline unsigned long smap_save(void)
1942 {
1943 unsigned long flags;
1944
1945 - asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
1946 - X86_FEATURE_SMAP)
1947 + asm volatile ("# smap_save\n\t"
1948 + ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
1949 + "pushf; pop %0; " __ASM_CLAC "\n\t"
1950 + "1:"
1951 : "=rm" (flags) : : "memory", "cc");
1952
1953 return flags;
1954 @@ -66,7 +68,10 @@ static __always_inline unsigned long smap_save(void)
1955
1956 static __always_inline void smap_restore(unsigned long flags)
1957 {
1958 - asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
1959 + asm volatile ("# smap_restore\n\t"
1960 + ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
1961 + "push %0; popf\n\t"
1962 + "1:"
1963 : : "g" (flags) : "memory", "cc");
1964 }
1965
1966 diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
1967 index 251c795b4eb3..c4bc01da820e 100644
1968 --- a/arch/x86/kernel/amd_nb.c
1969 +++ b/arch/x86/kernel/amd_nb.c
1970 @@ -18,10 +18,13 @@
1971 #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
1972 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
1973 #define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
1974 +#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
1975 #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
1976 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
1977 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
1978 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
1979 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
1980 +#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
1981
1982 /* Protect the PCI config register pairs used for SMN and DF indirect access. */
1983 static DEFINE_MUTEX(smn_mutex);
1984 @@ -32,6 +35,7 @@ static const struct pci_device_id amd_root_ids[] = {
1985 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
1986 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
1987 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
1988 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
1989 {}
1990 };
1991
1992 @@ -50,8 +54,10 @@ const struct pci_device_id amd_nb_misc_ids[] = {
1993 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
1994 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
1995 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
1996 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
1997 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
1998 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
1999 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
2000 {}
2001 };
2002 EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
2003 @@ -65,7 +71,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
2004 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
2005 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
2006 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
2007 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
2008 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
2009 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
2010 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
2011 {}
2012 };
2013 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
2014 index 12df3a4abfdd..6b32ab009c19 100644
2015 --- a/arch/x86/kernel/irq_64.c
2016 +++ b/arch/x86/kernel/irq_64.c
2017 @@ -43,7 +43,7 @@ static int map_irq_stack(unsigned int cpu)
2018 pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
2019 }
2020
2021 - va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
2022 + va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
2023 if (!va)
2024 return -ENOMEM;
2025
2026 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
2027 index fd10d91a6115..af352e228fa2 100644
2028 --- a/arch/x86/mm/init.c
2029 +++ b/arch/x86/mm/init.c
2030 @@ -121,8 +121,6 @@ __ref void *alloc_low_pages(unsigned int num)
2031 } else {
2032 pfn = pgt_buf_end;
2033 pgt_buf_end += num;
2034 - printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
2035 - pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
2036 }
2037
2038 for (i = 0; i < num; i++) {
2039 diff --git a/block/blk-iocost.c b/block/blk-iocost.c
2040 index d083f7704082..4d2bda812d9b 100644
2041 --- a/block/blk-iocost.c
2042 +++ b/block/blk-iocost.c
2043 @@ -1546,19 +1546,39 @@ skip_surplus_transfers:
2044 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2045 missed_ppm[READ] > ppm_rthr ||
2046 missed_ppm[WRITE] > ppm_wthr) {
2047 + /* clearly missing QoS targets, slow down vrate */
2048 ioc->busy_level = max(ioc->busy_level, 0);
2049 ioc->busy_level++;
2050 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2051 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2052 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2053 - /* take action iff there is contention */
2054 - if (nr_shortages && !nr_lagging) {
2055 + /* QoS targets are being met with >25% margin */
2056 + if (nr_shortages) {
2057 + /*
2058 + * We're throttling while the device has spare
2059 + * capacity. If vrate was being slowed down, stop.
2060 + */
2061 ioc->busy_level = min(ioc->busy_level, 0);
2062 - /* redistribute surpluses first */
2063 - if (!nr_surpluses)
2064 +
2065 + /*
2066 + * If there are IOs spanning multiple periods, wait
2067 + * them out before pushing the device harder. If
2068 + * there are surpluses, let redistribution work it
2069 + * out first.
2070 + */
2071 + if (!nr_lagging && !nr_surpluses)
2072 ioc->busy_level--;
2073 + } else {
2074 + /*
2075 + * Nobody is being throttled and the users aren't
2076 + * issuing enough IOs to saturate the device. We
2077 + * simply don't know how close the device is to
2078 + * saturation. Coast.
2079 + */
2080 + ioc->busy_level = 0;
2081 }
2082 } else {
2083 + /* inside the hysteresis margin, we're good */
2084 ioc->busy_level = 0;
2085 }
2086
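
The reworked branch is a hysteresis controller: busy_level is pushed up while QoS targets are missed, pulled down only when targets are met with margin and there is real contention, and snapped to zero when the signal is ambiguous. A toy standalone model of that shape; the thresholds and inputs are invented, and the real code also derives a vrate adjustment from busy_level:

	#include <stdio.h>

	struct ioc_model {
		int busy_level;		/* >0: slow down, <0: speed up */
	};

	static void period(struct ioc_model *ioc, int missing_qos,
			   int met_with_margin, int nr_shortages,
			   int nr_lagging, int nr_surpluses)
	{
		if (missing_qos) {
			/* clearly missing QoS targets, slow down */
			if (ioc->busy_level < 0)
				ioc->busy_level = 0;
			ioc->busy_level++;
		} else if (met_with_margin) {
			if (nr_shortages) {
				/* throttling despite spare capacity */
				if (ioc->busy_level > 0)
					ioc->busy_level = 0;
				if (!nr_lagging && !nr_surpluses)
					ioc->busy_level--;
			} else {
				/* nothing contended: coast */
				ioc->busy_level = 0;
			}
		} else {
			/* inside the hysteresis margin */
			ioc->busy_level = 0;
		}
	}

	int main(void)
	{
		struct ioc_model ioc = { 0 };

		period(&ioc, 1, 0, 0, 0, 0);	/* miss -> busy_level 1 */
		period(&ioc, 1, 0, 0, 0, 0);	/* miss -> busy_level 2 */
		period(&ioc, 0, 1, 1, 0, 0);	/* idle shortage -> -1 */
		period(&ioc, 0, 0, 0, 0, 0);	/* in the margin -> 0 */
		printf("busy_level = %d\n", ioc.busy_level);
		return 0;
	}
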
2087 diff --git a/block/blk-mq.c b/block/blk-mq.c
2088 index 757c0fd9f0cc..0550366e25d8 100644
2089 --- a/block/blk-mq.c
2090 +++ b/block/blk-mq.c
2091 @@ -2493,18 +2493,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
2092 * If the cpu isn't present, the cpu is mapped to first hctx.
2093 */
2094 for_each_possible_cpu(i) {
2095 - hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
2096 - /* unmapped hw queue can be remapped after CPU topo changed */
2097 - if (!set->tags[hctx_idx] &&
2098 - !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2099 - /*
2100 - * If tags initialization fail for some hctx,
2101 - * that hctx won't be brought online. In this
2102 - * case, remap the current ctx to hctx[0] which
2103 - * is guaranteed to always have tags allocated
2104 - */
2105 - set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
2106 - }
2107
2108 ctx = per_cpu_ptr(q->queue_ctx, i);
2109 for (j = 0; j < set->nr_maps; j++) {
2110 @@ -2513,6 +2501,18 @@ static void blk_mq_map_swqueue(struct request_queue *q)
2111 HCTX_TYPE_DEFAULT, i);
2112 continue;
2113 }
2114 + hctx_idx = set->map[j].mq_map[i];
2115 + /* unmapped hw queue can be remapped after CPU topo changed */
2116 + if (!set->tags[hctx_idx] &&
2117 + !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2118 + /*
2119 + * If tags initialization fails for some hctx,
2120 + * that hctx won't be brought online. In this
2121 + * case, remap the current ctx to hctx[0] which
2122 + * is guaranteed to always have tags allocated
2123 + */
2124 + set->map[j].mq_map[i] = 0;
2125 + }
2126
2127 hctx = blk_mq_map_queue_type(q, j, i);
2128 ctx->hctxs[j] = hctx;
2129 @@ -3304,8 +3304,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2130
2131 prev_nr_hw_queues = set->nr_hw_queues;
2132 set->nr_hw_queues = nr_hw_queues;
2133 - blk_mq_update_queue_map(set);
2134 fallback:
2135 + blk_mq_update_queue_map(set);
2136 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2137 blk_mq_realloc_hw_ctxs(set, q);
2138 if (q->nr_hw_queues != set->nr_hw_queues) {
2139 diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
2140 index 8438e33aa447..fd9028a6bc20 100644
2141 --- a/drivers/acpi/acpica/dsfield.c
2142 +++ b/drivers/acpi/acpica/dsfield.c
2143 @@ -518,13 +518,20 @@ acpi_ds_create_field(union acpi_parse_object *op,
2144 info.region_node = region_node;
2145
2146 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
2147 + if (ACPI_FAILURE(status)) {
2148 + return_ACPI_STATUS(status);
2149 + }
2150 +
2151 if (info.region_node->object->region.space_id ==
2152 - ACPI_ADR_SPACE_PLATFORM_COMM
2153 - && !(region_node->object->field.internal_pcc_buffer =
2154 - ACPI_ALLOCATE_ZEROED(info.region_node->object->region.
2155 - length))) {
2156 - return_ACPI_STATUS(AE_NO_MEMORY);
2157 + ACPI_ADR_SPACE_PLATFORM_COMM) {
2158 + region_node->object->field.internal_pcc_buffer =
2159 + ACPI_ALLOCATE_ZEROED(info.region_node->object->region.
2160 + length);
2161 + if (!region_node->object->field.internal_pcc_buffer) {
2162 + return_ACPI_STATUS(AE_NO_MEMORY);
2163 + }
2164 }
2165 +
2166 return_ACPI_STATUS(status);
2167 }
2168
2169 diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
2170 index 5a7551d060f2..bc95a5eebd13 100644
2171 --- a/drivers/acpi/arm64/iort.c
2172 +++ b/drivers/acpi/arm64/iort.c
2173 @@ -361,6 +361,7 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
2174 static int iort_get_id_mapping_index(struct acpi_iort_node *node)
2175 {
2176 struct acpi_iort_smmu_v3 *smmu;
2177 + struct acpi_iort_pmcg *pmcg;
2178
2179 switch (node->type) {
2180 case ACPI_IORT_NODE_SMMU_V3:
2181 @@ -388,6 +389,10 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
2182
2183 return smmu->id_mapping_index;
2184 case ACPI_IORT_NODE_PMCG:
2185 + pmcg = (struct acpi_iort_pmcg *)node->node_data;
2186 + if (pmcg->overflow_gsiv || node->mapping_count == 0)
2187 + return -EINVAL;
2188 +
2189 return 0;
2190 default:
2191 return -EINVAL;
2192 diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
2193 index 6d7a522952bf..ccd900690b6f 100644
2194 --- a/drivers/acpi/evged.c
2195 +++ b/drivers/acpi/evged.c
2196 @@ -94,7 +94,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
2197 trigger = p->triggering;
2198 } else {
2199 gsi = pext->interrupts[0];
2200 - trigger = p->triggering;
2201 + trigger = pext->triggering;
2202 }
2203
2204 irq = r.start;
2205 diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
2206 index f02a4bdc0ca7..dd29d687cd38 100644
2207 --- a/drivers/bluetooth/btbcm.c
2208 +++ b/drivers/bluetooth/btbcm.c
2209 @@ -329,6 +329,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
2210 { 0x410e, "BCM43341B0" }, /* 002.001.014 */
2211 { 0x4204, "BCM2076B1" }, /* 002.002.004 */
2212 { 0x4406, "BCM4324B3" }, /* 002.004.006 */
2213 + { 0x4606, "BCM4324B5" }, /* 002.006.006 */
2214 { 0x6109, "BCM4335C0" }, /* 003.001.009 */
2215 { 0x610c, "BCM4354" }, /* 003.001.012 */
2216 { 0x2122, "BCM4343A0" }, /* 001.001.034 */
2217 @@ -343,6 +344,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
2218 };
2219
2220 static const struct bcm_subver_table bcm_usb_subver_table[] = {
2221 + { 0x2105, "BCM20703A1" }, /* 001.001.005 */
2222 { 0x210b, "BCM43142A0" }, /* 001.001.011 */
2223 { 0x2112, "BCM4314A0" }, /* 001.001.018 */
2224 { 0x2118, "BCM20702A0" }, /* 001.001.024 */
2225 diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
2226 index e11169ad8247..8a81fbca5c9d 100644
2227 --- a/drivers/bluetooth/btmtkuart.c
2228 +++ b/drivers/bluetooth/btmtkuart.c
2229 @@ -1015,7 +1015,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
2230 if (btmtkuart_is_standalone(bdev)) {
2231 err = clk_prepare_enable(bdev->osc);
2232 if (err < 0)
2233 - return err;
2234 + goto err_hci_free_dev;
2235
2236 if (bdev->boot) {
2237 gpiod_set_value_cansleep(bdev->boot, 1);
2238 @@ -1028,10 +1028,8 @@ static int btmtkuart_probe(struct serdev_device *serdev)
2239
2240 /* Power on */
2241 err = regulator_enable(bdev->vcc);
2242 - if (err < 0) {
2243 - clk_disable_unprepare(bdev->osc);
2244 - return err;
2245 - }
2246 + if (err < 0)
2247 + goto err_clk_disable_unprepare;
2248
2249 /* Reset if the reset-gpios is available otherwise the board
2250 * -level design should be guaranteed.
2251 @@ -1063,7 +1061,6 @@ static int btmtkuart_probe(struct serdev_device *serdev)
2252 err = hci_register_dev(hdev);
2253 if (err < 0) {
2254 dev_err(&serdev->dev, "Can't register HCI device\n");
2255 - hci_free_dev(hdev);
2256 goto err_regulator_disable;
2257 }
2258
2259 @@ -1072,6 +1069,11 @@ static int btmtkuart_probe(struct serdev_device *serdev)
2260 err_regulator_disable:
2261 if (btmtkuart_is_standalone(bdev))
2262 regulator_disable(bdev->vcc);
2263 +err_clk_disable_unprepare:
2264 + if (btmtkuart_is_standalone(bdev))
2265 + clk_disable_unprepare(bdev->osc);
2266 +err_hci_free_dev:
2267 + hci_free_dev(hdev);
2268
2269 return err;
2270 }
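
The fix above converts the early returns into the usual goto-unwind ladder, so every failure path releases exactly what was acquired before it, in reverse order, including the hdev allocated earlier in probe. A compact sketch of the idiom with stand-in resources (plain allocations, not the driver's real objects):

	#include <stdio.h>
	#include <stdlib.h>

	static int probe(void)
	{
		char *dev, *clk, *reg;
		int err = -1;

		dev = malloc(16);	/* hci_alloc_dev() stand-in */
		if (!dev)
			return -1;

		clk = malloc(16);	/* clk_prepare_enable() stand-in */
		if (!clk)
			goto err_free_dev;

		reg = malloc(16);	/* regulator_enable() stand-in */
		if (!reg)
			goto err_free_clk;

		printf("probe ok\n");
		/* a real probe keeps these for the device's lifetime */
		free(reg);
		free(clk);
		free(dev);
		return 0;

	err_free_clk:
		free(clk);
	err_free_dev:
		free(dev);
		return err;
	}

	int main(void)
	{
		return probe() ? 1 : 0;
	}
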
2271 diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
2272 index 7646636f2d18..94ed734c1d7e 100644
2273 --- a/drivers/bluetooth/hci_bcm.c
2274 +++ b/drivers/bluetooth/hci_bcm.c
2275 @@ -107,6 +107,7 @@ struct bcm_device {
2276 u32 oper_speed;
2277 int irq;
2278 bool irq_active_low;
2279 + bool irq_acquired;
2280
2281 #ifdef CONFIG_PM
2282 struct hci_uart *hu;
2283 @@ -319,6 +320,8 @@ static int bcm_request_irq(struct bcm_data *bcm)
2284 goto unlock;
2285 }
2286
2287 + bdev->irq_acquired = true;
2288 +
2289 device_init_wakeup(bdev->dev, true);
2290
2291 pm_runtime_set_autosuspend_delay(bdev->dev,
2292 @@ -487,7 +490,7 @@ static int bcm_close(struct hci_uart *hu)
2293 }
2294
2295 if (bdev) {
2296 - if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
2297 + if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) {
2298 devm_free_irq(bdev->dev, bdev->irq, bdev);
2299 device_init_wakeup(bdev->dev, false);
2300 pm_runtime_disable(bdev->dev);
2301 diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
2302 index 76f9cd039195..14e127e9a740 100644
2303 --- a/drivers/clk/mediatek/clk-mux.c
2304 +++ b/drivers/clk/mediatek/clk-mux.c
2305 @@ -160,7 +160,7 @@ struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
2306 spinlock_t *lock)
2307 {
2308 struct mtk_clk_mux *clk_mux;
2309 - struct clk_init_data init;
2310 + struct clk_init_data init = {};
2311 struct clk *clk;
2312
2313 clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
2314 diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
2315 index 654766538f93..10ce69548f1b 100644
2316 --- a/drivers/clocksource/dw_apb_timer.c
2317 +++ b/drivers/clocksource/dw_apb_timer.c
2318 @@ -222,7 +222,8 @@ static int apbt_next_event(unsigned long delta,
2319 /**
2320 * dw_apb_clockevent_init() - use an APB timer as a clock_event_device
2321 *
2322 - * @cpu: The CPU the events will be targeted at.
2323 + * @cpu: The CPU the events will be targeted at or -1 if CPU affiliation
2324 + * isn't required.
2325 * @name: The name used for the timer and the IRQ for it.
2326 * @rating: The rating to give the timer.
2327 * @base: I/O base for the timer registers.
2328 @@ -257,7 +258,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
2329 dw_ced->ced.max_delta_ticks = 0x7fffffff;
2330 dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
2331 dw_ced->ced.min_delta_ticks = 5000;
2332 - dw_ced->ced.cpumask = cpumask_of(cpu);
2333 + dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu);
2334 dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
2335 CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
2336 dw_ced->ced.set_state_shutdown = apbt_shutdown;
2337 diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
2338 index 8c28b127759f..6921b91b61ef 100644
2339 --- a/drivers/clocksource/dw_apb_timer_of.c
2340 +++ b/drivers/clocksource/dw_apb_timer_of.c
2341 @@ -147,10 +147,6 @@ static int num_called;
2342 static int __init dw_apb_timer_init(struct device_node *timer)
2343 {
2344 switch (num_called) {
2345 - case 0:
2346 - pr_debug("%s: found clockevent timer\n", __func__);
2347 - add_clockevent(timer);
2348 - break;
2349 case 1:
2350 pr_debug("%s: found clocksource timer\n", __func__);
2351 add_clocksource(timer);
2352 @@ -161,6 +157,8 @@ static int __init dw_apb_timer_init(struct device_node *timer)
2353 #endif
2354 break;
2355 default:
2356 + pr_debug("%s: found clockevent timer\n", __func__);
2357 + add_clockevent(timer);
2358 break;
2359 }
2360
2361 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
2362 index 2bb2683b493c..f8747322b3c7 100644
2363 --- a/drivers/cpuidle/sysfs.c
2364 +++ b/drivers/cpuidle/sysfs.c
2365 @@ -480,7 +480,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
2366 ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
2367 &kdev->kobj, "state%d", i);
2368 if (ret) {
2369 - kfree(kobj);
2370 + kobject_put(&kobj->kobj);
2371 goto error_state;
2372 }
2373 cpuidle_add_s2idle_attr_group(kobj);
2374 @@ -611,7 +611,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
2375 ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
2376 &kdev->kobj, "driver");
2377 if (ret) {
2378 - kfree(kdrv);
2379 + kobject_put(&kdrv->kobj);
2380 return ret;
2381 }
2382
2383 @@ -705,7 +705,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
2384 error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
2385 "cpuidle");
2386 if (error) {
2387 - kfree(kdev);
2388 + kobject_put(&kdev->kobj);
2389 return error;
2390 }
2391
2392 diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
2393 index 8fec733f567f..63e227adbb13 100644
2394 --- a/drivers/crypto/ccp/Kconfig
2395 +++ b/drivers/crypto/ccp/Kconfig
2396 @@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD
2397 config CRYPTO_DEV_SP_CCP
2398 bool "Cryptographic Coprocessor device"
2399 default y
2400 - depends on CRYPTO_DEV_CCP_DD
2401 + depends on CRYPTO_DEV_CCP_DD && DMADEVICES
2402 select HW_RANDOM
2403 select DMA_ENGINE
2404 - select DMADEVICES
2405 select CRYPTO_SHA1
2406 select CRYPTO_SHA256
2407 help
2408 diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
2409 index 01dd418bdadc..fe2eadc0ce83 100644
2410 --- a/drivers/crypto/chelsio/chcr_algo.c
2411 +++ b/drivers/crypto/chelsio/chcr_algo.c
2412 @@ -2818,7 +2818,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2413 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2414 unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2415 unsigned int ccm_xtra;
2416 - unsigned char tag_offset = 0, auth_offset = 0;
2417 + unsigned int tag_offset = 0, auth_offset = 0;
2418 unsigned int assoclen;
2419
2420 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2421 diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
2422 index 9e11c3480353..e68b856d03b6 100644
2423 --- a/drivers/crypto/stm32/stm32-crc32.c
2424 +++ b/drivers/crypto/stm32/stm32-crc32.c
2425 @@ -28,8 +28,10 @@
2426
2427 /* Registers values */
2428 #define CRC_CR_RESET BIT(0)
2429 -#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
2430 -#define CRC_INIT_DEFAULT 0xFFFFFFFF
2431 +#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
2432 +#define CRC_CR_REV_IN_BYTE BIT(5)
2433 +#define CRC_CR_REV_OUT BIT(7)
2434 +#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
2435
2436 #define CRC_AUTOSUSPEND_DELAY 50
2437
2438 @@ -38,8 +40,6 @@ struct stm32_crc {
2439 struct device *dev;
2440 void __iomem *regs;
2441 struct clk *clk;
2442 - u8 pending_data[sizeof(u32)];
2443 - size_t nb_pending_bytes;
2444 };
2445
2446 struct stm32_crc_list {
2447 @@ -59,14 +59,13 @@ struct stm32_crc_ctx {
2448
2449 struct stm32_crc_desc_ctx {
2450 u32 partial; /* crc32c: partial in first 4 bytes of that struct */
2451 - struct stm32_crc *crc;
2452 };
2453
2454 static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
2455 {
2456 struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
2457
2458 - mctx->key = CRC_INIT_DEFAULT;
2459 + mctx->key = 0;
2460 mctx->poly = CRC32_POLY_LE;
2461 return 0;
2462 }
2463 @@ -75,7 +74,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
2464 {
2465 struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
2466
2467 - mctx->key = CRC_INIT_DEFAULT;
2468 + mctx->key = CRC32C_INIT_DEFAULT;
2469 mctx->poly = CRC32C_POLY_LE;
2470 return 0;
2471 }
2472 @@ -94,32 +93,42 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
2473 return 0;
2474 }
2475
2476 +static struct stm32_crc *stm32_crc_get_next_crc(void)
2477 +{
2478 + struct stm32_crc *crc;
2479 +
2480 + spin_lock_bh(&crc_list.lock);
2481 + crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
2482 + if (crc)
2483 + list_move_tail(&crc->list, &crc_list.dev_list);
2484 + spin_unlock_bh(&crc_list.lock);
2485 +
2486 + return crc;
2487 +}
2488 +
2489 static int stm32_crc_init(struct shash_desc *desc)
2490 {
2491 struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
2492 struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
2493 struct stm32_crc *crc;
2494
2495 - spin_lock_bh(&crc_list.lock);
2496 - list_for_each_entry(crc, &crc_list.dev_list, list) {
2497 - ctx->crc = crc;
2498 - break;
2499 - }
2500 - spin_unlock_bh(&crc_list.lock);
2501 + crc = stm32_crc_get_next_crc();
2502 + if (!crc)
2503 + return -ENODEV;
2504
2505 - pm_runtime_get_sync(ctx->crc->dev);
2506 + pm_runtime_get_sync(crc->dev);
2507
2508 /* Reset, set key, poly and configure in bit reverse mode */
2509 - writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
2510 - writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
2511 - writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
2512 + writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
2513 + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
2514 + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
2515 + crc->regs + CRC_CR);
2516
2517 /* Store partial result */
2518 - ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
2519 - ctx->crc->nb_pending_bytes = 0;
2520 + ctx->partial = readl_relaxed(crc->regs + CRC_DR);
2521
2522 - pm_runtime_mark_last_busy(ctx->crc->dev);
2523 - pm_runtime_put_autosuspend(ctx->crc->dev);
2524 + pm_runtime_mark_last_busy(crc->dev);
2525 + pm_runtime_put_autosuspend(crc->dev);
2526
2527 return 0;
2528 }
2529 @@ -128,31 +137,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
2530 unsigned int length)
2531 {
2532 struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
2533 - struct stm32_crc *crc = ctx->crc;
2534 - u32 *d32;
2535 - unsigned int i;
2536 + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
2537 + struct stm32_crc *crc;
2538 +
2539 + crc = stm32_crc_get_next_crc();
2540 + if (!crc)
2541 + return -ENODEV;
2542
2543 pm_runtime_get_sync(crc->dev);
2544
2545 - if (unlikely(crc->nb_pending_bytes)) {
2546 - while (crc->nb_pending_bytes != sizeof(u32) && length) {
2547 - /* Fill in pending data */
2548 - crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
2549 + /*
2550 + * Restore the previously calculated CRC for this context as the
2551 + * init value, restore the polynomial configuration, and configure
2552 + * the input register for word data and the output register for
2553 + * bit-reversed data.
2554 + */
2555 + writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
2556 + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
2557 + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
2558 + crc->regs + CRC_CR);
2559 +
2560 + if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
2561 + /* Configure for byte data */
2562 + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
2563 + crc->regs + CRC_CR);
2564 + while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
2565 + writeb_relaxed(*d8++, crc->regs + CRC_DR);
2566 length--;
2567 }
2568 -
2569 - if (crc->nb_pending_bytes == sizeof(u32)) {
2570 - /* Process completed pending data */
2571 - writel_relaxed(*(u32 *)crc->pending_data,
2572 - crc->regs + CRC_DR);
2573 - crc->nb_pending_bytes = 0;
2574 - }
2575 + /* Configure for word data */
2576 + writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
2577 + crc->regs + CRC_CR);
2578 }
2579
2580 - d32 = (u32 *)d8;
2581 - for (i = 0; i < length >> 2; i++)
2582 - /* Process 32 bits data */
2583 - writel_relaxed(*(d32++), crc->regs + CRC_DR);
2584 + for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
2585 + writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
2586 +
2587 + if (length) {
2588 + /* Configure for byte data */
2589 + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
2590 + crc->regs + CRC_CR);
2591 + while (length--)
2592 + writeb_relaxed(*d8++, crc->regs + CRC_DR);
2593 + }
2594
2595 /* Store partial result */
2596 ctx->partial = readl_relaxed(crc->regs + CRC_DR);
2597 @@ -160,22 +187,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
2598 pm_runtime_mark_last_busy(crc->dev);
2599 pm_runtime_put_autosuspend(crc->dev);
2600
2601 - /* Check for pending data (non 32 bits) */
2602 - length &= 3;
2603 - if (likely(!length))
2604 - return 0;
2605 -
2606 - if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
2607 - /* Shall not happen */
2608 - dev_err(crc->dev, "Pending data overflow\n");
2609 - return -EINVAL;
2610 - }
2611 -
2612 - d8 = (const u8 *)d32;
2613 - for (i = 0; i < length; i++)
2614 - /* Store pending data */
2615 - crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
2616 -
2617 return 0;
2618 }
2619
2620 @@ -204,6 +215,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
2621 return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
2622 }
2623
2624 +static unsigned int refcnt;
2625 +static DEFINE_MUTEX(refcnt_lock);
2626 static struct shash_alg algs[] = {
2627 /* CRC-32 */
2628 {
2629 @@ -294,12 +307,18 @@ static int stm32_crc_probe(struct platform_device *pdev)
2630 list_add(&crc->list, &crc_list.dev_list);
2631 spin_unlock(&crc_list.lock);
2632
2633 - ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
2634 - if (ret) {
2635 - dev_err(dev, "Failed to register\n");
2636 - clk_disable_unprepare(crc->clk);
2637 - return ret;
2638 + mutex_lock(&refcnt_lock);
2639 + if (!refcnt) {
2640 + ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
2641 + if (ret) {
2642 + mutex_unlock(&refcnt_lock);
2643 + dev_err(dev, "Failed to register\n");
2644 + clk_disable_unprepare(crc->clk);
2645 + return ret;
2646 + }
2647 }
2648 + refcnt++;
2649 + mutex_unlock(&refcnt_lock);
2650
2651 dev_info(dev, "Initialized\n");
2652
2653 @@ -320,7 +339,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
2654 list_del(&crc->list);
2655 spin_unlock(&crc_list.lock);
2656
2657 - crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
2658 + mutex_lock(&refcnt_lock);
2659 + if (!--refcnt)
2660 + crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
2661 + mutex_unlock(&refcnt_lock);
2662
2663 pm_runtime_disable(crc->dev);
2664 pm_runtime_put_noidle(crc->dev);
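
The reworked update path drops the pending-byte buffer in favor of a three-phase feed: byte-wide writes until the source pointer is 32-bit aligned, word-wide writes for the bulk, and byte-wide writes for the tail, switching CRC_CR between byte and word input modes around the unaligned sections. A standalone sketch of that split, with a software CRC-32 standing in for the CRC_DR register; the word phase assumes a little-endian host, as on the STM32:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	static uint32_t crc;

	static void put_byte(uint8_t b)	/* writeb_relaxed() stand-in */
	{
		crc ^= b;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}

	static void put_word(uint32_t w)	/* writel_relaxed() stand-in */
	{
		for (int i = 0; i < 4; i++)
			put_byte((uint8_t)(w >> (8 * i)));
	}

	static void update(const uint8_t *d8, size_t length)
	{
		/* head: feed bytes until d8 is 32-bit aligned */
		while (((uintptr_t)d8 & 3) && length) {
			put_byte(*d8++);
			length--;
		}
		/* bulk: whole words (one register write each in the driver) */
		for (; length >= sizeof(uint32_t);
		     d8 += sizeof(uint32_t), length -= sizeof(uint32_t)) {
			uint32_t w;

			memcpy(&w, d8, sizeof(w));	/* aligned load */
			put_word(w);
		}
		/* tail: leftover bytes */
		while (length--)
			put_byte(*d8++);
	}

	int main(void)
	{
		const char msg[] = "123456789";

		crc = 0xffffffffu;
		/* start one byte in, to exercise the unaligned head */
		update((const uint8_t *)msg + 1, sizeof(msg) - 2);
		printf("crc = 0x%08x\n", crc ^ 0xffffffffu);
		return 0;
	}
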
2665 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
2666 index cc5e56d752c8..ad7d2bce91cd 100644
2667 --- a/drivers/edac/amd64_edac.c
2668 +++ b/drivers/edac/amd64_edac.c
2669 @@ -2317,6 +2317,15 @@ static struct amd64_family_type family_types[] = {
2670 .dbam_to_cs = f17_addr_mask_to_cs_size,
2671 }
2672 },
2673 + [F17_M60H_CPUS] = {
2674 + .ctl_name = "F17h_M60h",
2675 + .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
2676 + .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
2677 + .ops = {
2678 + .early_channel_count = f17_early_channel_count,
2679 + .dbam_to_cs = f17_addr_mask_to_cs_size,
2680 + }
2681 + },
2682 [F17_M70H_CPUS] = {
2683 .ctl_name = "F17h_M70h",
2684 .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2685 @@ -3366,6 +3375,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
2686 fam_type = &family_types[F17_M30H_CPUS];
2687 pvt->ops = &family_types[F17_M30H_CPUS].ops;
2688 break;
2689 + } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
2690 + fam_type = &family_types[F17_M60H_CPUS];
2691 + pvt->ops = &family_types[F17_M60H_CPUS].ops;
2692 + break;
2693 } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
2694 fam_type = &family_types[F17_M70H_CPUS];
2695 pvt->ops = &family_types[F17_M70H_CPUS].ops;
2696 diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
2697 index 8c3cda81e619..d280b91f97cb 100644
2698 --- a/drivers/edac/amd64_edac.h
2699 +++ b/drivers/edac/amd64_edac.h
2700 @@ -120,6 +120,8 @@
2701 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
2702 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
2703 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
2704 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448
2705 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e
2706 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
2707 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
2708
2709 @@ -291,6 +293,7 @@ enum amd_families {
2710 F17_CPUS,
2711 F17_M10H_CPUS,
2712 F17_M30H_CPUS,
2713 + F17_M60H_CPUS,
2714 F17_M70H_CPUS,
2715 NUM_FAMILIES,
2716 };
2717 diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
2718 index ee0661ddb25b..8c5b5529dbc0 100644
2719 --- a/drivers/firmware/efi/libstub/Makefile
2720 +++ b/drivers/firmware/efi/libstub/Makefile
2721 @@ -28,6 +28,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
2722 -D__NO_FORTIFY \
2723 $(call cc-option,-ffreestanding) \
2724 $(call cc-option,-fno-stack-protector) \
2725 + $(call cc-option,-fno-addrsig) \
2726 -D__DISABLE_EXPORTS
2727
2728 GCOV_PROFILE := n
2729 diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
2730 index effed3a8d398..2ecb1d3e8eeb 100644
2731 --- a/drivers/gnss/sirf.c
2732 +++ b/drivers/gnss/sirf.c
2733 @@ -439,14 +439,18 @@ static int sirf_probe(struct serdev_device *serdev)
2734
2735 data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
2736 GPIOD_OUT_LOW);
2737 - if (IS_ERR(data->on_off))
2738 + if (IS_ERR(data->on_off)) {
2739 + ret = PTR_ERR(data->on_off);
2740 goto err_put_device;
2741 + }
2742
2743 if (data->on_off) {
2744 data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
2745 GPIOD_IN);
2746 - if (IS_ERR(data->wakeup))
2747 + if (IS_ERR(data->wakeup)) {
2748 + ret = PTR_ERR(data->wakeup);
2749 goto err_put_device;
2750 + }
2751
2752 ret = regulator_enable(data->vcc);
2753 if (ret)
2754 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
2755 index 8ceb44925947..5fa5158d18ee 100644
2756 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
2757 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
2758 @@ -161,16 +161,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
2759
2760 struct amdgpu_bo_list_entry vm_pd;
2761 struct list_head list, duplicates;
2762 + struct dma_fence *fence = NULL;
2763 struct ttm_validate_buffer tv;
2764 struct ww_acquire_ctx ticket;
2765 struct amdgpu_bo_va *bo_va;
2766 - int r;
2767 + long r;
2768
2769 INIT_LIST_HEAD(&list);
2770 INIT_LIST_HEAD(&duplicates);
2771
2772 tv.bo = &bo->tbo;
2773 - tv.num_shared = 1;
2774 + tv.num_shared = 2;
2775 list_add(&tv.head, &list);
2776
2777 amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
2778 @@ -178,28 +179,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
2779 r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
2780 if (r) {
2781 dev_err(adev->dev, "leaking bo va because "
2782 - "we fail to reserve bo (%d)\n", r);
2783 + "we fail to reserve bo (%ld)\n", r);
2784 return;
2785 }
2786 bo_va = amdgpu_vm_bo_find(vm, bo);
2787 - if (bo_va && --bo_va->ref_count == 0) {
2788 - amdgpu_vm_bo_rmv(adev, bo_va);
2789 -
2790 - if (amdgpu_vm_ready(vm)) {
2791 - struct dma_fence *fence = NULL;
2792 + if (!bo_va || --bo_va->ref_count)
2793 + goto out_unlock;
2794
2795 - r = amdgpu_vm_clear_freed(adev, vm, &fence);
2796 - if (unlikely(r)) {
2797 - dev_err(adev->dev, "failed to clear page "
2798 - "tables on GEM object close (%d)\n", r);
2799 - }
2800 + amdgpu_vm_bo_rmv(adev, bo_va);
2801 + if (!amdgpu_vm_ready(vm))
2802 + goto out_unlock;
2803
2804 - if (fence) {
2805 - amdgpu_bo_fence(bo, fence, true);
2806 - dma_fence_put(fence);
2807 - }
2808 - }
2809 + fence = dma_resv_get_excl(bo->tbo.base.resv);
2810 + if (fence) {
2811 + amdgpu_bo_fence(bo, fence, true);
2812 + fence = NULL;
2813 }
2814 +
2815 + r = amdgpu_vm_clear_freed(adev, vm, &fence);
2816 + if (r || !fence)
2817 + goto out_unlock;
2818 +
2819 + amdgpu_bo_fence(bo, fence, true);
2820 + dma_fence_put(fence);
2821 +
2822 +out_unlock:
2823 + if (unlikely(r < 0))
2824 + dev_err(adev->dev, "failed to clear page "
2825 + "tables on GEM object close (%ld)\n", r);
2826 ttm_eu_backoff_reservation(&ticket, &list);
2827 }
2828
2829 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2830 index 51263b8d94b1..d1d2372ab7ca 100644
2831 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2832 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2833 @@ -370,6 +370,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
2834 if (current_level == level)
2835 return count;
2836
2837 + if (adev->asic_type == CHIP_RAVEN) {
2838 + if (adev->rev_id < 8) {
2839 + if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
2840 + amdgpu_gfx_off_ctrl(adev, false);
2841 + else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
2842 + amdgpu_gfx_off_ctrl(adev, true);
2843 + }
2844 + }
2845 +
2846 /* profile_exit setting is valid only when current mode is in profile mode */
2847 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2848 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2849 @@ -416,8 +425,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
2850 ret = smu_get_power_num_states(&adev->smu, &data);
2851 if (ret)
2852 return ret;
2853 - } else if (adev->powerplay.pp_funcs->get_pp_num_states)
2854 + } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
2855 amdgpu_dpm_get_pp_num_states(adev, &data);
2856 + } else {
2857 + memset(&data, 0, sizeof(data));
2858 + }
2859
2860 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
2861 for (i = 0; i < data.nums; i++)
2862 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2863 index c7514f743409..6335bd4ae374 100644
2864 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2865 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2866 @@ -2867,10 +2867,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
2867 WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2868 "CPU update of VM recommended only for large BAR system\n");
2869
2870 - if (vm->use_cpu_for_update)
2871 + if (vm->use_cpu_for_update) {
2872 + /* Sync with last SDMA update/clear before switching to CPU */
2873 + r = amdgpu_bo_sync_wait(vm->root.base.bo,
2874 + AMDGPU_FENCE_OWNER_UNDEFINED, true);
2875 + if (r)
2876 + goto free_idr;
2877 +
2878 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2879 - else
2880 + } else {
2881 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2882 + }
2883 dma_fence_put(vm->last_update);
2884 vm->last_update = NULL;
2885
2886 diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
2887 index a428185be2c1..d05b3033b510 100644
2888 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
2889 +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
2890 @@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
2891 {
2892 switch (fs) {
2893 case 32000:
2894 - *n = 4096;
2895 + case 48000:
2896 + case 96000:
2897 + case 192000:
2898 + *n = fs * 128 / 1000;
2899 break;
2900 case 44100:
2901 - *n = 6272;
2902 - break;
2903 - case 48000:
2904 - *n = 6144;
2905 + case 88200:
2906 + case 176400:
2907 + *n = fs * 128 / 900;
2908 break;
2909 }
2910
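
The rewrite collapses the per-rate table into the two recommended HDMI audio clock regeneration formulas: N = 128 * fs / 1000 for the 32/48/96/192 kHz family and N = 128 * fs / 900 for the 44.1 kHz family, which reproduces the original values (4096 at 32 kHz, 6272 at 44.1 kHz, 6144 at 48 kHz) while covering the added rates. A quick standalone check:

	#include <stdio.h>

	static unsigned int audio_n(unsigned int fs)
	{
		switch (fs) {
		case 32000:
		case 48000:
		case 96000:
		case 192000:
			return fs * 128 / 1000;
		case 44100:
		case 88200:
		case 176400:
			return fs * 128 / 900;
		default:
			return 0;
		}
	}

	int main(void)
	{
		const unsigned int rates[] = {
			32000, 44100, 48000, 88200, 96000, 176400, 192000,
		};

		for (unsigned int i = 0;
		     i < sizeof(rates) / sizeof(rates[0]); i++)
			printf("fs=%6u Hz -> N=%5u\n",
			       rates[i], audio_n(rates[i]));
		return 0;
	}
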
2911 diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
2912 index 35bb825d1918..8c8c92fc82e9 100644
2913 --- a/drivers/gpu/drm/mcde/mcde_dsi.c
2914 +++ b/drivers/gpu/drm/mcde/mcde_dsi.c
2915 @@ -940,10 +940,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
2916 panel = NULL;
2917
2918 bridge = of_drm_find_bridge(child);
2919 - if (IS_ERR(bridge)) {
2920 - dev_err(dev, "failed to find bridge (%ld)\n",
2921 - PTR_ERR(bridge));
2922 - return PTR_ERR(bridge);
2923 + if (!bridge) {
2924 + dev_err(dev, "failed to find bridge\n");
2925 + return -EINVAL;
2926 }
2927 }
2928 }
2929 diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
2930 index be6d95c5ff25..48de07e9059e 100644
2931 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c
2932 +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
2933 @@ -10,7 +10,9 @@
2934 #include <linux/kernel.h>
2935 #include <linux/of.h>
2936 #include <linux/of_device.h>
2937 +#include <linux/of_gpio.h>
2938 #include <linux/of_graph.h>
2939 +#include <linux/pinctrl/consumer.h>
2940 #include <linux/platform_device.h>
2941 #include <linux/types.h>
2942
2943 @@ -73,6 +75,9 @@ struct mtk_dpi {
2944 enum mtk_dpi_out_yc_map yc_map;
2945 enum mtk_dpi_out_bit_num bit_num;
2946 enum mtk_dpi_out_channel_swap channel_swap;
2947 + struct pinctrl *pinctrl;
2948 + struct pinctrl_state *pins_gpio;
2949 + struct pinctrl_state *pins_dpi;
2950 int refcount;
2951 };
2952
2953 @@ -378,6 +383,9 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
2954 if (--dpi->refcount != 0)
2955 return;
2956
2957 + if (dpi->pinctrl && dpi->pins_gpio)
2958 + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
2959 +
2960 mtk_dpi_disable(dpi);
2961 clk_disable_unprepare(dpi->pixel_clk);
2962 clk_disable_unprepare(dpi->engine_clk);
2963 @@ -402,6 +410,9 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
2964 goto err_pixel;
2965 }
2966
2967 + if (dpi->pinctrl && dpi->pins_dpi)
2968 + pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
2969 +
2970 mtk_dpi_enable(dpi);
2971 return 0;
2972
2973 @@ -689,6 +700,26 @@ static int mtk_dpi_probe(struct platform_device *pdev)
2974 dpi->dev = dev;
2975 dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
2976
2977 + dpi->pinctrl = devm_pinctrl_get(&pdev->dev);
2978 + if (IS_ERR(dpi->pinctrl)) {
2979 + dpi->pinctrl = NULL;
2980 + dev_dbg(&pdev->dev, "Cannot find pinctrl!\n");
2981 + }
2982 + if (dpi->pinctrl) {
2983 + dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep");
2984 + if (IS_ERR(dpi->pins_gpio)) {
2985 + dpi->pins_gpio = NULL;
2986 + dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n");
2987 + }
2988 + if (dpi->pins_gpio)
2989 + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
2990 +
2991 + dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default");
2992 + if (IS_ERR(dpi->pins_dpi)) {
2993 + dpi->pins_dpi = NULL;
2994 + dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n");
2995 + }
2996 + }
2997 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2998 dpi->regs = devm_ioremap_resource(dev, mem);
2999 if (IS_ERR(dpi->regs)) {
3000 diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
3001 index c6430027169f..a0021fc25b27 100644
3002 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
3003 +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
3004 @@ -785,13 +785,15 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
3005
3006 drm_plane_create_alpha_property(&plane->plane);
3007
3008 - if (type == DRM_PLANE_TYPE_PRIMARY)
3009 - continue;
3010 -
3011 - drm_object_attach_property(&plane->plane.base,
3012 - rcdu->props.colorkey,
3013 - RCAR_DU_COLORKEY_NONE);
3014 - drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
3015 + if (type == DRM_PLANE_TYPE_PRIMARY) {
3016 + drm_plane_create_zpos_immutable_property(&plane->plane,
3017 + 0);
3018 + } else {
3019 + drm_object_attach_property(&plane->plane.base,
3020 + rcdu->props.colorkey,
3021 + RCAR_DU_COLORKEY_NONE);
3022 + drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
3023 + }
3024 }
3025
3026 return 0;
3027 diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
3028 index 5e4faf258c31..f1a81c9b184d 100644
3029 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
3030 +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
3031 @@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
3032 drm_plane_helper_add(&plane->plane,
3033 &rcar_du_vsp_plane_helper_funcs);
3034
3035 - if (type == DRM_PLANE_TYPE_PRIMARY)
3036 - continue;
3037 -
3038 - drm_plane_create_alpha_property(&plane->plane);
3039 - drm_plane_create_zpos_property(&plane->plane, 1, 1,
3040 - vsp->num_planes - 1);
3041 + if (type == DRM_PLANE_TYPE_PRIMARY) {
3042 + drm_plane_create_zpos_immutable_property(&plane->plane,
3043 + 0);
3044 + } else {
3045 + drm_plane_create_alpha_property(&plane->plane);
3046 + drm_plane_create_zpos_property(&plane->plane, 1, 1,
3047 + vsp->num_planes - 1);
3048 + }
3049 }
3050
3051 return 0;
3052 diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
3053 index 6e4c015783ff..c90d79096e8c 100644
3054 --- a/drivers/hv/connection.c
3055 +++ b/drivers/hv/connection.c
3056 @@ -67,7 +67,6 @@ static __u32 vmbus_get_next_version(__u32 current_version)
3057 int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
3058 {
3059 int ret = 0;
3060 - unsigned int cur_cpu;
3061 struct vmbus_channel_initiate_contact *msg;
3062 unsigned long flags;
3063
3064 @@ -100,24 +99,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
3065
3066 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
3067 msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
3068 - /*
3069 - * We want all channel messages to be delivered on CPU 0.
3070 - * This has been the behavior pre-win8. This is not
3071 - * perf issue and having all channel messages delivered on CPU 0
3072 - * would be ok.
3073 - * For post win8 hosts, we support receiving channel messagges on
3074 - * all the CPUs. This is needed for kexec to work correctly where
3075 - * the CPU attempting to connect may not be CPU 0.
3076 - */
3077 - if (version >= VERSION_WIN8_1) {
3078 - cur_cpu = get_cpu();
3079 - msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
3080 - vmbus_connection.connect_cpu = cur_cpu;
3081 - put_cpu();
3082 - } else {
3083 - msg->target_vcpu = 0;
3084 - vmbus_connection.connect_cpu = 0;
3085 - }
3086 + msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
3087
3088 /*
3089 * Add to list before we send the request since we may
3090 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
3091 index fcc52797c169..d6320022af15 100644
3092 --- a/drivers/hv/hv.c
3093 +++ b/drivers/hv/hv.c
3094 @@ -249,6 +249,13 @@ int hv_synic_cleanup(unsigned int cpu)
3095 bool channel_found = false;
3096 unsigned long flags;
3097
3098 + /*
3099 + * Hyper-V does not provide a way to change the connect CPU once
3100 + * it is set; we must prevent the connect CPU from going offline.
3101 + */
3102 + if (cpu == VMBUS_CONNECT_CPU)
3103 + return -EBUSY;
3104 +
3105 /*
3106 * Search for channels which are bound to the CPU we're about to
3107 * cleanup. In case we find one and vmbus is still connected we need to
3108 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
3109 index af9379a3bf89..cabcb66e7c5e 100644
3110 --- a/drivers/hv/hyperv_vmbus.h
3111 +++ b/drivers/hv/hyperv_vmbus.h
3112 @@ -212,12 +212,13 @@ enum vmbus_connect_state {
3113
3114 #define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT
3115
3116 -struct vmbus_connection {
3117 - /*
3118 - * CPU on which the initial host contact was made.
3119 - */
3120 - int connect_cpu;
3121 +/*
3122 + * The CPU that Hyper-V will interrupt for VMBUS messages, such as
3123 + * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
3124 + */
3125 +#define VMBUS_CONNECT_CPU 0
3126
3127 +struct vmbus_connection {
3128 u32 msg_conn_id;
3129
3130 atomic_t offer_in_progress;
3131 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
3132 index 9cdd434bb340..160ff640485b 100644
3133 --- a/drivers/hv/vmbus_drv.c
3134 +++ b/drivers/hv/vmbus_drv.c
3135 @@ -1092,14 +1092,28 @@ void vmbus_on_msg_dpc(unsigned long data)
3136 /*
3137 * If we are handling the rescind message;
3138 * schedule the work on the global work queue.
3139 + *
3140 + * The OFFER message and the RESCIND message should
3141 + * not be handled by the same serialized work queue,
3142 + * because the OFFER handler may call vmbus_open(),
3143 + * which tries to open the channel by sending an
3144 + * OPEN_CHANNEL message to the host and waits for
3145 + * the host's response; however, if the host has
3146 + * rescinded the channel before it receives the
3147 + * OPEN_CHANNEL message, the host just silently
3148 + * ignores the OPEN_CHANNEL message; as a result,
3149 + * the guest's OFFER handler hangs forever if we
3150 + * handle the RESCIND message in the same serialized
3151 + * work queue: the RESCIND handler cannot start to
3152 + * run before the OFFER handler finishes.
3153 */
3154 - schedule_work_on(vmbus_connection.connect_cpu,
3155 + schedule_work_on(VMBUS_CONNECT_CPU,
3156 &ctx->work);
3157 break;
3158
3159 case CHANNELMSG_OFFERCHANNEL:
3160 atomic_inc(&vmbus_connection.offer_in_progress);
3161 - queue_work_on(vmbus_connection.connect_cpu,
3162 + queue_work_on(VMBUS_CONNECT_CPU,
3163 vmbus_connection.work_queue,
3164 &ctx->work);
3165 break;
3166 @@ -1146,7 +1160,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
3167
3168 INIT_WORK(&ctx->work, vmbus_onmessage_work);
3169
3170 - queue_work_on(vmbus_connection.connect_cpu,
3171 + queue_work_on(VMBUS_CONNECT_CPU,
3172 vmbus_connection.work_queue,
3173 &ctx->work);
3174 }
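
The long comment added above describes a self-deadlock: the OFFER handler can block in vmbus_open() waiting for a host response that never arrives once the channel is rescinded, so OFFER and RESCIND must not share one serialized work queue. A minimal pthread sketch of the same shape, with each queue reduced to its own thread; all names here are invented:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static int rescinded;

	static void *offer(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		/* vmbus_open() waiting for the host's response */
		while (!rescinded)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
		printf("offer handler unblocked\n");
		return NULL;
	}

	static void *rescind(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		rescinded = 1;	/* only the RESCIND handler delivers this */
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t offer_q, rescind_q;

		/*
		 * Separate "work queues", so the handlers can run
		 * concurrently. Running offer() to completion before
		 * rescind() on one thread would hang in the wait loop.
		 */
		pthread_create(&offer_q, NULL, offer, NULL);
		pthread_create(&rescind_q, NULL, rescind, NULL);
		pthread_join(offer_q, NULL);
		pthread_join(rescind_q, NULL);
		return 0;
	}
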
3175 diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
3176 index 5c1dddde193c..f96fd8efb45a 100644
3177 --- a/drivers/hwmon/k10temp.c
3178 +++ b/drivers/hwmon/k10temp.c
3179 @@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = {
3180 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
3181 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
3182 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
3183 + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
3184 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
3185 { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
3186 {}
3187 diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
3188 index 4150301a89a5..e8377ce0a95a 100644
3189 --- a/drivers/macintosh/windfarm_pm112.c
3190 +++ b/drivers/macintosh/windfarm_pm112.c
3191 @@ -132,14 +132,6 @@ static int create_cpu_loop(int cpu)
3192 s32 tmax;
3193 int fmin;
3194
3195 - /* Get PID params from the appropriate SAT */
3196 - hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
3197 - if (hdr == NULL) {
3198 - printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
3199 - return -EINVAL;
3200 - }
3201 - piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
3202 -
3203 /* Get FVT params to get Tmax; if not found, assume default */
3204 hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL);
3205 if (hdr) {
3206 @@ -152,6 +144,16 @@ static int create_cpu_loop(int cpu)
3207 if (tmax < cpu_all_tmax)
3208 cpu_all_tmax = tmax;
3209
3210 + kfree(hdr);
3211 +
3212 + /* Get PID params from the appropriate SAT */
3213 + hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
3214 + if (hdr == NULL) {
3215 + printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
3216 + return -EINVAL;
3217 + }
3218 + piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
3219 +
3220 /*
3221 * Darwin has a minimum fan speed of 1000 rpm for the 4-way and
3222 * 515 for the 2-way. That appears to be overkill, so for now,
3223 @@ -174,6 +176,9 @@ static int create_cpu_loop(int cpu)
3224 pid.min = fmin;
3225
3226 wf_cpu_pid_init(&cpu_pid[cpu], &pid);
3227 +
3228 + kfree(hdr);
3229 +
3230 return 0;
3231 }
3232
3233 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3234 index 658b0f4a01f5..68901745eb20 100644
3235 --- a/drivers/md/bcache/super.c
3236 +++ b/drivers/md/bcache/super.c
3237 @@ -789,7 +789,9 @@ static void bcache_device_free(struct bcache_device *d)
3238 bcache_device_detach(d);
3239
3240 if (disk) {
3241 - if (disk->flags & GENHD_FL_UP)
3242 + bool disk_added = (disk->flags & GENHD_FL_UP) != 0;
3243 +
3244 + if (disk_added)
3245 del_gendisk(disk);
3246
3247 if (disk->queue)
3248 @@ -797,7 +799,8 @@ static void bcache_device_free(struct bcache_device *d)
3249
3250 ida_simple_remove(&bcache_device_idx,
3251 first_minor_to_idx(disk->first_minor));
3252 - put_disk(disk);
3253 + if (disk_added)
3254 + put_disk(disk);
3255 }
3256
3257 bioset_exit(&d->bio_split);
3258 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
3259 index 492bbe0584d9..ffbda729e26e 100644
3260 --- a/drivers/md/dm-crypt.c
3261 +++ b/drivers/md/dm-crypt.c
3262 @@ -2957,7 +2957,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3263 limits->max_segment_size = PAGE_SIZE;
3264
3265 limits->logical_block_size =
3266 - max_t(unsigned short, limits->logical_block_size, cc->sector_size);
3267 + max_t(unsigned, limits->logical_block_size, cc->sector_size);
3268 limits->physical_block_size =
3269 max_t(unsigned, limits->physical_block_size, cc->sector_size);
3270 limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
3271 diff --git a/drivers/md/md.c b/drivers/md/md.c
3272 index 6b69a12ca2d8..5a378a453a2d 100644
3273 --- a/drivers/md/md.c
3274 +++ b/drivers/md/md.c
3275 @@ -7607,7 +7607,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
3276 */
3277 mddev_put(mddev);
3278 /* Wait until bdev->bd_disk is definitely gone */
3279 - flush_workqueue(md_misc_wq);
3280 + if (work_pending(&mddev->del_work))
3281 + flush_workqueue(md_misc_wq);
3282 /* Then retry the open from the top */
3283 return -ERESTARTSYS;
3284 }
3285 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
3286 index 36cd7c2fbf40..a3cbc9f4fec1 100644
3287 --- a/drivers/md/raid5.c
3288 +++ b/drivers/md/raid5.c
3289 @@ -2228,14 +2228,19 @@ static int grow_stripes(struct r5conf *conf, int num)
3290 * of the P and Q blocks.
3291 */
3292 static int scribble_alloc(struct raid5_percpu *percpu,
3293 - int num, int cnt, gfp_t flags)
3294 + int num, int cnt)
3295 {
3296 size_t obj_size =
3297 sizeof(struct page *) * (num+2) +
3298 sizeof(addr_conv_t) * (num+2);
3299 void *scribble;
3300
3301 - scribble = kvmalloc_array(cnt, obj_size, flags);
3302 + /*
3303 + * If this is called in the raid array suspend context, it is in a
3304 + * memalloc noio context as well, so there are no potential recursive
3305 + * memory reclaim I/Os with the GFP_KERNEL flag.
3306 + */
3307 + scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL);
3308 if (!scribble)
3309 return -ENOMEM;
3310
3311 @@ -2267,8 +2272,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
3312
3313 percpu = per_cpu_ptr(conf->percpu, cpu);
3314 err = scribble_alloc(percpu, new_disks,
3315 - new_sectors / STRIPE_SECTORS,
3316 - GFP_NOIO);
3317 + new_sectors / STRIPE_SECTORS);
3318 if (err)
3319 break;
3320 }
3321 @@ -6765,8 +6769,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
3322 conf->previous_raid_disks),
3323 max(conf->chunk_sectors,
3324 conf->prev_chunk_sectors)
3325 - / STRIPE_SECTORS,
3326 - GFP_KERNEL)) {
3327 + / STRIPE_SECTORS)) {
3328 free_scratch_buffer(conf, percpu);
3329 return -ENOMEM;
3330 }
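The new comment relies on the scoped-allocation API: once the suspending task has entered a memalloc noio section, the page allocator strips I/O from its GFP_KERNEL requests, so callees such as scribble_alloc() need no explicit GFP_NOIO. A hedged kernel-style sketch of how such a scope is established (illustrative only, not the actual md suspend code):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_in_suspend_path(size_t size)
{
	unsigned int noio_flags;
	void *p;

	/* From here on, GFP_KERNEL allocations cannot recurse into I/O */
	noio_flags = memalloc_noio_save();
	p = kmalloc(size, GFP_KERNEL);
	memalloc_noio_restore(noio_flags);
	return p;
}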
3331 diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
3332 index b14c09cd9593..06383b26712b 100644
3333 --- a/drivers/media/cec/cec-adap.c
3334 +++ b/drivers/media/cec/cec-adap.c
3335 @@ -1732,6 +1732,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
3336 unsigned j;
3337
3338 log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
3339 + if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
3340 + dprintk(1, "unknown logical address type\n");
3341 + return -EINVAL;
3342 + }
3343 if (type_mask & (1 << log_addrs->log_addr_type[i])) {
3344 dprintk(1, "duplicate logical address type\n");
3345 return -EINVAL;
3346 @@ -1752,10 +1756,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
3347 dprintk(1, "invalid primary device type\n");
3348 return -EINVAL;
3349 }
3350 - if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
3351 - dprintk(1, "unknown logical address type\n");
3352 - return -EINVAL;
3353 - }
3354 for (j = 0; j < feature_sz; j++) {
3355 if ((features[j] & 0x80) == 0) {
3356 if (op_is_dev_features)
3357 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
3358 index 917fe034af37..032b6d7dd582 100644
3359 --- a/drivers/media/dvb-core/dvbdev.c
3360 +++ b/drivers/media/dvb-core/dvbdev.c
3361 @@ -707,9 +707,10 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
3362 }
3363
3364 if (ntuner && ndemod) {
3365 - pad_source = media_get_pad_index(tuner, true,
3366 + /* NOTE: first found tuner source pad presumed correct */
3367 + pad_source = media_get_pad_index(tuner, false,
3368 PAD_SIGNAL_ANALOG);
3369 - if (pad_source)
3370 + if (pad_source < 0)
3371 return -EINVAL;
3372 ret = media_create_pad_links(mdev,
3373 MEDIA_ENT_F_TUNER,
3374 diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
3375 index a398ea81e422..266e947572c1 100644
3376 --- a/drivers/media/i2c/ov5640.c
3377 +++ b/drivers/media/i2c/ov5640.c
3378 @@ -3068,8 +3068,8 @@ static int ov5640_probe(struct i2c_client *client)
3379 free_ctrls:
3380 v4l2_ctrl_handler_free(&sensor->ctrls.handler);
3381 entity_cleanup:
3382 - mutex_destroy(&sensor->lock);
3383 media_entity_cleanup(&sensor->sd.entity);
3384 + mutex_destroy(&sensor->lock);
3385 return ret;
3386 }
3387
3388 @@ -3079,9 +3079,9 @@ static int ov5640_remove(struct i2c_client *client)
3389 struct ov5640_dev *sensor = to_ov5640_dev(sd);
3390
3391 v4l2_async_unregister_subdev(&sensor->sd);
3392 - mutex_destroy(&sensor->lock);
3393 media_entity_cleanup(&sensor->sd.entity);
3394 v4l2_ctrl_handler_free(&sensor->ctrls.handler);
3395 + mutex_destroy(&sensor->lock);
3396
3397 return 0;
3398 }
3399 diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
3400 index 43c78620c9d8..5c6b00737fe7 100644
3401 --- a/drivers/media/platform/rcar-fcp.c
3402 +++ b/drivers/media/platform/rcar-fcp.c
3403 @@ -8,6 +8,7 @@
3404 */
3405
3406 #include <linux/device.h>
3407 +#include <linux/dma-mapping.h>
3408 #include <linux/list.h>
3409 #include <linux/module.h>
3410 #include <linux/mod_devicetable.h>
3411 @@ -21,6 +22,7 @@
3412 struct rcar_fcp_device {
3413 struct list_head list;
3414 struct device *dev;
3415 + struct device_dma_parameters dma_parms;
3416 };
3417
3418 static LIST_HEAD(fcp_devices);
3419 @@ -136,6 +138,9 @@ static int rcar_fcp_probe(struct platform_device *pdev)
3420
3421 fcp->dev = &pdev->dev;
3422
3423 + fcp->dev->dma_parms = &fcp->dma_parms;
3424 + dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32));
3425 +
3426 pm_runtime_enable(&pdev->dev);
3427
3428 mutex_lock(&fcp_lock);
3429 diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
3430 index 82350097503e..84ec36156f73 100644
3431 --- a/drivers/media/platform/vicodec/vicodec-core.c
3432 +++ b/drivers/media/platform/vicodec/vicodec-core.c
3433 @@ -2172,16 +2172,19 @@ static int vicodec_probe(struct platform_device *pdev)
3434
3435 platform_set_drvdata(pdev, dev);
3436
3437 - if (register_instance(dev, &dev->stateful_enc,
3438 - "stateful-encoder", true))
3439 + ret = register_instance(dev, &dev->stateful_enc, "stateful-encoder",
3440 + true);
3441 + if (ret)
3442 goto unreg_dev;
3443
3444 - if (register_instance(dev, &dev->stateful_dec,
3445 - "stateful-decoder", false))
3446 + ret = register_instance(dev, &dev->stateful_dec, "stateful-decoder",
3447 + false);
3448 + if (ret)
3449 goto unreg_sf_enc;
3450
3451 - if (register_instance(dev, &dev->stateless_dec,
3452 - "stateless-decoder", false))
3453 + ret = register_instance(dev, &dev->stateless_dec, "stateless-decoder",
3454 + false);
3455 + if (ret)
3456 goto unreg_sf_dec;
3457
3458 #ifdef CONFIG_MEDIA_CONTROLLER
3459 diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
3460 index e87040d6eca7..a39e1966816b 100644
3461 --- a/drivers/media/tuners/si2157.c
3462 +++ b/drivers/media/tuners/si2157.c
3463 @@ -75,24 +75,23 @@ static int si2157_init(struct dvb_frontend *fe)
3464 struct si2157_cmd cmd;
3465 const struct firmware *fw;
3466 const char *fw_name;
3467 - unsigned int uitmp, chip_id;
3468 + unsigned int chip_id, xtal_trim;
3469
3470 dev_dbg(&client->dev, "\n");
3471
3472 - /* Returned IF frequency is garbage when firmware is not running */
3473 - memcpy(cmd.args, "\x15\x00\x06\x07", 4);
3474 + /* Try to get Xtal trim property, to verify tuner still running */
3475 + memcpy(cmd.args, "\x15\x00\x04\x02", 4);
3476 cmd.wlen = 4;
3477 cmd.rlen = 4;
3478 ret = si2157_cmd_execute(client, &cmd);
3479 - if (ret)
3480 - goto err;
3481
3482 - uitmp = cmd.args[2] << 0 | cmd.args[3] << 8;
3483 - dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp);
3484 + xtal_trim = cmd.args[2] | (cmd.args[3] << 8);
3485
3486 - if (uitmp == dev->if_frequency / 1000)
3487 + if (ret == 0 && xtal_trim < 16)
3488 goto warm;
3489
3490 + dev->if_frequency = 0; /* we no longer know current tuner state */
3491 +
3492 /* power up */
3493 if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
3494 memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
3495 diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c
3496 index d4ea72bf09c5..5131c8d4c632 100644
3497 --- a/drivers/media/usb/dvb-usb/dibusb-mb.c
3498 +++ b/drivers/media/usb/dvb-usb/dibusb-mb.c
3499 @@ -81,7 +81,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap)
3500
3501 if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) {
3502 err("tuner i2c write failed.");
3503 - ret = -EREMOTEIO;
3504 + return -EREMOTEIO;
3505 }
3506
3507 if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl)
3508 diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
3509 index b05fa227ffb2..95756cbb722f 100644
3510 --- a/drivers/media/usb/go7007/snd-go7007.c
3511 +++ b/drivers/media/usb/go7007/snd-go7007.c
3512 @@ -236,22 +236,18 @@ int go7007_snd_init(struct go7007 *go)
3513 gosnd->capturing = 0;
3514 ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0,
3515 &gosnd->card);
3516 - if (ret < 0) {
3517 - kfree(gosnd);
3518 - return ret;
3519 - }
3520 + if (ret < 0)
3521 + goto free_snd;
3522 +
3523 ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go,
3524 &go7007_snd_device_ops);
3525 - if (ret < 0) {
3526 - kfree(gosnd);
3527 - return ret;
3528 - }
3529 + if (ret < 0)
3530 + goto free_card;
3531 +
3532 ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm);
3533 - if (ret < 0) {
3534 - snd_card_free(gosnd->card);
3535 - kfree(gosnd);
3536 - return ret;
3537 - }
3538 + if (ret < 0)
3539 + goto free_card;
3540 +
3541 strscpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver));
3542 strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->shortname));
3543 strscpy(gosnd->card->longname, gosnd->card->shortname,
3544 @@ -262,11 +258,8 @@ int go7007_snd_init(struct go7007 *go)
3545 &go7007_snd_capture_ops);
3546
3547 ret = snd_card_register(gosnd->card);
3548 - if (ret < 0) {
3549 - snd_card_free(gosnd->card);
3550 - kfree(gosnd);
3551 - return ret;
3552 - }
3553 + if (ret < 0)
3554 + goto free_card;
3555
3556 gosnd->substream = NULL;
3557 go->snd_context = gosnd;
3558 @@ -274,6 +267,12 @@ int go7007_snd_init(struct go7007 *go)
3559 ++dev;
3560
3561 return 0;
3562 +
3563 +free_card:
3564 + snd_card_free(gosnd->card);
3565 +free_snd:
3566 + kfree(gosnd);
3567 + return ret;
3568 }
3569 EXPORT_SYMBOL(go7007_snd_init);
3570
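The conversion above is the stock goto-unwind shape: every failure jumps to a label that releases exactly what was set up so far, in reverse order, so no path repeats the cleanup. A runnable userspace sketch of the same shape, with malloc() standing in for the card/device/PCM allocations:

#include <stdlib.h>
#include <errno.h>

static int setup_example(void)
{
	void *a, *b;
	int ret;

	a = malloc(32);			/* first resource */
	if (!a)
		return -ENOMEM;

	b = malloc(32);			/* second resource */
	if (!b) {
		ret = -ENOMEM;
		goto free_a;
	}

	ret = 0;			/* final registration; pretend it can fail */
	if (ret)
		goto free_b;

	return 0;

free_b:
	free(b);
free_a:
	free(a);
	return ret;
}

int main(void)
{
	return setup_example() ? 1 : 0;
}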
3571 diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
3572 index 999214e8cf2b..360d523132bd 100644
3573 --- a/drivers/mmc/host/meson-mx-sdio.c
3574 +++ b/drivers/mmc/host/meson-mx-sdio.c
3575 @@ -246,6 +246,9 @@ static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
3576
3577 mrq = host->mrq;
3578
3579 + if (host->cmd->error)
3580 + meson_mx_mmc_soft_reset(host);
3581 +
3582 host->mrq = NULL;
3583 host->cmd = NULL;
3584
3585 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
3586 index dccb4df46512..b03d65222622 100644
3587 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
3588 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
3589 @@ -87,7 +87,7 @@
3590 #define ESDHC_STD_TUNING_EN (1 << 24)
3591 /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
3592 #define ESDHC_TUNING_START_TAP_DEFAULT 0x1
3593 -#define ESDHC_TUNING_START_TAP_MASK 0xff
3594 +#define ESDHC_TUNING_START_TAP_MASK 0x7f
3595 #define ESDHC_TUNING_STEP_MASK 0x00070000
3596 #define ESDHC_TUNING_STEP_SHIFT 16
3597
3598 diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
3599 index 8b2a6a362c60..84cffdef264b 100644
3600 --- a/drivers/mmc/host/sdhci-msm.c
3601 +++ b/drivers/mmc/host/sdhci-msm.c
3602 @@ -1742,7 +1742,9 @@ static const struct sdhci_ops sdhci_msm_ops = {
3603 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
3604 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
3605 SDHCI_QUIRK_SINGLE_POWER_WRITE |
3606 - SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
3607 + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
3608 + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
3609 +
3610 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
3611 .ops = &sdhci_msm_ops,
3612 };
3613 diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
3614 index f4ac064ff471..8d96ecba1b55 100644
3615 --- a/drivers/mmc/host/via-sdmmc.c
3616 +++ b/drivers/mmc/host/via-sdmmc.c
3617 @@ -319,6 +319,8 @@ struct via_crdr_mmc_host {
3618 /* some devices need a very long delay for power to stabilize */
3619 #define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
3620
3621 +#define VIA_CMD_TIMEOUT_MS 1000
3622 +
3623 static const struct pci_device_id via_ids[] = {
3624 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
3625 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
3626 @@ -551,14 +553,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host,
3627 {
3628 void __iomem *addrbase;
3629 struct mmc_data *data;
3630 + unsigned int timeout_ms;
3631 u32 cmdctrl = 0;
3632
3633 WARN_ON(host->cmd);
3634
3635 data = cmd->data;
3636 - mod_timer(&host->timer, jiffies + HZ);
3637 host->cmd = cmd;
3638
3639 + timeout_ms = cmd->busy_timeout ? cmd->busy_timeout : VIA_CMD_TIMEOUT_MS;
3640 + mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
3641 +
3642 /*Command index*/
3643 cmdctrl = cmd->opcode << 8;
3644
3645 diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
3646 index 15ef30b368a5..4fef20724175 100644
3647 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
3648 +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
3649 @@ -1019,11 +1019,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
3650 if (!section) {
3651 /*
3652 * Small-page NAND use byte 6 for BBI while large-page
3653 - * NAND use byte 0.
3654 + * NAND use bytes 0 and 1.
3655 */
3656 - if (cfg->page_size > 512)
3657 - oobregion->offset++;
3658 - oobregion->length--;
3659 + if (cfg->page_size > 512) {
3660 + oobregion->offset += 2;
3661 + oobregion->length -= 2;
3662 + } else {
3663 + oobregion->length--;
3664 + }
3665 }
3666 }
3667
3668 diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
3669 index c0e1a8ebe820..522390b99d3c 100644
3670 --- a/drivers/mtd/nand/raw/diskonchip.c
3671 +++ b/drivers/mtd/nand/raw/diskonchip.c
3672 @@ -1609,13 +1609,10 @@ static int __init doc_probe(unsigned long physadr)
3673 numchips = doc2001_init(mtd);
3674
3675 if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
3676 - /* DBB note: i believe nand_release is necessary here, as
3677 + /* DBB note: I believe nand_cleanup is necessary here, as
3678 buffers may have been allocated in nand_base. Check with
3679 Thomas. FIX ME! */
3680 - /* nand_release will call mtd_device_unregister, but we
3681 - haven't yet added it. This is handled without incident by
3682 - mtd_device_unregister, as far as I can tell. */
3683 - nand_release(nand);
3684 + nand_cleanup(nand);
3685 goto fail;
3686 }
3687
3688 diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
3689 index 49afebee50db..4b7c399d4f4b 100644
3690 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
3691 +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
3692 @@ -376,7 +376,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
3693
3694 ret = mtd_device_register(mtd, NULL, 0);
3695 if (ret) {
3696 - nand_release(chip);
3697 + nand_cleanup(chip);
3698 return ret;
3699 }
3700
3701 diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
3702 index 373d47d1ba4c..08008c844a47 100644
3703 --- a/drivers/mtd/nand/raw/mtk_nand.c
3704 +++ b/drivers/mtd/nand/raw/mtk_nand.c
3705 @@ -1419,7 +1419,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
3706 ret = mtd_device_register(mtd, NULL, 0);
3707 if (ret) {
3708 dev_err(dev, "mtd parse partition error\n");
3709 - nand_release(nand);
3710 + nand_cleanup(nand);
3711 return ret;
3712 }
3713
3714 diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
3715 index 47c63968fa45..db66c1be6e5f 100644
3716 --- a/drivers/mtd/nand/raw/nand_base.c
3717 +++ b/drivers/mtd/nand/raw/nand_base.c
3718 @@ -731,8 +731,14 @@ EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
3719 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
3720 unsigned long timeout_ms)
3721 {
3722 - /* Wait until R/B pin indicates chip is ready or timeout occurs */
3723 - timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
3724 +
3725 + /*
3726 + * Wait until R/B pin indicates chip is ready or timeout occurs.
3727 + * The +1 below is necessary because if we are in the last fraction
3728 + * of a jiffy and msecs_to_jiffies() returns 1, we would wait only
3729 + * that small fraction of a jiffy - possibly leading to a false timeout.
3730 + */
3731 + timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
3732 do {
3733 if (gpiod_get_value_cansleep(gpiod))
3734 return 0;
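The +1 matters because a deadline expressed in whole jiffies can land almost immediately if it is computed late in the current tick: msecs_to_jiffies(10) == 1 tick at HZ=100, yet taken 9.9 ms into a tick it leaves only 0.1 ms of real waiting. A small userspace model of that rounding (HZ and the timestamps are invented for the demo):

#include <stdio.h>

#define HZ 100				/* 10 ms per tick */
#define TICK_MS (1000.0 / HZ)

int main(void)
{
	unsigned long now_ticks = 7;
	double now_ms = now_ticks * TICK_MS + 9.9;	/* 9.9 ms into tick 7 */
	unsigned long timeout_ticks = 1;		/* msecs_to_jiffies(10) */

	double wait_plain = (now_ticks + timeout_ticks) * TICK_MS - now_ms;
	double wait_padded = (now_ticks + timeout_ticks + 1) * TICK_MS - now_ms;

	printf("requested 10.0 ms, wait without +1: %4.1f ms\n", wait_plain);
	printf("requested 10.0 ms, wait with    +1: %4.1f ms\n", wait_padded);
	return 0;
}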
3735 diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
3736 index 0b879bd0a68c..8fe8d7bdd203 100644
3737 --- a/drivers/mtd/nand/raw/nand_onfi.c
3738 +++ b/drivers/mtd/nand/raw/nand_onfi.c
3739 @@ -173,7 +173,7 @@ int nand_onfi_detect(struct nand_chip *chip)
3740 }
3741
3742 if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
3743 - le16_to_cpu(p->crc)) {
3744 + le16_to_cpu(p[i].crc)) {
3745 if (i)
3746 memcpy(p, &p[i], sizeof(*p));
3747 break;
3748 diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
3749 index d27b39a7223c..a3dcdf25f5f2 100644
3750 --- a/drivers/mtd/nand/raw/orion_nand.c
3751 +++ b/drivers/mtd/nand/raw/orion_nand.c
3752 @@ -180,7 +180,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
3753 mtd->name = "orion_nand";
3754 ret = mtd_device_register(mtd, board->parts, board->nr_parts);
3755 if (ret) {
3756 - nand_release(nc);
3757 + nand_cleanup(nc);
3758 goto no_dev;
3759 }
3760
3761 diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
3762 index c43cb4d92d3d..0429d218fd9f 100644
3763 --- a/drivers/mtd/nand/raw/oxnas_nand.c
3764 +++ b/drivers/mtd/nand/raw/oxnas_nand.c
3765 @@ -140,10 +140,8 @@ static int oxnas_nand_probe(struct platform_device *pdev)
3766 goto err_release_child;
3767
3768 err = mtd_device_register(mtd, NULL, 0);
3769 - if (err) {
3770 - nand_release(chip);
3771 - goto err_release_child;
3772 - }
3773 + if (err)
3774 + goto err_cleanup_nand;
3775
3776 oxnas->chips[nchips] = chip;
3777 ++nchips;
3778 @@ -159,6 +157,8 @@ static int oxnas_nand_probe(struct platform_device *pdev)
3779
3780 return 0;
3781
3782 +err_cleanup_nand:
3783 + nand_cleanup(chip);
3784 err_release_child:
3785 of_node_put(nand_np);
3786 err_clk_unprepare:
3787 diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
3788 index 9cfe7395172a..066ff6dc9a23 100644
3789 --- a/drivers/mtd/nand/raw/pasemi_nand.c
3790 +++ b/drivers/mtd/nand/raw/pasemi_nand.c
3791 @@ -146,7 +146,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
3792 if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
3793 dev_err(dev, "Unable to register MTD device\n");
3794 err = -ENODEV;
3795 - goto out_lpc;
3796 + goto out_cleanup_nand;
3797 }
3798
3799 dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
3800 @@ -154,6 +154,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
3801
3802 return 0;
3803
3804 + out_cleanup_nand:
3805 + nand_cleanup(chip);
3806 out_lpc:
3807 release_region(lpcctl, 4);
3808 out_ior:
3809 diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
3810 index dc0f3074ddbf..3a495b233443 100644
3811 --- a/drivers/mtd/nand/raw/plat_nand.c
3812 +++ b/drivers/mtd/nand/raw/plat_nand.c
3813 @@ -92,7 +92,7 @@ static int plat_nand_probe(struct platform_device *pdev)
3814 if (!err)
3815 return err;
3816
3817 - nand_release(&data->chip);
3818 + nand_cleanup(&data->chip);
3819 out:
3820 if (pdata->ctrl.remove)
3821 pdata->ctrl.remove(pdev);
3822 diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
3823 index b47a9eaff89b..d8c52a016080 100644
3824 --- a/drivers/mtd/nand/raw/sharpsl.c
3825 +++ b/drivers/mtd/nand/raw/sharpsl.c
3826 @@ -183,7 +183,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
3827 return 0;
3828
3829 err_add:
3830 - nand_release(this);
3831 + nand_cleanup(this);
3832
3833 err_scan:
3834 iounmap(sharpsl->io);
3835 diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
3836 index 20f40c0e812c..7c94fc51a611 100644
3837 --- a/drivers/mtd/nand/raw/socrates_nand.c
3838 +++ b/drivers/mtd/nand/raw/socrates_nand.c
3839 @@ -169,7 +169,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
3840 if (!res)
3841 return res;
3842
3843 - nand_release(nand_chip);
3844 + nand_cleanup(nand_chip);
3845
3846 out:
3847 iounmap(host->io_base);
3848 diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
3849 index 89773293c64d..45c376fc571a 100644
3850 --- a/drivers/mtd/nand/raw/sunxi_nand.c
3851 +++ b/drivers/mtd/nand/raw/sunxi_nand.c
3852 @@ -2003,7 +2003,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
3853 ret = mtd_device_register(mtd, NULL, 0);
3854 if (ret) {
3855 dev_err(dev, "failed to register mtd device: %d\n", ret);
3856 - nand_release(nand);
3857 + nand_cleanup(nand);
3858 return ret;
3859 }
3860
3861 diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
3862 index db030f1701ee..4e9a6d94f6e8 100644
3863 --- a/drivers/mtd/nand/raw/tmio_nand.c
3864 +++ b/drivers/mtd/nand/raw/tmio_nand.c
3865 @@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev)
3866 if (!retval)
3867 return retval;
3868
3869 - nand_release(nand_chip);
3870 + nand_cleanup(nand_chip);
3871
3872 err_irq:
3873 tmio_hw_stop(dev, tmio);
3874 diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
3875 index 834f794816a9..018311dc8fe1 100644
3876 --- a/drivers/mtd/nand/raw/xway_nand.c
3877 +++ b/drivers/mtd/nand/raw/xway_nand.c
3878 @@ -210,7 +210,7 @@ static int xway_nand_probe(struct platform_device *pdev)
3879
3880 err = mtd_device_register(mtd, NULL, 0);
3881 if (err)
3882 - nand_release(&data->chip);
3883 + nand_cleanup(&data->chip);
3884
3885 return err;
3886 }
3887 diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
3888 index 0537df06a9b5..ff318472a3ee 100644
3889 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c
3890 +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
3891 @@ -432,7 +432,7 @@ static void emac_timeout(struct net_device *dev)
3892 /* Hardware start transmission.
3893 * Send a packet to media from the upper layer.
3894 */
3895 -static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
3896 +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
3897 {
3898 struct emac_board_info *db = netdev_priv(dev);
3899 unsigned long channel;
3900 @@ -440,7 +440,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
3901
3902 channel = db->tx_fifo_stat & 3;
3903 if (channel == 3)
3904 - return 1;
3905 + return NETDEV_TX_BUSY;
3906
3907 channel = (channel == 1 ? 1 : 0);
3908
3909 diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
3910 index 48de4bee209e..9225733f4fec 100644
3911 --- a/drivers/net/ethernet/amazon/ena/ena_com.c
3912 +++ b/drivers/net/ethernet/amazon/ena/ena_com.c
3913 @@ -2349,6 +2349,9 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
3914 rss->hash_key;
3915 int rc;
3916
3917 + if (unlikely(!func))
3918 + return -EINVAL;
3919 +
3920 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
3921 ENA_ADMIN_RSS_HASH_FUNCTION,
3922 rss->hash_key_dma_addr,
3923 @@ -2361,8 +2364,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
3924 if (rss->hash_func)
3925 rss->hash_func--;
3926
3927 - if (func)
3928 - *func = rss->hash_func;
3929 + *func = rss->hash_func;
3930
3931 if (key)
3932 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
3933 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3934 index 12949f1ec1ea..145334fb18f4 100644
3935 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3936 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3937 @@ -690,6 +690,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
3938 u32 *regs_buff = p;
3939 int err = 0;
3940
3941 + if (unlikely(!self->aq_hw_ops->hw_get_regs))
3942 + return -EOPNOTSUPP;
3943 +
3944 regs->version = 1;
3945
3946 err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
3947 @@ -704,6 +707,9 @@ err_exit:
3948
3949 int aq_nic_get_regs_count(struct aq_nic_s *self)
3950 {
3951 + if (unlikely(!self->aq_hw_ops->hw_get_regs))
3952 + return 0;
3953 +
3954 return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
3955 }
3956
3957 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3958 index 6f01f4e03cef..3d3b1005d076 100644
3959 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3960 +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3961 @@ -69,6 +69,9 @@
3962 #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
3963 TOTAL_DESC * DMA_DESC_SIZE)
3964
3965 +/* Forward declarations */
3966 +static void bcmgenet_set_rx_mode(struct net_device *dev);
3967 +
3968 static inline void bcmgenet_writel(u32 value, void __iomem *offset)
3969 {
3970 /* MIPS chips strapped for BE will automagically configure the
3971 @@ -2852,6 +2855,7 @@ static void bcmgenet_netif_start(struct net_device *dev)
3972 struct bcmgenet_priv *priv = netdev_priv(dev);
3973
3974 /* Start the network engine */
3975 + bcmgenet_set_rx_mode(dev);
3976 bcmgenet_enable_rx_napi(priv);
3977
3978 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
3979 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
3980 index dbc69d8fa05f..5b7c2f9241d0 100644
3981 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
3982 +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
3983 @@ -14,6 +14,7 @@
3984 #include <linux/if_vlan.h>
3985 #include <linux/phy.h>
3986 #include <linux/dim.h>
3987 +#include <linux/ethtool.h>
3988
3989 /* total number of Buffer Descriptors, same for Rx/Tx */
3990 #define TOTAL_DESC 256
3991 @@ -674,6 +675,7 @@ struct bcmgenet_priv {
3992 /* WOL */
3993 struct clk *clk_wol;
3994 u32 wolopts;
3995 + u8 sopass[SOPASS_MAX];
3996
3997 struct bcmgenet_mib_counters mib;
3998
3999 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
4000 index ea20d94bd050..a41f82379369 100644
4001 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
4002 +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
4003 @@ -41,18 +41,13 @@
4004 void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4005 {
4006 struct bcmgenet_priv *priv = netdev_priv(dev);
4007 - u32 reg;
4008
4009 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
4010 wol->wolopts = priv->wolopts;
4011 memset(wol->sopass, 0, sizeof(wol->sopass));
4012
4013 - if (wol->wolopts & WAKE_MAGICSECURE) {
4014 - reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS);
4015 - put_unaligned_be16(reg, &wol->sopass[0]);
4016 - reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS);
4017 - put_unaligned_be32(reg, &wol->sopass[2]);
4018 - }
4019 + if (wol->wolopts & WAKE_MAGICSECURE)
4020 + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
4021 }
4022
4023 /* ethtool function - set WOL (Wake on LAN) settings.
4024 @@ -62,7 +57,6 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4025 {
4026 struct bcmgenet_priv *priv = netdev_priv(dev);
4027 struct device *kdev = &priv->pdev->dev;
4028 - u32 reg;
4029
4030 if (!device_can_wakeup(kdev))
4031 return -ENOTSUPP;
4032 @@ -70,17 +64,8 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4033 if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
4034 return -EINVAL;
4035
4036 - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
4037 - if (wol->wolopts & WAKE_MAGICSECURE) {
4038 - bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
4039 - UMAC_MPD_PW_MS);
4040 - bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
4041 - UMAC_MPD_PW_LS);
4042 - reg |= MPD_PW_EN;
4043 - } else {
4044 - reg &= ~MPD_PW_EN;
4045 - }
4046 - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
4047 + if (wol->wolopts & WAKE_MAGICSECURE)
4048 + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
4049
4050 /* Flag the device and relevant IRQ as wakeup capable */
4051 if (wol->wolopts) {
4052 @@ -120,6 +105,14 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
4053 return retries;
4054 }
4055
4056 +static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv)
4057 +{
4058 + bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
4059 + UMAC_MPD_PW_MS);
4060 + bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
4061 + UMAC_MPD_PW_LS);
4062 +}
4063 +
4064 int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
4065 enum bcmgenet_power_mode mode)
4066 {
4067 @@ -140,13 +133,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
4068
4069 reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
4070 reg |= MPD_EN;
4071 + if (priv->wolopts & WAKE_MAGICSECURE) {
4072 + bcmgenet_set_mpd_password(priv);
4073 + reg |= MPD_PW_EN;
4074 + }
4075 bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
4076
4077 /* Do not leave UniMAC in MPD mode only */
4078 retries = bcmgenet_poll_wol_status(priv);
4079 if (retries < 0) {
4080 reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
4081 - reg &= ~MPD_EN;
4082 + reg &= ~(MPD_EN | MPD_PW_EN);
4083 bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
4084 return retries;
4085 }
4086 @@ -185,7 +182,7 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
4087 reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
4088 if (!(reg & MPD_EN))
4089 return; /* already powered up so skip the rest */
4090 - reg &= ~MPD_EN;
4091 + reg &= ~(MPD_EN | MPD_PW_EN);
4092 bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
4093
4094 /* Disable CRC Forward */
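The suspend-path commit packs the six SecureOn password bytes big-endian into two registers: bytes 0-1 into the 16-bit MPD_PW_MS register and bytes 2-5 into the 32-bit MPD_PW_LS register, mirroring bcmgenet_set_mpd_password() above. A userspace model of that packing, with get_unaligned_be16/32 re-implemented by hand for the demo:

#include <stdio.h>
#include <stdint.h>

static uint16_t be16(const uint8_t *p) { return (p[0] << 8) | p[1]; }
static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
	uint8_t sopass[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* MS gets bytes 0-1, LS gets bytes 2-5 */
	printf("MPD_PW_MS=0x%04x MPD_PW_LS=0x%08x\n",
	       (unsigned)be16(&sopass[0]), (unsigned)be32(&sopass[2]));
	return 0;
}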
4095 diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4096 index a935b20effa3..3177dd8ede8e 100644
4097 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4098 +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4099 @@ -1981,7 +1981,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev,
4100 int i;
4101
4102 if (type != TC_SETUP_QDISC_MQPRIO)
4103 - return -EINVAL;
4104 + return -EOPNOTSUPP;
4105
4106 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4107 num_queues = dpaa2_eth_queue_count(priv);
4108 @@ -1993,7 +1993,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev,
4109 if (num_tc > dpaa2_eth_tc_count(priv)) {
4110 netdev_err(net_dev, "Max %d traffic classes supported\n",
4111 dpaa2_eth_tc_count(priv));
4112 - return -EINVAL;
4113 + return -EOPNOTSUPP;
4114 }
4115
4116 if (!num_tc) {
4117 diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
4118 index 86493fea56e4..f93ed70709c6 100644
4119 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c
4120 +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
4121 @@ -3140,8 +3140,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4122 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4123 if (skb->data_len && hdr_len == len) {
4124 switch (hw->mac_type) {
4125 + case e1000_82544: {
4126 unsigned int pull_size;
4127 - case e1000_82544:
4128 +
4129 /* Make sure we have room to chop off 4 bytes,
4130 * and that the end alignment will work out to
4131 * this hardware's requirements
4132 @@ -3162,6 +3163,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4133 }
4134 len = skb_headlen(skb);
4135 break;
4136 + }
4137 default:
4138 /* do nothing */
4139 break;
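The added braces fix a scoping wart: the original declared pull_size inside the switch body but before the first case label, which is legal C yet confusing and warned about by newer compilers; a braced case block gives the local a clean scope. A tiny standalone illustration:

#include <stdio.h>

int main(void)
{
	int mac_type = 1;

	switch (mac_type) {
	case 1: {
		/* braces open a block, so the local has a clear scope */
		unsigned int pull_size = 4;

		printf("pulling %u bytes\n", pull_size);
		break;
	}
	default:
		break;
	}
	return 0;
}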
4140 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
4141 index 37a2314d3e6b..944abd5eae11 100644
4142 --- a/drivers/net/ethernet/intel/e1000e/e1000.h
4143 +++ b/drivers/net/ethernet/intel/e1000e/e1000.h
4144 @@ -576,7 +576,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
4145
4146 #define er32(reg) __er32(hw, E1000_##reg)
4147
4148 -s32 __ew32_prepare(struct e1000_hw *hw);
4149 void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
4150
4151 #define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
4152 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
4153 index 8c4507838325..108297a099ed 100644
4154 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
4155 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
4156 @@ -119,14 +119,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
4157 * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
4158 * and try again a number of times.
4159 **/
4160 -s32 __ew32_prepare(struct e1000_hw *hw)
4161 +static void __ew32_prepare(struct e1000_hw *hw)
4162 {
4163 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
4164
4165 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
4166 udelay(50);
4167 -
4168 - return i;
4169 }
4170
4171 void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
4172 @@ -607,11 +605,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
4173 {
4174 struct e1000_adapter *adapter = rx_ring->adapter;
4175 struct e1000_hw *hw = &adapter->hw;
4176 - s32 ret_val = __ew32_prepare(hw);
4177
4178 + __ew32_prepare(hw);
4179 writel(i, rx_ring->tail);
4180
4181 - if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
4182 + if (unlikely(i != readl(rx_ring->tail))) {
4183 u32 rctl = er32(RCTL);
4184
4185 ew32(RCTL, rctl & ~E1000_RCTL_EN);
4186 @@ -624,11 +622,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
4187 {
4188 struct e1000_adapter *adapter = tx_ring->adapter;
4189 struct e1000_hw *hw = &adapter->hw;
4190 - s32 ret_val = __ew32_prepare(hw);
4191
4192 + __ew32_prepare(hw);
4193 writel(i, tx_ring->tail);
4194
4195 - if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
4196 + if (unlikely(i != readl(tx_ring->tail))) {
4197 u32 tctl = er32(TCTL);
4198
4199 ew32(TCTL, tctl & ~E1000_TCTL_EN);
4200 @@ -5289,6 +5287,10 @@ static void e1000_watchdog_task(struct work_struct *work)
4201 /* oops */
4202 break;
4203 }
4204 + if (hw->mac.type == e1000_pch_spt) {
4205 + netdev->features &= ~NETIF_F_TSO;
4206 + netdev->features &= ~NETIF_F_TSO6;
4207 + }
4208 }
4209
4210 /* enable transmits in the hardware, need to do this
4211 diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
4212 index 171f0b625407..d68b8aa31b19 100644
4213 --- a/drivers/net/ethernet/intel/ice/ice_common.c
4214 +++ b/drivers/net/ethernet/intel/ice/ice_common.c
4215 @@ -436,6 +436,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
4216 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
4217 {
4218 struct ice_switch_info *sw;
4219 + enum ice_status status;
4220
4221 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
4222 sizeof(*hw->switch_info), GFP_KERNEL);
4223 @@ -446,7 +447,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
4224
4225 INIT_LIST_HEAD(&sw->vsi_list_map_head);
4226
4227 - return ice_init_def_sw_recp(hw);
4228 + status = ice_init_def_sw_recp(hw);
4229 + if (status) {
4230 + devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
4231 + return status;
4232 + }
4233 + return 0;
4234 }
4235
4236 /**
4237 diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
4238 index c68709c7ef81..2e9c97bad3c3 100644
4239 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c
4240 +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
4241 @@ -199,7 +199,9 @@ unwind_alloc_rq_bufs:
4242 cq->rq.r.rq_bi[i].pa = 0;
4243 cq->rq.r.rq_bi[i].size = 0;
4244 }
4245 + cq->rq.r.rq_bi = NULL;
4246 devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
4247 + cq->rq.dma_head = NULL;
4248
4249 return ICE_ERR_NO_MEMORY;
4250 }
4251 @@ -245,7 +247,9 @@ unwind_alloc_sq_bufs:
4252 cq->sq.r.sq_bi[i].pa = 0;
4253 cq->sq.r.sq_bi[i].size = 0;
4254 }
4255 + cq->sq.r.sq_bi = NULL;
4256 devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
4257 + cq->sq.dma_head = NULL;
4258
4259 return ICE_ERR_NO_MEMORY;
4260 }
4261 @@ -304,6 +308,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
4262 return 0;
4263 }
4264
4265 +#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
4266 +do { \
4267 + int i; \
4268 + /* free descriptors */ \
4269 + if ((qi)->ring.r.ring##_bi) \
4270 + for (i = 0; i < (qi)->num_##ring##_entries; i++) \
4271 + if ((qi)->ring.r.ring##_bi[i].pa) { \
4272 + dmam_free_coherent(ice_hw_to_dev(hw), \
4273 + (qi)->ring.r.ring##_bi[i].size, \
4274 + (qi)->ring.r.ring##_bi[i].va, \
4275 + (qi)->ring.r.ring##_bi[i].pa); \
4276 + (qi)->ring.r.ring##_bi[i].va = NULL;\
4277 + (qi)->ring.r.ring##_bi[i].pa = 0;\
4278 + (qi)->ring.r.ring##_bi[i].size = 0;\
4279 + } \
4280 + /* free the buffer info list */ \
4281 + if ((qi)->ring.cmd_buf) \
4282 + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
4283 + /* free DMA head */ \
4284 + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
4285 +} while (0)
4286 +
4287 /**
4288 * ice_init_sq - main initialization routine for Control ATQ
4289 * @hw: pointer to the hardware structure
4290 @@ -357,6 +383,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
4291 goto init_ctrlq_exit;
4292
4293 init_ctrlq_free_rings:
4294 + ICE_FREE_CQ_BUFS(hw, cq, sq);
4295 ice_free_cq_ring(hw, &cq->sq);
4296
4297 init_ctrlq_exit:
4298 @@ -416,33 +443,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
4299 goto init_ctrlq_exit;
4300
4301 init_ctrlq_free_rings:
4302 + ICE_FREE_CQ_BUFS(hw, cq, rq);
4303 ice_free_cq_ring(hw, &cq->rq);
4304
4305 init_ctrlq_exit:
4306 return ret_code;
4307 }
4308
4309 -#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
4310 -do { \
4311 - int i; \
4312 - /* free descriptors */ \
4313 - for (i = 0; i < (qi)->num_##ring##_entries; i++) \
4314 - if ((qi)->ring.r.ring##_bi[i].pa) { \
4315 - dmam_free_coherent(ice_hw_to_dev(hw), \
4316 - (qi)->ring.r.ring##_bi[i].size,\
4317 - (qi)->ring.r.ring##_bi[i].va,\
4318 - (qi)->ring.r.ring##_bi[i].pa);\
4319 - (qi)->ring.r.ring##_bi[i].va = NULL; \
4320 - (qi)->ring.r.ring##_bi[i].pa = 0; \
4321 - (qi)->ring.r.ring##_bi[i].size = 0; \
4322 - } \
4323 - /* free the buffer info list */ \
4324 - if ((qi)->ring.cmd_buf) \
4325 - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
4326 - /* free DMA head */ \
4327 - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
4328 -} while (0)
4329 -
4330 /**
4331 * ice_shutdown_sq - shutdown the Control ATQ
4332 * @hw: pointer to the hardware structure
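ICE_FREE_CQ_BUFS reaches either queue's members from one body via ## token pasting (ring##_bi, num_##ring##_entries), and moving it above ice_init_sq()/ice_init_rq() plus the new NULL guard lets it run on the partially-initialized error paths. A minimal userspace demonstration of the pasting trick:

#include <stdio.h>

struct ctl_q { int num_sq_entries, num_rq_entries; };

/* ## pastes the ring name into the member name at expansion time */
#define NUM_ENTRIES(qi, ring)	((qi)->num_##ring##_entries)

int main(void)
{
	struct ctl_q cq = { .num_sq_entries = 32, .num_rq_entries = 64 };

	printf("sq=%d rq=%d\n", NUM_ENTRIES(&cq, sq), NUM_ENTRIES(&cq, rq));
	return 0;
}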
4333 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
4334 index 2408f0de95fc..d0ccb7ad447b 100644
4335 --- a/drivers/net/ethernet/intel/ice/ice_main.c
4336 +++ b/drivers/net/ethernet/intel/ice/ice_main.c
4337 @@ -2900,7 +2900,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4338 if (err) {
4339 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4340 err = -EIO;
4341 - goto err_init_interrupt_unroll;
4342 + goto err_init_vsi_unroll;
4343 }
4344
4345 /* Driver is mostly up */
4346 @@ -2986,6 +2986,7 @@ err_msix_misc_unroll:
4347 ice_free_irq_msix_misc(pf);
4348 err_init_interrupt_unroll:
4349 ice_clear_interrupt_scheme(pf);
4350 +err_init_vsi_unroll:
4351 devm_kfree(dev, pf->vsi);
4352 err_init_pf_unroll:
4353 ice_deinit_pf(pf);
4354 diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
4355 index 8959418776f6..f80933320fd3 100644
4356 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
4357 +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
4358 @@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev,
4359 u32 speed;
4360 u32 supported, advertising;
4361
4362 - status = rd32(E1000_STATUS);
4363 + status = pm_runtime_suspended(&adapter->pdev->dev) ?
4364 + 0 : rd32(E1000_STATUS);
4365 if (hw->phy.media_type == e1000_media_type_copper) {
4366
4367 supported = (SUPPORTED_10baseT_Half |
4368 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
4369 index 0bd1294ba517..39c5e6fdb72c 100644
4370 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
4371 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
4372 @@ -2243,7 +2243,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
4373 }
4374
4375 /* Configure pause time (2 TCs per register) */
4376 - reg = hw->fc.pause_time * 0x00010001;
4377 + reg = hw->fc.pause_time * 0x00010001U;
4378 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
4379 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4380
4381 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4382 index a26f9fb95ac0..edaa0bffa5c3 100644
4383 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4384 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4385 @@ -2254,7 +2254,8 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
4386 rx_buffer->page_offset ^= truesize;
4387 #else
4388 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
4389 - SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
4390 + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
4391 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
4392 SKB_DATA_ALIGN(size);
4393
4394 rx_buffer->page_offset += truesize;
4395 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
4396 index c4eed5bbcd45..066bada4ccd1 100644
4397 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
4398 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
4399 @@ -1428,6 +1428,7 @@ out:
4400
4401 #ifdef CONFIG_MLX5_CORE_IPOIB
4402
4403 +#define MLX5_IB_GRH_SGID_OFFSET 8
4404 #define MLX5_IB_GRH_DGID_OFFSET 24
4405 #define MLX5_GID_SIZE 16
4406
4407 @@ -1441,6 +1442,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
4408 struct net_device *netdev;
4409 struct mlx5e_priv *priv;
4410 char *pseudo_header;
4411 + u32 flags_rqpn;
4412 u32 qpn;
4413 u8 *dgid;
4414 u8 g;
4415 @@ -1462,7 +1464,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
4416 tstamp = &priv->tstamp;
4417 stats = &priv->channel_stats[rq->ix].rq;
4418
4419 - g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
4420 + flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
4421 + g = (flags_rqpn >> 28) & 3;
4422 dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
4423 if ((!g) || dgid[0] != 0xff)
4424 skb->pkt_type = PACKET_HOST;
4425 @@ -1471,9 +1474,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
4426 else
4427 skb->pkt_type = PACKET_MULTICAST;
4428
4429 - /* TODO: IB/ipoib: Allow mcast packets from other VFs
4430 - * 68996a6e760e5c74654723eeb57bf65628ae87f4
4431 + /* Drop packets that this interface sent, i.e. multicast packets
4432 + * that the HCA has replicated.
4433 */
4434 + if (g && (qpn == (flags_rqpn & 0xffffff)) &&
4435 + (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
4436 + MLX5_GID_SIZE) == 0)) {
4437 + skb->dev = NULL;
4438 + return;
4439 + }
4440
4441 skb_pull(skb, MLX5_IB_GRH_BYTES);
4442
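The echo test combines two facts about a looped-back multicast: the 24-bit source QPN in the low bits of flags_rqpn equals our own QPN, and the GRH source GID at offset 8 equals our interface GID (stored at dev_addr + 4). A userspace model of that check, with the offsets taken from the defines above and the buffers fabricated for the demo:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define GRH_SGID_OFFSET 8
#define GID_SIZE 16

int main(void)
{
	uint8_t our_gid[GID_SIZE] = { 0xfe, 0x80, 0x00, 0x01 };	/* rest zero */
	uint8_t pkt[40] = { 0 };				/* 40-byte GRH */
	uint32_t flags_rqpn = (1u << 28) | 0x000abc;	/* g=1, src qpn 0xabc */
	uint32_t our_qpn = 0x000abc;

	memcpy(pkt + GRH_SGID_OFFSET, our_gid, GID_SIZE);	/* echoed frame */

	int g = (flags_rqpn >> 28) & 3;
	int self_sent = g && (flags_rqpn & 0xffffff) == our_qpn &&
			!memcmp(pkt + GRH_SGID_OFFSET, our_gid, GID_SIZE);

	printf("drop=%d\n", self_sent);	/* 1: our own replicated multicast */
	return 0;
}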
4443 diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
4444 index 544012a67221..1d59ef367a85 100644
4445 --- a/drivers/net/ethernet/nxp/lpc_eth.c
4446 +++ b/drivers/net/ethernet/nxp/lpc_eth.c
4447 @@ -815,7 +815,8 @@ static int lpc_mii_init(struct netdata_local *pldat)
4448 if (mdiobus_register(pldat->mii_bus))
4449 goto err_out_unregister_bus;
4450
4451 - if (lpc_mii_probe(pldat->ndev) != 0)
4452 + err = lpc_mii_probe(pldat->ndev);
4453 + if (err)
4454 goto err_out_unregister_bus;
4455
4456 return 0;
4457 diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
4458 index 1f27f9866b80..61b5aa3e5b98 100644
4459 --- a/drivers/net/ethernet/qlogic/qede/qede.h
4460 +++ b/drivers/net/ethernet/qlogic/qede/qede.h
4461 @@ -574,12 +574,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
4462 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
4463 #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
4464 #define NUM_RX_BDS_MIN 128
4465 +#define NUM_RX_BDS_KDUMP_MIN 63
4466 #define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
4467
4468 #define TX_RING_SIZE_POW 13
4469 #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
4470 #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
4471 #define NUM_TX_BDS_MIN 128
4472 +#define NUM_TX_BDS_KDUMP_MIN 63
4473 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
4474
4475 #define QEDE_MIN_PKT_LEN 64
4476 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
4477 index ba53612ae0df..1da6b5bda80a 100644
4478 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c
4479 +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
4480 @@ -29,6 +29,7 @@
4481 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
4482 * SOFTWARE.
4483 */
4484 +#include <linux/crash_dump.h>
4485 #include <linux/module.h>
4486 #include <linux/pci.h>
4487 #include <linux/version.h>
4488 @@ -707,8 +708,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
4489 edev->dp_module = dp_module;
4490 edev->dp_level = dp_level;
4491 edev->ops = qed_ops;
4492 - edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
4493 - edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
4494 +
4495 + if (is_kdump_kernel()) {
4496 + edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
4497 + edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
4498 + } else {
4499 + edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
4500 + edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
4501 + }
4502
4503 DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
4504 info->num_queues, info->num_queues);
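is_kdump_kernel() (from the newly included <linux/crash_dump.h>) is the usual way a driver detects the memory-starved crash kernel and shrinks its footprint. A hedged sketch of the sizing choice in isolation — the helper name is invented:

#include <linux/types.h>
#include <linux/crash_dump.h>

static u16 pick_ring_size(u16 normal, u16 kdump_min)
{
	/* Crash kernels boot with very little reserved memory, so trade
	 * throughput for footprint by using the minimum ring size there. */
	return is_kdump_kernel() ? kdump_min : normal;
}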
4505 diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
4506 index 38b7f6d35759..702fdc393da0 100644
4507 --- a/drivers/net/ethernet/ti/davinci_mdio.c
4508 +++ b/drivers/net/ethernet/ti/davinci_mdio.c
4509 @@ -397,6 +397,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
4510 data->dev = dev;
4511
4512 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4513 + if (!res)
4514 + return -EINVAL;
4515 data->regs = devm_ioremap(dev, res->start, resource_size(res));
4516 if (!data->regs)
4517 return -ENOMEM;
4518 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
4519 index 0ce1004a8d0d..9d3209ae41cf 100644
4520 --- a/drivers/net/macvlan.c
4521 +++ b/drivers/net/macvlan.c
4522 @@ -447,6 +447,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
4523 int ret;
4524 rx_handler_result_t handle_res;
4525
4526 + /* Packets from dev_loopback_xmit() do not have L2 header, bail out */
4527 + if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
4528 + return RX_HANDLER_PASS;
4529 +
4530 port = macvlan_port_get_rcu(skb->dev);
4531 if (is_multicast_ether_addr(eth->h_dest)) {
4532 unsigned int hash;
4533 diff --git a/drivers/net/veth.c b/drivers/net/veth.c
4534 index 9f3c839f9e5f..88cfd63f08a6 100644
4535 --- a/drivers/net/veth.c
4536 +++ b/drivers/net/veth.c
4537 @@ -510,13 +510,15 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
4538 struct veth_xdp_tx_bq *bq)
4539 {
4540 void *hard_start = frame->data - frame->headroom;
4541 - void *head = hard_start - sizeof(struct xdp_frame);
4542 int len = frame->len, delta = 0;
4543 struct xdp_frame orig_frame;
4544 struct bpf_prog *xdp_prog;
4545 unsigned int headroom;
4546 struct sk_buff *skb;
4547
4548 + /* bpf_xdp_adjust_head() ensures BPF cannot access the xdp_frame area */
4549 + hard_start -= sizeof(struct xdp_frame);
4550 +
4551 rcu_read_lock();
4552 xdp_prog = rcu_dereference(rq->xdp_prog);
4553 if (likely(xdp_prog)) {
4554 @@ -538,7 +540,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
4555 break;
4556 case XDP_TX:
4557 orig_frame = *frame;
4558 - xdp.data_hard_start = head;
4559 xdp.rxq->mem = frame->mem;
4560 if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
4561 trace_xdp_exception(rq->dev, xdp_prog, act);
4562 @@ -550,7 +551,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
4563 goto xdp_xmit;
4564 case XDP_REDIRECT:
4565 orig_frame = *frame;
4566 - xdp.data_hard_start = head;
4567 xdp.rxq->mem = frame->mem;
4568 if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
4569 frame = &orig_frame;
4570 @@ -572,7 +572,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
4571 rcu_read_unlock();
4572
4573 headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
4574 - skb = veth_build_skb(head, headroom, len, 0);
4575 + skb = veth_build_skb(hard_start, headroom, len, 0);
4576 if (!skb) {
4577 xdp_return_frame(frame);
4578 goto err;
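The pointer math assumes frame->headroom was computed excluding the xdp_frame struct that sits at the very start of the buffer, so frame->data - frame->headroom lands just past the metadata and subtracting sizeof(struct xdp_frame) recovers the true buffer start, which is what veth_build_skb() now receives. A tiny pointer model of that layout (struct and sizes invented):

#include <stdio.h>
#include <stdint.h>

struct xdp_frame_demo { void *data; uint16_t len, headroom; };

int main(void)
{
	uint8_t buf[2048];
	/* Metadata lives at the buffer start; headroom is counted past it */
	unsigned int headroom = 240;
	uint8_t *data = buf + sizeof(struct xdp_frame_demo) + headroom;

	uint8_t *hard_start = data - headroom;		/* just past the struct */
	hard_start -= sizeof(struct xdp_frame_demo);	/* now covers it: == buf */

	printf("hard_start == buf: %s\n", hard_start == buf ? "yes" : "no");
	return 0;
}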
4579 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
4580 index 0a38c76688ab..5e2571d23ab9 100644
4581 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
4582 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
4583 @@ -702,6 +702,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
4584 *hfunc = ETH_RSS_HASH_TOP;
4585 if (!p)
4586 return 0;
4587 + if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
4588 + return 0;
4589 while (n--)
4590 p[n] = rssConf->indTable[n];
4591 return 0;
4592 diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
4593 index 30c080094af1..bd5fa4dbab9c 100644
4594 --- a/drivers/net/wireless/ath/ath10k/htt.h
4595 +++ b/drivers/net/wireless/ath/ath10k/htt.h
4596 @@ -2033,6 +2033,7 @@ struct ath10k_htt_tx_ops {
4597 int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
4598 u8 max_subfrms_ampdu,
4599 u8 max_subfrms_amsdu);
4600 + void (*htt_flush_tx)(struct ath10k_htt *htt);
4601 };
4602
4603 static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
4604 @@ -2072,6 +2073,12 @@ static inline int ath10k_htt_tx(struct ath10k_htt *htt,
4605 return htt->tx_ops->htt_tx(htt, txmode, msdu);
4606 }
4607
4608 +static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
4609 +{
4610 + if (htt->tx_ops->htt_flush_tx)
4611 + htt->tx_ops->htt_flush_tx(htt);
4612 +}
4613 +
4614 static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
4615 {
4616 if (!htt->tx_ops->htt_alloc_txbuff)
4617 diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
4618 index a182c0944cc7..735482877a1f 100644
4619 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c
4620 +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
4621 @@ -529,9 +529,14 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
4622 htt->tx_mem_allocated = false;
4623 }
4624
4625 -void ath10k_htt_tx_stop(struct ath10k_htt *htt)
4626 +static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
4627 {
4628 idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
4629 +}
4630 +
4631 +void ath10k_htt_tx_stop(struct ath10k_htt *htt)
4632 +{
4633 + ath10k_htt_flush_tx_queue(htt);
4634 idr_destroy(&htt->pending_tx);
4635 }
4636
4637 @@ -1774,6 +1779,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
4638 .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
4639 .htt_tx = ath10k_htt_tx_hl,
4640 .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
4641 + .htt_flush_tx = ath10k_htt_flush_tx_queue,
4642 };
4643
4644 void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
4645 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
4646 index 36d24ea126a2..d373602a8014 100644
4647 --- a/drivers/net/wireless/ath/ath10k/mac.c
4648 +++ b/drivers/net/wireless/ath/ath10k/mac.c
4649 @@ -3911,6 +3911,9 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
4650 if (ret) {
4651 ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
4652 ret);
4653 + /* remove this msdu from idr tracking */
4654 + ath10k_wmi_cleanup_mgmt_tx_send(ar, skb);
4655 +
4656 dma_unmap_single(ar->dev, paddr, skb->len,
4657 DMA_TO_DEVICE);
4658 ieee80211_free_txskb(ar->hw, skb);
4659 @@ -7082,6 +7085,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4660 ath10k_wmi_peer_flush(ar, arvif->vdev_id,
4661 arvif->bssid, bitmap);
4662 }
4663 + ath10k_htt_flush_tx(&ar->htt);
4664 }
4665 return;
4666 }
4667 @@ -8811,7 +8815,6 @@ int ath10k_mac_register(struct ath10k *ar)
4668 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
4669
4670 if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
4671 - ar->hw->wiphy->max_sched_scan_reqs = 1;
4672 ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
4673 ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
4674 ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
4675 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
4676 index 0a727502d14c..fd49d3419e79 100644
4677 --- a/drivers/net/wireless/ath/ath10k/pci.c
4678 +++ b/drivers/net/wireless/ath/ath10k/pci.c
4679 @@ -2074,6 +2074,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
4680 ath10k_pci_irq_sync(ar);
4681 napi_synchronize(&ar->napi);
4682 napi_disable(&ar->napi);
4683 + cancel_work_sync(&ar_pci->dump_work);
4684
4685 /* Most likely the device has HTT Rx ring configured. The only way to
4686 * prevent the device from accessing (and possible corrupting) host
4687 diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
4688 index 39abf8b12903..f46b9083bbf1 100644
4689 --- a/drivers/net/wireless/ath/ath10k/txrx.c
4690 +++ b/drivers/net/wireless/ath/ath10k/txrx.c
4691 @@ -84,9 +84,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
4692 wake_up(&htt->empty_tx_wq);
4693 spin_unlock_bh(&htt->tx_lock);
4694
4695 + rcu_read_lock();
4696 if (txq && txq->sta && skb_cb->airtime_est)
4697 ieee80211_sta_register_airtime(txq->sta, txq->tid,
4698 skb_cb->airtime_est, 0);
4699 + rcu_read_unlock();
4700
4701 if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
4702 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
4703 diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
4704 index 1491c25518bb..edccabc667e8 100644
4705 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
4706 +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
4707 @@ -133,6 +133,7 @@ struct wmi_ops {
4708 struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
4709 struct sk_buff *skb,
4710 dma_addr_t paddr);
4711 + int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
4712 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
4713 u32 log_level);
4714 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
4715 @@ -441,6 +442,15 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
4716 return ar->wmi.ops->get_txbf_conf_scheme(ar);
4717 }
4718
4719 +static inline int
4720 +ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
4721 +{
4722 + if (!ar->wmi.ops->cleanup_mgmt_tx_send)
4723 + return -EOPNOTSUPP;
4724 +
4725 + return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
4726 +}
4727 +
4728 static inline int
4729 ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
4730 dma_addr_t paddr)
4731 diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
4732 index eb0c963d9fd5..9d5b9df29c35 100644
4733 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
4734 +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
4735 @@ -2837,6 +2837,18 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
4736 return skb;
4737 }
4738
4739 +static int
4740 +ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
4741 + struct sk_buff *msdu)
4742 +{
4743 + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
4744 + struct ath10k_wmi *wmi = &ar->wmi;
4745 +
4746 + idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
4747 +
4748 + return 0;
4749 +}
4750 +
4751 static int
4752 ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
4753 dma_addr_t paddr)
4754 @@ -2911,6 +2923,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
4755 if (desc_id < 0)
4756 goto err_free_skb;
4757
4758 + cb->msdu_id = desc_id;
4759 +
4760 ptr = (void *)skb->data;
4761 tlv = ptr;
4762 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
4763 @@ -4339,6 +4353,7 @@ static const struct wmi_ops wmi_tlv_ops = {
4764 .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
4765 /* .gen_mgmt_tx = not implemented; HTT is used */
4766 .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
4767 + .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
4768 .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
4769 .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
4770 .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
4771 diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
4772 index 51934d191f33..1ab09e1c9ec5 100644
4773 --- a/drivers/net/wireless/ath/carl9170/fw.c
4774 +++ b/drivers/net/wireless/ath/carl9170/fw.c
4775 @@ -338,9 +338,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
4776 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
4777
4778 if (SUPP(CARL9170FW_WLANTX_CAB)) {
4779 - if_comb_types |=
4780 - BIT(NL80211_IFTYPE_AP) |
4781 - BIT(NL80211_IFTYPE_P2P_GO);
4782 + if_comb_types |= BIT(NL80211_IFTYPE_AP);
4783
4784 #ifdef CONFIG_MAC80211_MESH
4785 if_comb_types |=
4786 diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
4787 index 40a8054f8aa6..21ca62b06214 100644
4788 --- a/drivers/net/wireless/ath/carl9170/main.c
4789 +++ b/drivers/net/wireless/ath/carl9170/main.c
4790 @@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar,
4791 ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
4792 (vif->type != NL80211_IFTYPE_AP));
4793
4794 - /* While the driver supports HW offload in a single
4795 - * P2P client configuration, it doesn't support HW
4796 - * offload in the favourit, concurrent P2P GO+CLIENT
4797 - * configuration. Hence, HW offload will always be
4798 - * disabled for P2P.
4799 + /* The driver used to have P2P GO+CLIENT support,
4800 + * but since this was dropped and we don't know if
4801 + * there are any gremlins lurking in the shadows,
4802 + * it is best to keep HW offload disabled for P2P.
4803 */
4804 ar->disable_offload |= vif->p2p;
4805
4806 @@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
4807 if (vif->type == NL80211_IFTYPE_STATION)
4808 break;
4809
4810 - /* P2P GO [master] use-case
4811 - * Because the P2P GO station is selected dynamically
4812 - * by all participating peers of a WIFI Direct network,
4813 - * the driver has be able to change the main interface
4814 - * operating mode on the fly.
4815 - */
4816 - if (main_vif->p2p && vif->p2p &&
4817 - vif->type == NL80211_IFTYPE_AP) {
4818 - old_main = main_vif;
4819 - break;
4820 - }
4821 -
4822 err = -EBUSY;
4823 rcu_read_unlock();
4824
4825 diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
4826 index 79998a3ddb7a..ad051f34e65b 100644
4827 --- a/drivers/net/wireless/ath/wcn36xx/main.c
4828 +++ b/drivers/net/wireless/ath/wcn36xx/main.c
4829 @@ -1341,7 +1341,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
4830 if (addr && ret != ETH_ALEN) {
4831 wcn36xx_err("invalid local-mac-address\n");
4832 ret = -EINVAL;
4833 - goto out_wq;
4834 + goto out_destroy_ept;
4835 } else if (addr) {
4836 wcn36xx_info("mac address: %pM\n", addr);
4837 SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
4838 @@ -1349,7 +1349,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
4839
4840 ret = wcn36xx_platform_get_resources(wcn, pdev);
4841 if (ret)
4842 - goto out_wq;
4843 + goto out_destroy_ept;
4844
4845 wcn36xx_init_ieee80211(wcn);
4846 ret = ieee80211_register_hw(wcn->hw);
4847 @@ -1361,6 +1361,8 @@ static int wcn36xx_probe(struct platform_device *pdev)
4848 out_unmap:
4849 iounmap(wcn->ccu_base);
4850 iounmap(wcn->dxe_base);
4851 +out_destroy_ept:
4852 + rpmsg_destroy_ept(wcn->smd_channel);
4853 out_wq:
4854 ieee80211_free_hw(hw);
4855 out_err:
4856 diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
4857 index b85603e91c7a..3432dfe1ddb4 100644
4858 --- a/drivers/net/wireless/broadcom/b43/main.c
4859 +++ b/drivers/net/wireless/broadcom/b43/main.c
4860 @@ -5569,7 +5569,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
4861 /* fill hw info */
4862 ieee80211_hw_set(hw, RX_INCLUDES_FCS);
4863 ieee80211_hw_set(hw, SIGNAL_DBM);
4864 -
4865 + ieee80211_hw_set(hw, MFP_CAPABLE);
4866 hw->wiphy->interface_modes =
4867 BIT(NL80211_IFTYPE_AP) |
4868 BIT(NL80211_IFTYPE_MESH_POINT) |
4869 diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
4870 index 8b6b657c4b85..5208a39fd6f7 100644
4871 --- a/drivers/net/wireless/broadcom/b43legacy/main.c
4872 +++ b/drivers/net/wireless/broadcom/b43legacy/main.c
4873 @@ -3801,6 +3801,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
4874 /* fill hw info */
4875 ieee80211_hw_set(hw, RX_INCLUDES_FCS);
4876 ieee80211_hw_set(hw, SIGNAL_DBM);
4877 + ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */
4878
4879 hw->wiphy->interface_modes =
4880 BIT(NL80211_IFTYPE_AP) |
4881 diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c
4882 index e9b23c2e5bd4..efd63f4ce74f 100644
4883 --- a/drivers/net/wireless/broadcom/b43legacy/xmit.c
4884 +++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c
4885 @@ -558,6 +558,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
4886 default:
4887 b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n",
4888 chanstat);
4889 + goto drop;
4890 }
4891
4892 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
4893 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
4894 index 2c3526aeca6f..545015610cf8 100644
4895 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
4896 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
4897 @@ -283,13 +283,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
4898 if (!err)
4899 ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC);
4900
4901 + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
4902 +
4903 if (drvr->settings->feature_disable) {
4904 brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
4905 ifp->drvr->feat_flags,
4906 drvr->settings->feature_disable);
4907 ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
4908 }
4909 - brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
4910
4911 brcmf_feat_firmware_overrides(drvr);
4912
4913 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
4914 index ad18c2f1a806..524f9dd2323d 100644
4915 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
4916 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
4917 @@ -5,10 +5,9 @@
4918 *
4919 * GPL LICENSE SUMMARY
4920 *
4921 - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
4922 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
4923 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
4924 - * Copyright(c) 2018 - 2019 Intel Corporation
4925 + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
4926 *
4927 * This program is free software; you can redistribute it and/or modify
4928 * it under the terms of version 2 of the GNU General Public License as
4929 @@ -28,10 +27,9 @@
4930 *
4931 * BSD LICENSE
4932 *
4933 - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
4934 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
4935 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
4936 - * Copyright(c) 2018 - 2019 Intel Corporation
4937 + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
4938 * All rights reserved.
4939 *
4940 * Redistribution and use in source and binary forms, with or without
4941 @@ -478,6 +476,11 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
4942 if (kstrtou16(buf, 0, &amsdu_len))
4943 return -EINVAL;
4944
4945 + /* only change from debug set <-> debug unset */
4946 + if ((amsdu_len && mvmsta->orig_amsdu_len) ||
4947 + (!amsdu_len && !mvmsta->orig_amsdu_len))
4948 + return -EBUSY;
4949 +
4950 if (amsdu_len) {
4951 mvmsta->orig_amsdu_len = sta->max_amsdu_len;
4952 sta->max_amsdu_len = amsdu_len;
4953 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4954 index 6ca087ffd163..ed92a8e8cd51 100644
4955 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4956 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4957 @@ -1193,14 +1193,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
4958 */
4959 flush_work(&mvm->roc_done_wk);
4960
4961 + iwl_mvm_rm_aux_sta(mvm);
4962 +
4963 iwl_mvm_stop_device(mvm);
4964
4965 iwl_mvm_async_handlers_purge(mvm);
4966 /* async_handlers_list is empty and will stay empty: HW is stopped */
4967
4968 - /* the fw is stopped, the aux sta is dead: clean up driver state */
4969 - iwl_mvm_del_aux_sta(mvm);
4970 -
4971 /*
4972 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
4973 * hw (as restart_complete() won't be called in this case) and mac80211
4974 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
4975 index 5b2bd603febf..be8bc0601d7b 100644
4976 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
4977 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
4978 @@ -367,14 +367,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
4979 u16 size = le32_to_cpu(notif->amsdu_size);
4980 int i;
4981
4982 - /*
4983 - * In debug sta->max_amsdu_len < size
4984 - * so also check with orig_amsdu_len which holds the original
4985 - * data before debugfs changed the value
4986 - */
4987 - if (WARN_ON(sta->max_amsdu_len < size &&
4988 - mvmsta->orig_amsdu_len < size))
4989 + if (sta->max_amsdu_len < size) {
4990 + /*
4991 + * In debug mode sta->max_amsdu_len can be less than size,
4992 + * so also check orig_amsdu_len, which holds the
4993 + * original value before debugfs changed it.
4994 + */
4995 + WARN_ON(mvmsta->orig_amsdu_len < size);
4996 goto out;
4997 + }
4998
4999 mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
5000 mvmsta->max_amsdu_len = size;
5001 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
5002 index 71d339e90a9e..41f62793a57c 100644
5003 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
5004 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
5005 @@ -2080,16 +2080,24 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
5006 return ret;
5007 }
5008
5009 -void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
5010 +int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
5011 {
5012 - iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
5013 -}
5014 + int ret;
5015
5016 -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
5017 -{
5018 lockdep_assert_held(&mvm->mutex);
5019
5020 + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
5021 + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
5022 + if (ret)
5023 + IWL_WARN(mvm, "Failed sending remove station\n");
5024 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
5025 +
5026 + return ret;
5027 +}
5028 +
5029 +void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
5030 +{
5031 + iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
5032 }
5033
5034 /*
5035 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
5036 index 8d70093847cb..da2d1ac01229 100644
5037 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
5038 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
5039 @@ -8,7 +8,7 @@
5040 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
5041 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
5042 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
5043 - * Copyright(c) 2018 - 2019 Intel Corporation
5044 + * Copyright(c) 2018 - 2020 Intel Corporation
5045 *
5046 * This program is free software; you can redistribute it and/or modify
5047 * it under the terms of version 2 of the GNU General Public License as
5048 @@ -31,7 +31,7 @@
5049 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
5050 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
5051 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
5052 - * Copyright(c) 2018 - 2019 Intel Corporation
5053 + * Copyright(c) 2018 - 2020 Intel Corporation
5054 * All rights reserved.
5055 *
5056 * Redistribution and use in source and binary forms, with or without
5057 @@ -541,7 +541,7 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
5058 int tid, u8 queue, bool start);
5059
5060 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
5061 -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
5062 +int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm);
5063
5064 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
5065 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
5066 diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
5067 index 25ac9db35dbf..bedc09215088 100644
5068 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
5069 +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
5070 @@ -247,10 +247,10 @@ static void if_usb_disconnect(struct usb_interface *intf)
5071
5072 lbtf_deb_enter(LBTF_DEB_MAIN);
5073
5074 - if_usb_reset_device(priv);
5075 -
5076 - if (priv)
5077 + if (priv) {
5078 + if_usb_reset_device(priv);
5079 lbtf_remove_card(priv);
5080 + }
5081
5082 /* Unlink and free urb */
5083 if_usb_free(cardp);
5084 diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
5085 index d89684168500..9e6dc289ec3e 100644
5086 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
5087 +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
5088 @@ -1496,7 +1496,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
5089 int idx, u8 *mac, struct station_info *sinfo)
5090 {
5091 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
5092 - static struct mwifiex_sta_node *node;
5093 + struct mwifiex_sta_node *node;
5094 + int i;
5095
5096 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
5097 priv->media_connected && idx == 0) {
5098 @@ -1506,13 +1507,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
5099 mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST,
5100 HostCmd_ACT_GEN_GET, 0, NULL, true);
5101
5102 - if (node && (&node->list == &priv->sta_list)) {
5103 - node = NULL;
5104 - return -ENOENT;
5105 - }
5106 -
5107 - node = list_prepare_entry(node, &priv->sta_list, list);
5108 - list_for_each_entry_continue(node, &priv->sta_list, list) {
5109 + i = 0;
5110 + list_for_each_entry(node, &priv->sta_list, list) {
5111 + if (i++ != idx)
5112 + continue;
5113 ether_addr_copy(mac, node->mac_addr);
5114 return mwifiex_dump_station_info(priv, node, sinfo);
5115 }
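The rewrite replaces a function-local static cursor, which is shared across all callers and breaks as soon as two dumps interleave, with a stateless walk that counts up to the requested index. The pattern, sketched (the trailing -ENOENT is assumed from context, not shown in the hunk):

    /* stateless dump: restart from the list head on every call */
    int i = 0;

    list_for_each_entry(node, &priv->sta_list, list) {
            if (i++ != idx)
                    continue;
            ether_addr_copy(mac, node->mac_addr);
            return mwifiex_dump_station_info(priv, node, sinfo);
    }
    return -ENOENT;     /* idx is past the end of the list */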
5116 diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
5117 index 8f3d36a15e17..cbff0dfc9631 100644
5118 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
5119 +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
5120 @@ -143,8 +143,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
5121 struct ieee80211_sta *sta;
5122 struct mt76_rx_tid *tid;
5123 bool sn_less;
5124 - u16 seqno, head, size;
5125 - u8 ackp, idx;
5126 + u16 seqno, head, size, idx;
5127 + u8 ackp;
5128
5129 __skb_queue_tail(frames, skb);
5130
5131 @@ -230,7 +230,7 @@ out:
5132 }
5133
5134 int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
5135 - u16 ssn, u8 size)
5136 + u16 ssn, u16 size)
5137 {
5138 struct mt76_rx_tid *tid;
5139
5140 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
5141
5142 static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
5143 {
5144 - u8 size = tid->size;
5145 + u16 size = tid->size;
5146 int i;
5147
5148 cancel_delayed_work(&tid->reorder_work);
5149 diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
5150 index 502814c26b33..52a16b42dfd7 100644
5151 --- a/drivers/net/wireless/mediatek/mt76/mt76.h
5152 +++ b/drivers/net/wireless/mediatek/mt76/mt76.h
5153 @@ -240,8 +240,8 @@ struct mt76_rx_tid {
5154 struct delayed_work reorder_work;
5155
5156 u16 head;
5157 - u8 size;
5158 - u8 nframes;
5159 + u16 size;
5160 + u16 nframes;
5161
5162 u8 started:1, stopped:1, timer_pending:1;
5163
5164 @@ -723,7 +723,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
5165 void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
5166
5167 int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
5168 - u16 ssn, u8 size);
5169 + u16 ssn, u16 size);
5170 void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
5171
5172 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
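The u8-to-u16 widening matters because 802.11ax permits block-ack windows of 256 frames, which a u8 silently truncates. A two-line demonstration of the failure mode (hypothetical values):

    u8  size8  = 256;   /* wraps to 0: the reorder window vanishes */
    u16 size16 = 256;   /* stores 256 as intended */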
5173 diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
5174 index 348b0072cdd6..c66c6dc00378 100644
5175 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c
5176 +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
5177 @@ -881,10 +881,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
5178
5179 WARN_ON(NULL == skb);
5180 _urb = usb_alloc_urb(0, GFP_ATOMIC);
5181 - if (!_urb) {
5182 - kfree_skb(skb);
5183 + if (!_urb)
5184 return NULL;
5185 - }
5186 _rtl_install_trx_info(rtlusb, skb, ep_num);
5187 usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev,
5188 ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
5189 @@ -898,7 +896,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
5190 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
5191 u32 ep_num;
5192 struct urb *_urb = NULL;
5193 - struct sk_buff *_skb = NULL;
5194
5195 WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
5196 if (unlikely(IS_USB_STOP(rtlusb))) {
5197 @@ -907,8 +904,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
5198 return;
5199 }
5200 ep_num = rtlusb->ep_map.ep_mapping[qnum];
5201 - _skb = skb;
5202 - _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
5203 + _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num);
5204 if (unlikely(!_urb)) {
5205 pr_err("Can't allocate urb. Drop skb!\n");
5206 kfree_skb(skb);
5207 diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
5208 index 77a2bdee50fa..4a43c4fa716d 100644
5209 --- a/drivers/net/wireless/realtek/rtw88/pci.c
5210 +++ b/drivers/net/wireless/realtek/rtw88/pci.c
5211 @@ -974,6 +974,7 @@ static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
5212 len = pci_resource_len(pdev, bar_id);
5213 rtwpci->mmap = pci_iomap(pdev, bar_id, len);
5214 if (!rtwpci->mmap) {
5215 + pci_release_regions(pdev);
5216 rtw_err(rtwdev, "failed to map pci memory\n");
5217 return -ENOMEM;
5218 }
5219 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
5220 index f0e0af3aa714..d4b388793f40 100644
5221 --- a/drivers/nvme/host/core.c
5222 +++ b/drivers/nvme/host/core.c
5223 @@ -1032,6 +1032,19 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
5224 }
5225 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
5226
5227 +/*
5228 + * In NVMe 1.0 the CNS field was just a binary controller or namespace
5229 + * flag, thus sending any new CNS opcodes has a big chance of not working.
5230 + * Qemu unfortunately had that bug while reporting 1.1 version compliance
5231 + * (but not for any later version).
5232 + */
5233 +static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
5234 +{
5235 + if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
5236 + return ctrl->vs < NVME_VS(1, 2, 0);
5237 + return ctrl->vs < NVME_VS(1, 1, 0);
5238 +}
5239 +
5240 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
5241 {
5242 struct nvme_command c = { };
5243 @@ -3740,8 +3753,7 @@ static void nvme_scan_work(struct work_struct *work)
5244
5245 mutex_lock(&ctrl->scan_lock);
5246 nn = le32_to_cpu(id->nn);
5247 - if (ctrl->vs >= NVME_VS(1, 1, 0) &&
5248 - !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
5249 + if (!nvme_ctrl_limited_cns(ctrl)) {
5250 if (!nvme_scan_ns_list(ctrl, nn))
5251 goto out_free_id;
5252 }
5253 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
5254 index cd64ddb129e5..1c2129493508 100644
5255 --- a/drivers/nvme/host/pci.c
5256 +++ b/drivers/nvme/host/pci.c
5257 @@ -128,6 +128,9 @@ struct nvme_dev {
5258 dma_addr_t host_mem_descs_dma;
5259 struct nvme_host_mem_buf_desc *host_mem_descs;
5260 void **host_mem_desc_bufs;
5261 + unsigned int nr_allocated_queues;
5262 + unsigned int nr_write_queues;
5263 + unsigned int nr_poll_queues;
5264 };
5265
5266 static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
5267 @@ -210,25 +213,14 @@ struct nvme_iod {
5268 struct scatterlist *sg;
5269 };
5270
5271 -static unsigned int max_io_queues(void)
5272 +static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
5273 {
5274 - return num_possible_cpus() + write_queues + poll_queues;
5275 -}
5276 -
5277 -static unsigned int max_queue_count(void)
5278 -{
5279 - /* IO queues + admin queue */
5280 - return 1 + max_io_queues();
5281 -}
5282 -
5283 -static inline unsigned int nvme_dbbuf_size(u32 stride)
5284 -{
5285 - return (max_queue_count() * 8 * stride);
5286 + return dev->nr_allocated_queues * 8 * dev->db_stride;
5287 }
5288
5289 static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
5290 {
5291 - unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
5292 + unsigned int mem_size = nvme_dbbuf_size(dev);
5293
5294 if (dev->dbbuf_dbs)
5295 return 0;
5296 @@ -253,7 +245,7 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
5297
5298 static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
5299 {
5300 - unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
5301 + unsigned int mem_size = nvme_dbbuf_size(dev);
5302
5303 if (dev->dbbuf_dbs) {
5304 dma_free_coherent(dev->dev, mem_size,
5305 @@ -2030,7 +2022,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
5306 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
5307 {
5308 struct nvme_dev *dev = affd->priv;
5309 - unsigned int nr_read_queues;
5310 + unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
5311
5312 /*
5313 * If there is no interrupt available for queues, ensure that
5314 @@ -2046,12 +2038,12 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
5315 if (!nrirqs) {
5316 nrirqs = 1;
5317 nr_read_queues = 0;
5318 - } else if (nrirqs == 1 || !write_queues) {
5319 + } else if (nrirqs == 1 || !nr_write_queues) {
5320 nr_read_queues = 0;
5321 - } else if (write_queues >= nrirqs) {
5322 + } else if (nr_write_queues >= nrirqs) {
5323 nr_read_queues = 1;
5324 } else {
5325 - nr_read_queues = nrirqs - write_queues;
5326 + nr_read_queues = nrirqs - nr_write_queues;
5327 }
5328
5329 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
5330 @@ -2075,7 +2067,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
5331 * Poll queues don't need interrupts, but we need at least one IO
5332 * queue left over for non-polled IO.
5333 */
5334 - this_p_queues = poll_queues;
5335 + this_p_queues = dev->nr_poll_queues;
5336 if (this_p_queues >= nr_io_queues) {
5337 this_p_queues = nr_io_queues - 1;
5338 irq_queues = 1;
5339 @@ -2105,14 +2097,25 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
5340 __nvme_disable_io_queues(dev, nvme_admin_delete_cq);
5341 }
5342
5343 +static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
5344 +{
5345 + return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
5346 +}
5347 +
5348 static int nvme_setup_io_queues(struct nvme_dev *dev)
5349 {
5350 struct nvme_queue *adminq = &dev->queues[0];
5351 struct pci_dev *pdev = to_pci_dev(dev->dev);
5352 - int result, nr_io_queues;
5353 + unsigned int nr_io_queues;
5354 unsigned long size;
5355 + int result;
5356
5357 - nr_io_queues = max_io_queues();
5358 + /*
5359 + * Sample the module parameters once at reset time so that we have
5360 + * stable values to work with.
5361 + */
5362 + dev->nr_write_queues = write_queues;
5363 + dev->nr_poll_queues = poll_queues;
5364
5365 /*
5366 * If tags are shared with admin queue (Apple bug), then
5367 @@ -2120,6 +2123,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
5368 */
5369 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
5370 nr_io_queues = 1;
5371 + else
5372 + nr_io_queues = min(nvme_max_io_queues(dev),
5373 + dev->nr_allocated_queues - 1);
5374
5375 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
5376 if (result < 0)
5377 @@ -2794,8 +2800,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5378 if (!dev)
5379 return -ENOMEM;
5380
5381 - dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
5382 - GFP_KERNEL, node);
5383 + dev->nr_write_queues = write_queues;
5384 + dev->nr_poll_queues = poll_queues;
5385 + dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
5386 + dev->queues = kcalloc_node(dev->nr_allocated_queues,
5387 + sizeof(struct nvme_queue), GFP_KERNEL, node);
5388 if (!dev->queues)
5389 goto free;
5390
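write_queues and poll_queues are writable module parameters, so their values can change underneath the driver between probe and a later controller reset. Snapshotting them into per-device fields gives every reset a self-consistent view; the idiom, sketched (field names follow the patch):

    /* copy volatile module parameters at a well-defined point ... */
    dev->nr_write_queues = write_queues;
    dev->nr_poll_queues  = poll_queues;
    /* ... then size everything from the stable copies only */
    nr_io_queues = min(nvme_max_io_queues(dev),
                       dev->nr_allocated_queues - 1);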
5391 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
5392 index 11e84ed4de36..7900814355c2 100644
5393 --- a/drivers/nvme/host/tcp.c
5394 +++ b/drivers/nvme/host/tcp.c
5395 @@ -784,11 +784,11 @@ static void nvme_tcp_data_ready(struct sock *sk)
5396 {
5397 struct nvme_tcp_queue *queue;
5398
5399 - read_lock(&sk->sk_callback_lock);
5400 + read_lock_bh(&sk->sk_callback_lock);
5401 queue = sk->sk_user_data;
5402 if (likely(queue && queue->rd_enabled))
5403 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
5404 - read_unlock(&sk->sk_callback_lock);
5405 + read_unlock_bh(&sk->sk_callback_lock);
5406 }
5407
5408 static void nvme_tcp_write_space(struct sock *sk)
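The _bh variants are needed because the data_ready callback may run from process (send) context as well as from softirq, while the driver's teardown path clears sk_user_data under write_lock_bh(). A generic sketch of the pattern (example_queue and example_wq are hypothetical names):

    static void example_data_ready(struct sock *sk)
    {
            struct example_queue *queue;

            read_lock_bh(&sk->sk_callback_lock);    /* BH-safe read side */
            queue = sk->sk_user_data;   /* cleared under write_lock_bh() */
            if (queue)
                    queue_work(example_wq, &queue->io_work);
            read_unlock_bh(&sk->sk_callback_lock);
    }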
5409 diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
5410 index a35d3f3996d7..afc1a3d240b5 100644
5411 --- a/drivers/pci/controller/vmd.c
5412 +++ b/drivers/pci/controller/vmd.c
5413 @@ -854,6 +854,8 @@ static const struct pci_device_id vmd_ids[] = {
5414 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
5415 .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
5416 VMD_FEAT_HAS_BUS_RESTRICTIONS,},
5417 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
5418 + .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
5419 {0,}
5420 };
5421 MODULE_DEVICE_TABLE(pci, vmd_ids);
5422 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
5423 index d3033873395d..83d909abc61d 100644
5424 --- a/drivers/pci/probe.c
5425 +++ b/drivers/pci/probe.c
5426 @@ -1777,7 +1777,7 @@ int pci_setup_device(struct pci_dev *dev)
5427 /* Device class may be changed after fixup */
5428 class = dev->class >> 8;
5429
5430 - if (dev->non_compliant_bars) {
5431 + if (dev->non_compliant_bars && !dev->mmio_always_on) {
5432 pci_read_config_word(dev, PCI_COMMAND, &cmd);
5433 if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
5434 pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
5435 @@ -1889,13 +1889,33 @@ static void pci_configure_mps(struct pci_dev *dev)
5436 struct pci_dev *bridge = pci_upstream_bridge(dev);
5437 int mps, mpss, p_mps, rc;
5438
5439 - if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
5440 + if (!pci_is_pcie(dev))
5441 return;
5442
5443 /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
5444 if (dev->is_virtfn)
5445 return;
5446
5447 + /*
5448 + * For Root Complex Integrated Endpoints, program the maximum
5449 + * supported value unless limited by the PCIE_BUS_PEER2PEER case.
5450 + */
5451 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
5452 + if (pcie_bus_config == PCIE_BUS_PEER2PEER)
5453 + mps = 128;
5454 + else
5455 + mps = 128 << dev->pcie_mpss;
5456 + rc = pcie_set_mps(dev, mps);
5457 + if (rc) {
5458 + pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
5459 + mps);
5460 + }
5461 + return;
5462 + }
5463 +
5464 + if (!bridge || !pci_is_pcie(bridge))
5465 + return;
5466 +
5467 mps = pcie_get_mps(dev);
5468 p_mps = pcie_get_mps(bridge);
5469
5470 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
5471 index 798e52051ecc..a1ec8a1977d3 100644
5472 --- a/drivers/pci/quirks.c
5473 +++ b/drivers/pci/quirks.c
5474 @@ -4683,6 +4683,20 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
5475 PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
5476 }
5477
5478 +static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
5479 +{
5480 + /*
5481 + * Intel RCiEP's are required to allow p2p only on translated
5482 + * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16,
5483 + * "Root-Complex Peer to Peer Considerations".
5484 + */
5485 + if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
5486 + return -ENOTTY;
5487 +
5488 + return pci_acs_ctrl_enabled(acs_flags,
5489 + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
5490 +}
5491 +
5492 static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
5493 {
5494 /*
5495 @@ -4765,6 +4779,7 @@ static const struct pci_dev_acs_enabled {
5496 /* I219 */
5497 { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
5498 { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
5499 + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
5500 /* QCOM QDF2xxx root ports */
5501 { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
5502 { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
5503 @@ -5130,13 +5145,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
5504 }
5505 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
5506
5507 -/* FLR may cause some 82579 devices to hang */
5508 -static void quirk_intel_no_flr(struct pci_dev *dev)
5509 +/*
5510 + * FLR may cause the following devices to hang:
5511 + *
5512 + * AMD Starship/Matisse HD Audio Controller 0x1487
5513 + * AMD Starship USB 3.0 Host Controller 0x148c
5514 + * AMD Matisse USB 3.0 Host Controller 0x149c
5515 + * Intel 82579LM Gigabit Ethernet Controller 0x1502
5516 + * Intel 82579V Gigabit Ethernet Controller 0x1503
5517 + *
5518 + */
5519 +static void quirk_no_flr(struct pci_dev *dev)
5520 {
5521 dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
5522 }
5523 -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
5524 -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
5525 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
5526 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
5527 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
5528 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
5529 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
5530
5531 static void quirk_no_ext_tags(struct pci_dev *pdev)
5532 {
5533 @@ -5551,6 +5578,19 @@ static void pci_fixup_no_d0_pme(struct pci_dev *dev)
5534 }
5535 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
5536
5537 +/*
5538 + * Devices [12d8:0x400e] and [12d8:0x400f]
5539 + * These devices advertise PME# support in all power states but don't
5540 + * reliably assert it.
5541 + */
5542 +static void pci_fixup_no_pme(struct pci_dev *dev)
5543 +{
5544 + pci_info(dev, "PME# is unreliable, disabling it\n");
5545 + dev->pme_support = 0;
5546 +}
5547 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme);
5548 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme);
5549 +
5550 static void apex_pci_fixup_class(struct pci_dev *pdev)
5551 {
5552 pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
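For context, the PCI_DEV_FLAGS_NO_FLR_RESET flag set by quirk_no_flr() is consulted before any function-level reset is attempted; the check in this era's pcie_has_flr() looks roughly like the following (recalled from the 5.4 sources, so treat it as a sketch):

    static bool pcie_has_flr(struct pci_dev *dev)
    {
            u32 cap;

            if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
                    return false;   /* quirked device: never try FLR */

            pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
            return cap & PCI_EXP_DEVCAP_FLR;    /* capability bit */
    }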
5553 diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
5554 index 2f8787276d9b..3269232ff570 100644
5555 --- a/drivers/perf/arm_smmuv3_pmu.c
5556 +++ b/drivers/perf/arm_smmuv3_pmu.c
5557 @@ -815,7 +815,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
5558 if (err) {
5559 dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
5560 err, &res_0->start);
5561 - return err;
5562 + goto out_clear_affinity;
5563 }
5564
5565 err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
5566 @@ -834,6 +834,8 @@ static int smmu_pmu_probe(struct platform_device *pdev)
5567
5568 out_unregister:
5569 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
5570 +out_clear_affinity:
5571 + irq_set_affinity_hint(smmu_pmu->irq, NULL);
5572 return err;
5573 }
5574
5575 @@ -843,6 +845,7 @@ static int smmu_pmu_remove(struct platform_device *pdev)
5576
5577 perf_pmu_unregister(&smmu_pmu->pmu);
5578 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
5579 + irq_set_affinity_hint(smmu_pmu->irq, NULL);
5580
5581 return 0;
5582 }
5583 diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
5584 index f28063873e11..0d6325d6a4ec 100644
5585 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
5586 +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
5587 @@ -285,7 +285,7 @@ static struct attribute *hisi_hha_pmu_events_attr[] = {
5588 HISI_PMU_EVENT_ATTR(rx_wbip, 0x05),
5589 HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11),
5590 HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c),
5591 - HISI_PMU_EVENT_ATTR(wr_dr_64b, 0x1d),
5592 + HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d),
5593 HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e),
5594 HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f),
5595 HISI_PMU_EVENT_ATTR(spill_num, 0x20),
5596 diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
5597 index 0599f5127b01..84501c785473 100644
5598 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c
5599 +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
5600 @@ -40,6 +40,8 @@ struct exynos_irq_chip {
5601 u32 eint_pend;
5602 u32 eint_wake_mask_value;
5603 u32 eint_wake_mask_reg;
5604 + void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata,
5605 + struct exynos_irq_chip *irq_chip);
5606 };
5607
5608 static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
5609 @@ -265,6 +267,7 @@ struct exynos_eint_gpio_save {
5610 u32 eint_con;
5611 u32 eint_fltcon0;
5612 u32 eint_fltcon1;
5613 + u32 eint_mask;
5614 };
5615
5616 /*
5617 @@ -342,6 +345,47 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
5618 return 0;
5619 }
5620
5621 +static void
5622 +exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
5623 + struct exynos_irq_chip *irq_chip)
5624 +{
5625 + struct regmap *pmu_regs;
5626 +
5627 + if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
5628 + dev_warn(drvdata->dev,
5629 + "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
5630 + return;
5631 + }
5632 +
5633 + pmu_regs = drvdata->retention_ctrl->priv;
5634 + dev_info(drvdata->dev,
5635 + "Setting external wakeup interrupt mask: 0x%x\n",
5636 + irq_chip->eint_wake_mask_value);
5637 +
5638 + regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
5639 + irq_chip->eint_wake_mask_value);
5640 +}
5641 +
5642 +static void
5643 +s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
5644 + struct exynos_irq_chip *irq_chip)
5645 +
5646 +{
5647 + void __iomem *clk_base;
5648 +
5649 + if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
5650 + dev_warn(drvdata->dev,
5651 + "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
5652 + return;
5653 + }
5654 +
5655 +
5656 + clk_base = (void __iomem *) drvdata->retention_ctrl->priv;
5657 +
5658 + __raw_writel(irq_chip->eint_wake_mask_value,
5659 + clk_base + irq_chip->eint_wake_mask_reg);
5660 +}
5661 +
5662 /*
5663 * irq_chip for wakeup interrupts
5664 */
5665 @@ -360,8 +404,9 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = {
5666 .eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
5667 .eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
5668 .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
5669 - /* Only difference with exynos4210_wkup_irq_chip: */
5670 + /* Only differences with exynos4210_wkup_irq_chip: */
5671 .eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK,
5672 + .set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask,
5673 };
5674
5675 static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
5676 @@ -380,6 +425,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
5677 .eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
5678 .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
5679 .eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK,
5680 + .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
5681 };
5682
5683 static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
5684 @@ -398,6 +444,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
5685 .eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
5686 .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
5687 .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
5688 + .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
5689 };
5690
5691 /* list of external wakeup controllers supported */
5692 @@ -574,27 +621,6 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
5693 return 0;
5694 }
5695
5696 -static void
5697 -exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
5698 - struct exynos_irq_chip *irq_chip)
5699 -{
5700 - struct regmap *pmu_regs;
5701 -
5702 - if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
5703 - dev_warn(drvdata->dev,
5704 - "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
5705 - return;
5706 - }
5707 -
5708 - pmu_regs = drvdata->retention_ctrl->priv;
5709 - dev_info(drvdata->dev,
5710 - "Setting external wakeup interrupt mask: 0x%x\n",
5711 - irq_chip->eint_wake_mask_value);
5712 -
5713 - regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
5714 - irq_chip->eint_wake_mask_value);
5715 -}
5716 -
5717 static void exynos_pinctrl_suspend_bank(
5718 struct samsung_pinctrl_drv_data *drvdata,
5719 struct samsung_pin_bank *bank)
5720 @@ -608,10 +634,13 @@ static void exynos_pinctrl_suspend_bank(
5721 + 2 * bank->eint_offset);
5722 save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
5723 + 2 * bank->eint_offset + 4);
5724 + save->eint_mask = readl(regs + bank->irq_chip->eint_mask
5725 + + bank->eint_offset);
5726
5727 pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
5728 pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
5729 pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
5730 + pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
5731 }
5732
5733 void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
5734 @@ -626,8 +655,8 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
5735 else if (bank->eint_type == EINT_TYPE_WKUP) {
5736 if (!irq_chip) {
5737 irq_chip = bank->irq_chip;
5738 - exynos_pinctrl_set_eint_wakeup_mask(drvdata,
5739 - irq_chip);
5740 + irq_chip->set_eint_wakeup_mask(drvdata,
5741 + irq_chip);
5742 } else if (bank->irq_chip != irq_chip) {
5743 dev_warn(drvdata->dev,
5744 "More than one external wakeup interrupt chip configured (bank: %s). This is not supported by hardware nor by driver.\n",
5745 @@ -653,6 +682,9 @@ static void exynos_pinctrl_resume_bank(
5746 pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
5747 readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
5748 + 2 * bank->eint_offset + 4), save->eint_fltcon1);
5749 + pr_debug("%s: mask %#010x => %#010x\n", bank->name,
5750 + readl(regs + bank->irq_chip->eint_mask
5751 + + bank->eint_offset), save->eint_mask);
5752
5753 writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
5754 + bank->eint_offset);
5755 @@ -660,6 +692,8 @@ static void exynos_pinctrl_resume_bank(
5756 + 2 * bank->eint_offset);
5757 writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
5758 + 2 * bank->eint_offset + 4);
5759 + writel(save->eint_mask, regs + bank->irq_chip->eint_mask
5760 + + bank->eint_offset);
5761 }
5762
5763 void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
5764 diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
5765 index 41e28552b2ce..b1f4a31ba1ee 100644
5766 --- a/drivers/platform/x86/asus-wmi.c
5767 +++ b/drivers/platform/x86/asus-wmi.c
5768 @@ -111,6 +111,8 @@ struct bios_args {
5769 u32 arg0;
5770 u32 arg1;
5771 u32 arg2; /* At least TUF Gaming series uses 3 dword input buffer. */
5772 + u32 arg4;
5773 + u32 arg5;
5774 } __packed;
5775
5776 /*
5777 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
5778 index 74e988f839e8..4c1dd1d4e60b 100644
5779 --- a/drivers/platform/x86/dell-laptop.c
5780 +++ b/drivers/platform/x86/dell-laptop.c
5781 @@ -2204,10 +2204,13 @@ static int __init dell_init(void)
5782
5783 dell_laptop_register_notifier(&dell_laptop_notifier);
5784
5785 - micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
5786 - ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
5787 - if (ret < 0)
5788 - goto fail_led;
5789 + if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) &&
5790 + dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE)) {
5791 + micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
5792 + ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
5793 + if (ret < 0)
5794 + goto fail_led;
5795 + }
5796
5797 if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
5798 return 0;
5799 diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
5800 index a881b709af25..a44a2ec33287 100644
5801 --- a/drivers/platform/x86/hp-wmi.c
5802 +++ b/drivers/platform/x86/hp-wmi.c
5803 @@ -461,8 +461,14 @@ static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
5804 static ssize_t als_store(struct device *dev, struct device_attribute *attr,
5805 const char *buf, size_t count)
5806 {
5807 - u32 tmp = simple_strtoul(buf, NULL, 10);
5808 - int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
5809 + u32 tmp;
5810 + int ret;
5811 +
5812 + ret = kstrtou32(buf, 10, &tmp);
5813 + if (ret)
5814 + return ret;
5815 +
5816 + ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
5817 sizeof(tmp), sizeof(tmp));
5818 if (ret)
5819 return ret < 0 ? ret : -EINVAL;
5820 diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
5821 index ef6d4bd77b1a..7a506c1d0113 100644
5822 --- a/drivers/platform/x86/intel-hid.c
5823 +++ b/drivers/platform/x86/intel-hid.c
5824 @@ -77,6 +77,13 @@ static const struct dmi_system_id button_array_table[] = {
5825 DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 16"),
5826 },
5827 },
5828 + {
5829 + .ident = "HP Spectre x2 (2015)",
5830 + .matches = {
5831 + DMI_MATCH(DMI_SYS_VENDOR, "HP"),
5832 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"),
5833 + },
5834 + },
5835 { }
5836 };
5837
5838 diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
5839 index b74932307d69..cb2a80fdd8f4 100644
5840 --- a/drivers/platform/x86/intel-vbtn.c
5841 +++ b/drivers/platform/x86/intel-vbtn.c
5842 @@ -39,28 +39,51 @@ static const struct key_entry intel_vbtn_keymap[] = {
5843 { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */
5844 { KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */
5845 { KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */
5846 +};
5847 +
5848 +static const struct key_entry intel_vbtn_switchmap[] = {
5849 { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */
5850 { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */
5851 { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */
5852 { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */
5853 - { KE_END },
5854 };
5855
5856 +#define KEYMAP_LEN \
5857 + (ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1)
5858 +
5859 struct intel_vbtn_priv {
5860 + struct key_entry keymap[KEYMAP_LEN];
5861 struct input_dev *input_dev;
5862 + bool has_switches;
5863 bool wakeup_mode;
5864 };
5865
5866 static int intel_vbtn_input_setup(struct platform_device *device)
5867 {
5868 struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
5869 - int ret;
5870 + int ret, keymap_len = 0;
5871 +
5872 + if (true) {
5873 + memcpy(&priv->keymap[keymap_len], intel_vbtn_keymap,
5874 + ARRAY_SIZE(intel_vbtn_keymap) *
5875 + sizeof(struct key_entry));
5876 + keymap_len += ARRAY_SIZE(intel_vbtn_keymap);
5877 + }
5878 +
5879 + if (priv->has_switches) {
5880 + memcpy(&priv->keymap[keymap_len], intel_vbtn_switchmap,
5881 + ARRAY_SIZE(intel_vbtn_switchmap) *
5882 + sizeof(struct key_entry));
5883 + keymap_len += ARRAY_SIZE(intel_vbtn_switchmap);
5884 + }
5885 +
5886 + priv->keymap[keymap_len].type = KE_END;
5887
5888 priv->input_dev = devm_input_allocate_device(&device->dev);
5889 if (!priv->input_dev)
5890 return -ENOMEM;
5891
5892 - ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL);
5893 + ret = sparse_keymap_setup(priv->input_dev, priv->keymap, NULL);
5894 if (ret)
5895 return ret;
5896
5897 @@ -115,31 +138,40 @@ out_unknown:
5898
5899 static void detect_tablet_mode(struct platform_device *device)
5900 {
5901 - const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
5902 struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
5903 acpi_handle handle = ACPI_HANDLE(&device->dev);
5904 - struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL };
5905 - union acpi_object *obj;
5906 + unsigned long long vgbs;
5907 acpi_status status;
5908 int m;
5909
5910 - if (!(chassis_type && strcmp(chassis_type, "31") == 0))
5911 - goto out;
5912 -
5913 - status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
5914 + status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
5915 if (ACPI_FAILURE(status))
5916 - goto out;
5917 -
5918 - obj = vgbs_output.pointer;
5919 - if (!(obj && obj->type == ACPI_TYPE_INTEGER))
5920 - goto out;
5921 + return;
5922
5923 - m = !(obj->integer.value & TABLET_MODE_FLAG);
5924 + m = !(vgbs & TABLET_MODE_FLAG);
5925 input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
5926 - m = (obj->integer.value & DOCK_MODE_FLAG) ? 1 : 0;
5927 + m = (vgbs & DOCK_MODE_FLAG) ? 1 : 0;
5928 input_report_switch(priv->input_dev, SW_DOCK, m);
5929 -out:
5930 - kfree(vgbs_output.pointer);
5931 +}
5932 +
5933 +static bool intel_vbtn_has_switches(acpi_handle handle)
5934 +{
5935 + const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
5936 + unsigned long long vgbs;
5937 + acpi_status status;
5938 +
5939 + /*
5940 + * Some normal laptops have a VGBS method despite being non-convertible
5941 + * and their VGBS method always returns 0, causing detect_tablet_mode()
5942 + * to report SW_TABLET_MODE=1 to userspace, which causes issues.
5943 + * These laptops have a DMI chassis_type of 9 ("Laptop"); do not report
5944 + * switches on any device with a DMI chassis_type of 9.
5945 + */
5946 + if (chassis_type && strcmp(chassis_type, "9") == 0)
5947 + return false;
5948 +
5949 + status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
5950 + return ACPI_SUCCESS(status);
5951 }
5952
5953 static int intel_vbtn_probe(struct platform_device *device)
5954 @@ -160,13 +192,16 @@ static int intel_vbtn_probe(struct platform_device *device)
5955 return -ENOMEM;
5956 dev_set_drvdata(&device->dev, priv);
5957
5958 + priv->has_switches = intel_vbtn_has_switches(handle);
5959 +
5960 err = intel_vbtn_input_setup(device);
5961 if (err) {
5962 pr_err("Failed to setup Intel Virtual Button\n");
5963 return err;
5964 }
5965
5966 - detect_tablet_mode(device);
5967 + if (priv->has_switches)
5968 + detect_tablet_mode(device);
5969
5970 status = acpi_install_notify_handler(handle,
5971 ACPI_DEVICE_NOTIFY,
5972 diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
5973 index 90cbaa8341e3..0bf9ab8653ae 100644
5974 --- a/drivers/power/reset/vexpress-poweroff.c
5975 +++ b/drivers/power/reset/vexpress-poweroff.c
5976 @@ -143,6 +143,7 @@ static struct platform_driver vexpress_reset_driver = {
5977 .driver = {
5978 .name = "vexpress-reset",
5979 .of_match_table = vexpress_reset_of_match,
5980 + .suppress_bind_attrs = true,
5981 },
5982 };
5983
5984 diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
5985 index 75cf861ba492..2e7e2b73b012 100644
5986 --- a/drivers/power/supply/power_supply_hwmon.c
5987 +++ b/drivers/power/supply/power_supply_hwmon.c
5988 @@ -144,7 +144,7 @@ static int power_supply_hwmon_read_string(struct device *dev,
5989 u32 attr, int channel,
5990 const char **str)
5991 {
5992 - *str = channel ? "temp" : "temp ambient";
5993 + *str = channel ? "temp ambient" : "temp";
5994 return 0;
5995 }
5996
5997 @@ -304,7 +304,7 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy)
5998 goto error;
5999 }
6000
6001 - ret = devm_add_action(dev, power_supply_hwmon_bitmap_free,
6002 + ret = devm_add_action_or_reset(dev, power_supply_hwmon_bitmap_free,
6003 psyhw->props);
6004 if (ret)
6005 goto error;
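The switch to devm_add_action_or_reset() closes a leak: when registering the cleanup action itself fails, the _or_reset variant invokes the action immediately instead of returning with the resource still held. Sketch of the resulting error handling:

    ret = devm_add_action_or_reset(dev, power_supply_hwmon_bitmap_free,
                                   psyhw->props);
    if (ret)
            goto error;     /* props already freed by the helper */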
6006 diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
6007 index 0246b6f99fb5..f11e4bfbc91b 100644
6008 --- a/drivers/regulator/qcom-rpmh-regulator.c
6009 +++ b/drivers/regulator/qcom-rpmh-regulator.c
6010 @@ -832,11 +832,11 @@ static const struct rpmh_vreg_init_data pm8150_vreg_data[] = {
6011 RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l2-l10"),
6012 RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l1-l8-l11"),
6013 RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
6014 - RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l6-l17"),
6015 + RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l16-l17"),
6016 RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
6017 RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
6018 - RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l6-l17"),
6019 - RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l6-l17"),
6020 + RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16-l17"),
6021 + RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l16-l17"),
6022 RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
6023 {},
6024 };
6025 @@ -857,7 +857,7 @@ static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = {
6026 RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l4-l5-l6"),
6027 RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l4-l5-l6"),
6028 RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-l11"),
6029 - RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8-l11"),
6030 + RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8"),
6031 RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l9-l10"),
6032 RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"),
6033 RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"),
6034 diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
6035 index c8ef05d6b8c7..25df4406ce52 100644
6036 --- a/drivers/soc/tegra/Kconfig
6037 +++ b/drivers/soc/tegra/Kconfig
6038 @@ -130,6 +130,7 @@ config SOC_TEGRA_FLOWCTRL
6039
6040 config SOC_TEGRA_PMC
6041 bool
6042 + select GENERIC_PINCONF
6043
6044 config SOC_TEGRA_POWERGATE_BPMP
6045 def_bool y
6046 diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
6047 index 2663bb12d9ce..b07710c76fc9 100644
6048 --- a/drivers/spi/spi-dw-mid.c
6049 +++ b/drivers/spi/spi-dw-mid.c
6050 @@ -147,6 +147,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
6051 if (!xfer->tx_buf)
6052 return NULL;
6053
6054 + memset(&txconf, 0, sizeof(txconf));
6055 txconf.direction = DMA_MEM_TO_DEV;
6056 txconf.dst_addr = dws->dma_addr;
6057 txconf.dst_maxburst = 16;
6058 @@ -193,6 +194,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
6059 if (!xfer->rx_buf)
6060 return NULL;
6061
6062 + memset(&rxconf, 0, sizeof(rxconf));
6063 rxconf.direction = DMA_DEV_TO_MEM;
6064 rxconf.src_addr = dws->dma_addr;
6065 rxconf.src_maxburst = 16;
6066 @@ -218,19 +220,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
6067
6068 static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
6069 {
6070 - u16 dma_ctrl = 0;
6071 + u16 imr = 0, dma_ctrl = 0;
6072
6073 dw_writel(dws, DW_SPI_DMARDLR, 0xf);
6074 dw_writel(dws, DW_SPI_DMATDLR, 0x10);
6075
6076 - if (xfer->tx_buf)
6077 + if (xfer->tx_buf) {
6078 dma_ctrl |= SPI_DMA_TDMAE;
6079 - if (xfer->rx_buf)
6080 + imr |= SPI_INT_TXOI;
6081 + }
6082 + if (xfer->rx_buf) {
6083 dma_ctrl |= SPI_DMA_RDMAE;
6084 + imr |= SPI_INT_RXUI | SPI_INT_RXOI;
6085 + }
6086 dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
6087
6088 /* Set the interrupt mask */
6089 - spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);
6090 + spi_umask_intr(dws, imr);
6091
6092 dws->transfer_handler = dma_transfer;
6093
6094 @@ -260,7 +266,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
6095 dma_async_issue_pending(dws->txchan);
6096 }
6097
6098 - return 0;
6099 + return 1;
6100 }
6101
6102 static void mid_spi_dma_stop(struct dw_spi *dws)
6103 diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
6104 index 82c5c027ec4c..c2f96941ad04 100644
6105 --- a/drivers/spi/spi-dw.c
6106 +++ b/drivers/spi/spi-dw.c
6107 @@ -381,11 +381,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
6108
6109 spi_enable_chip(dws, 1);
6110
6111 - if (dws->dma_mapped) {
6112 - ret = dws->dma_ops->dma_transfer(dws, transfer);
6113 - if (ret < 0)
6114 - return ret;
6115 - }
6116 + if (dws->dma_mapped)
6117 + return dws->dma_ops->dma_transfer(dws, transfer);
6118
6119 if (chip->poll_mode)
6120 return poll_transfer(dws);
6121 @@ -529,6 +526,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
6122 dws->dma_inited = 0;
6123 } else {
6124 master->can_dma = dws->dma_ops->can_dma;
6125 + master->flags |= SPI_CONTROLLER_MUST_TX;
6126 }
6127 }
6128
6129 diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
6130 index 9f0fa9f3116d..de0ba3e5449f 100644
6131 --- a/drivers/spi/spi-mem.c
6132 +++ b/drivers/spi/spi-mem.c
6133 @@ -108,15 +108,17 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
6134 return 0;
6135
6136 case 2:
6137 - if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
6138 - (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
6139 + if ((tx &&
6140 + (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
6141 + (!tx &&
6142 + (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
6143 return 0;
6144
6145 break;
6146
6147 case 4:
6148 - if ((tx && (mode & SPI_TX_QUAD)) ||
6149 - (!tx && (mode & SPI_RX_QUAD)))
6150 + if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
6151 + (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
6152 return 0;
6153
6154 break;
6155 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
6156 index d0d6f1bda1b6..7f4285e2ae68 100644
6157 --- a/drivers/spi/spi-pxa2xx.c
6158 +++ b/drivers/spi/spi-pxa2xx.c
6159 @@ -148,6 +148,7 @@ static const struct lpss_config lpss_platforms[] = {
6160 .tx_threshold_hi = 48,
6161 .cs_sel_shift = 8,
6162 .cs_sel_mask = 3 << 8,
6163 + .cs_clk_stays_gated = true,
6164 },
6165 { /* LPSS_CNL_SSP */
6166 .offset = 0x200,
6167 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
6168 index 6bfbf0cfcf63..c6242f0a307f 100644
6169 --- a/drivers/spi/spi.c
6170 +++ b/drivers/spi/spi.c
6171 @@ -1950,6 +1950,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
6172 }
6173
6174 lookup->max_speed_hz = sb->connection_speed;
6175 + lookup->bits_per_word = sb->data_bit_length;
6176
6177 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
6178 lookup->mode |= SPI_CPHA;
6179 diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
6180 index 473b465724f1..0755b11348ed 100644
6181 --- a/drivers/staging/android/ion/ion_heap.c
6182 +++ b/drivers/staging/android/ion/ion_heap.c
6183 @@ -99,12 +99,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
6184
6185 static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
6186 {
6187 - void *addr = vm_map_ram(pages, num, -1, pgprot);
6188 + void *addr = vmap(pages, num, VM_MAP, pgprot);
6189
6190 if (!addr)
6191 return -ENOMEM;
6192 memset(addr, 0, PAGE_SIZE * num);
6193 - vm_unmap_ram(addr, num);
6194 + vunmap(addr);
6195
6196 return 0;
6197 }
6198 diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
6199 index 68c5718be827..c4b16bb5c1a4 100644
6200 --- a/drivers/staging/greybus/sdio.c
6201 +++ b/drivers/staging/greybus/sdio.c
6202 @@ -411,6 +411,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
6203 struct gb_sdio_command_request request = {0};
6204 struct gb_sdio_command_response response;
6205 struct mmc_data *data = host->mrq->data;
6206 + unsigned int timeout_ms;
6207 u8 cmd_flags;
6208 u8 cmd_type;
6209 int i;
6210 @@ -469,9 +470,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
6211 request.data_blksz = cpu_to_le16(data->blksz);
6212 }
6213
6214 - ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
6215 - &request, sizeof(request), &response,
6216 - sizeof(response));
6217 + timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
6218 + GB_OPERATION_TIMEOUT_DEFAULT;
6219 +
6220 + ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
6221 + &request, sizeof(request), &response,
6222 + sizeof(response), timeout_ms);
6223 if (ret < 0)
6224 goto out;
6225
6226 diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
6227 index 6f628195c4da..021bbd420390 100644
6228 --- a/drivers/staging/media/imx/imx7-mipi-csis.c
6229 +++ b/drivers/staging/media/imx/imx7-mipi-csis.c
6230 @@ -657,28 +657,6 @@ static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd,
6231 return 0;
6232 }
6233
6234 -static struct csis_pix_format const *
6235 -mipi_csis_try_format(struct v4l2_subdev *mipi_sd, struct v4l2_mbus_framefmt *mf)
6236 -{
6237 - struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
6238 - struct csis_pix_format const *csis_fmt;
6239 -
6240 - csis_fmt = find_csis_format(mf->code);
6241 - if (!csis_fmt)
6242 - csis_fmt = &mipi_csis_formats[0];
6243 -
6244 - v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH,
6245 - csis_fmt->pix_width_alignment,
6246 - &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1,
6247 - 0);
6248 -
6249 - state->format_mbus.code = csis_fmt->code;
6250 - state->format_mbus.width = mf->width;
6251 - state->format_mbus.height = mf->height;
6252 -
6253 - return csis_fmt;
6254 -}
6255 -
6256 static struct v4l2_mbus_framefmt *
6257 mipi_csis_get_format(struct csi_state *state,
6258 struct v4l2_subdev_pad_config *cfg,
6259 @@ -691,53 +669,67 @@ mipi_csis_get_format(struct csi_state *state,
6260 return &state->format_mbus;
6261 }
6262
6263 -static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
6264 +static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd,
6265 struct v4l2_subdev_pad_config *cfg,
6266 struct v4l2_subdev_format *sdformat)
6267 {
6268 struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
6269 - struct csis_pix_format const *csis_fmt;
6270 struct v4l2_mbus_framefmt *fmt;
6271
6272 - if (sdformat->pad >= CSIS_PADS_NUM)
6273 - return -EINVAL;
6274 -
6275 - fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
6276 -
6277 mutex_lock(&state->lock);
6278 - if (sdformat->pad == CSIS_PAD_SOURCE) {
6279 - sdformat->format = *fmt;
6280 - goto unlock;
6281 - }
6282 -
6283 - csis_fmt = mipi_csis_try_format(mipi_sd, &sdformat->format);
6284 -
6285 + fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
6286 sdformat->format = *fmt;
6287 -
6288 - if (csis_fmt && sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
6289 - state->csis_fmt = csis_fmt;
6290 - else
6291 - cfg->try_fmt = sdformat->format;
6292 -
6293 -unlock:
6294 mutex_unlock(&state->lock);
6295
6296 return 0;
6297 }
6298
6299 -static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd,
6300 +static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
6301 struct v4l2_subdev_pad_config *cfg,
6302 struct v4l2_subdev_format *sdformat)
6303 {
6304 struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
6305 + struct csis_pix_format const *csis_fmt;
6306 struct v4l2_mbus_framefmt *fmt;
6307
6308 - mutex_lock(&state->lock);
6309 + /*
6310 + * The CSIS can't transcode in any way, so the source format can't
6311 + * be modified.
6312 + */
6313 + if (sdformat->pad == CSIS_PAD_SOURCE)
6314 + return mipi_csis_get_fmt(mipi_sd, cfg, sdformat);
6315 +
6316 + if (sdformat->pad != CSIS_PAD_SINK)
6317 + return -EINVAL;
6318
6319 fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad);
6320
6321 + mutex_lock(&state->lock);
6322 +
6323 + /* Validate the media bus code and clamp the size. */
6324 + csis_fmt = find_csis_format(sdformat->format.code);
6325 + if (!csis_fmt)
6326 + csis_fmt = &mipi_csis_formats[0];
6327 +
6328 + fmt->code = csis_fmt->code;
6329 + fmt->width = sdformat->format.width;
6330 + fmt->height = sdformat->format.height;
6331 +
6332 + v4l_bound_align_image(&fmt->width, 1, CSIS_MAX_PIX_WIDTH,
6333 + csis_fmt->pix_width_alignment,
6334 + &fmt->height, 1, CSIS_MAX_PIX_HEIGHT, 1, 0);
6335 +
6336 sdformat->format = *fmt;
6337
6338 + /* Propagate the format from sink to source. */
6339 + fmt = mipi_csis_get_format(state, cfg, sdformat->which,
6340 + CSIS_PAD_SOURCE);
6341 + *fmt = sdformat->format;
6342 +
6343 + /* Store the CSIS format descriptor for active formats. */
6344 + if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
6345 + state->csis_fmt = csis_fmt;
6346 +
6347 mutex_unlock(&state->lock);
6348
6349 return 0;
6350 diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c
6351 index 3d969b0522ab..abcf1f3e5f63 100644
6352 --- a/drivers/staging/media/ipu3/ipu3-mmu.c
6353 +++ b/drivers/staging/media/ipu3/ipu3-mmu.c
6354 @@ -174,8 +174,10 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
6355 spin_lock_irqsave(&mmu->lock, flags);
6356
6357 l2pt = mmu->l2pts[l1pt_idx];
6358 - if (l2pt)
6359 - goto done;
6360 + if (l2pt) {
6361 + spin_unlock_irqrestore(&mmu->lock, flags);
6362 + return l2pt;
6363 + }
6364
6365 spin_unlock_irqrestore(&mmu->lock, flags);
6366
6367 @@ -190,8 +192,9 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
6368
6369 l2pt = mmu->l2pts[l1pt_idx];
6370 if (l2pt) {
6371 + spin_unlock_irqrestore(&mmu->lock, flags);
6372 imgu_mmu_free_page_table(new_l2pt);
6373 - goto done;
6374 + return l2pt;
6375 }
6376
6377 l2pt = new_l2pt;
6378 @@ -200,7 +203,6 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
6379 pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
6380 mmu->l1pt[l1pt_idx] = pteval;
6381
6382 -done:
6383 spin_unlock_irqrestore(&mmu->lock, flags);
6384 return l2pt;
6385 }
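
[The ipu3-mmu hunk replaces the shared done: label with explicit unlock-and-return paths so the spinlock is never held across imgu_mmu_free_page_table(). The underlying shape is the classic check / allocate-unlocked / re-check pattern; a minimal pthread sketch with hypothetical names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *slot; /* stands in for mmu->l2pts[l1pt_idx] */

    static void *get_or_alloc(void)
    {
        void *new_entry, *cur;

        /* First check under the lock. */
        pthread_mutex_lock(&lock);
        cur = slot;
        pthread_mutex_unlock(&lock);
        if (cur)
            return cur;

        /* Allocate with the lock dropped (may sleep in the kernel case). */
        new_entry = calloc(1, 4096);
        if (!new_entry)
            return NULL;

        /* Re-check: another thread may have installed an entry meanwhile. */
        pthread_mutex_lock(&lock);
        cur = slot;
        if (cur) {
            pthread_mutex_unlock(&lock);
            free(new_entry); /* lost the race; free with the lock dropped */
            return cur;
        }
        slot = new_entry;
        pthread_mutex_unlock(&lock);
        return new_entry;
    }

    int main(void)
    {
        printf("entry at %p\n", get_or_alloc());
        free(slot);
        return 0;
    }
]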
6386 diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
6387 index 3c7ad1eed434..c764cb55dc8d 100644
6388 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c
6389 +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
6390 @@ -367,8 +367,10 @@ static void imgu_vb2_buf_queue(struct vb2_buffer *vb)
6391
6392 vb2_set_plane_payload(vb, 0, need_bytes);
6393
6394 + mutex_lock(&imgu->streaming_lock);
6395 if (imgu->streaming)
6396 imgu_queue_buffers(imgu, false, node->pipe);
6397 + mutex_unlock(&imgu->streaming_lock);
6398
6399 dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__,
6400 node->pipe, node->id);
6401 @@ -468,10 +470,13 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
6402 dev_dbg(dev, "%s node name %s pipe %u id %u", __func__,
6403 node->name, node->pipe, node->id);
6404
6405 + mutex_lock(&imgu->streaming_lock);
6406 if (imgu->streaming) {
6407 r = -EBUSY;
6408 + mutex_unlock(&imgu->streaming_lock);
6409 goto fail_return_bufs;
6410 }
6411 + mutex_unlock(&imgu->streaming_lock);
6412
6413 if (!node->enabled) {
6414 dev_err(dev, "IMGU node is not enabled");
6415 @@ -498,9 +503,11 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
6416
6417 /* Start streaming of the whole pipeline now */
6418 dev_dbg(dev, "IMGU streaming is ready to start");
6419 + mutex_lock(&imgu->streaming_lock);
6420 r = imgu_s_stream(imgu, true);
6421 if (!r)
6422 imgu->streaming = true;
6423 + mutex_unlock(&imgu->streaming_lock);
6424
6425 return 0;
6426
6427 @@ -532,6 +539,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
6428 dev_err(&imgu->pci_dev->dev,
6429 "failed to stop subdev streaming\n");
6430
6431 + mutex_lock(&imgu->streaming_lock);
6432 /* Was this the first node with streaming disabled? */
6433 if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) {
6434 /* Yes, really stop streaming now */
6435 @@ -542,6 +550,8 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
6436 }
6437
6438 imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
6439 + mutex_unlock(&imgu->streaming_lock);
6440 +
6441 media_pipeline_stop(&node->vdev.entity);
6442 }
6443
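
[The new imgu->streaming_lock simply ensures that every read-modify-write of the streaming flag (queueing buffers, starting, stopping) happens atomically with respect to the others. Condensed to its essentials, assuming hypothetical names:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t streaming_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool streaming;

    /* Start path: reject a second start with -EBUSY, and flip the flag
     * only while the lock is held, as imgu_vb2_start_streaming() does. */
    static int start_streaming(void)
    {
        pthread_mutex_lock(&streaming_lock);
        if (streaming) {
            pthread_mutex_unlock(&streaming_lock);
            return -EBUSY;
        }
        streaming = true;
        pthread_mutex_unlock(&streaming_lock);
        return 0;
    }

    /* Stop path: the flag is cleared under the same lock, so a racing
     * start cannot observe a half-updated state. */
    static void stop_streaming(void)
    {
        pthread_mutex_lock(&streaming_lock);
        streaming = false;
        pthread_mutex_unlock(&streaming_lock);
    }

    int main(void)
    {
        start_streaming();
        stop_streaming();
        return 0;
    }
]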
6444 diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
6445 index 06a61f31ca50..08eb6791918b 100644
6446 --- a/drivers/staging/media/ipu3/ipu3.c
6447 +++ b/drivers/staging/media/ipu3/ipu3.c
6448 @@ -261,6 +261,7 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe
6449
6450 ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
6451 struct imgu_vb2_buffer, list);
6452 + list_del(&ivb->list);
6453 vb = &ivb->vbb.vb2_buf;
6454 r = imgu_css_set_parameters(&imgu->css, pipe,
6455 vb2_plane_vaddr(vb, 0));
6456 @@ -274,7 +275,6 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe
6457 vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
6458 dev_dbg(&imgu->pci_dev->dev,
6459 "queue user parameters %d to css.", vb->index);
6460 - list_del(&ivb->list);
6461 } else if (imgu_pipe->queue_enabled[node]) {
6462 struct imgu_css_buffer *buf =
6463 imgu_queue_getbuf(imgu, node, pipe);
6464 @@ -663,6 +663,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev,
6465 return r;
6466
6467 mutex_init(&imgu->lock);
6468 + mutex_init(&imgu->streaming_lock);
6469 atomic_set(&imgu->qbuf_barrier, 0);
6470 init_waitqueue_head(&imgu->buf_drain_wq);
6471
6472 @@ -726,6 +727,7 @@ out_mmu_exit:
6473 out_css_powerdown:
6474 imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
6475 out_mutex_destroy:
6476 + mutex_destroy(&imgu->streaming_lock);
6477 mutex_destroy(&imgu->lock);
6478
6479 return r;
6480 @@ -743,6 +745,7 @@ static void imgu_pci_remove(struct pci_dev *pci_dev)
6481 imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
6482 imgu_dmamap_exit(imgu);
6483 imgu_mmu_exit(imgu->mmu);
6484 + mutex_destroy(&imgu->streaming_lock);
6485 mutex_destroy(&imgu->lock);
6486 }
6487
6488 diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h
6489 index 73b123b2b8a2..8cd6a0077d99 100644
6490 --- a/drivers/staging/media/ipu3/ipu3.h
6491 +++ b/drivers/staging/media/ipu3/ipu3.h
6492 @@ -146,6 +146,10 @@ struct imgu_device {
6493 * vid_buf.list and css->queue
6494 */
6495 struct mutex lock;
6496 +
6497 + /* Lock to protect writes to the streaming flag in this struct */
6498 + struct mutex streaming_lock;
6499 +
6500 /* Forbid streaming and buffer queuing during system suspend. */
6501 atomic_t qbuf_barrier;
6502 /* Indicate if system suspend take place while imgu is streaming. */
6503 diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
6504 index 56ca4c9ad01c..47940f02457b 100644
6505 --- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
6506 +++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
6507 @@ -65,6 +65,8 @@ void cedrus_device_run(void *priv)
6508
6509 v4l2_m2m_buf_copy_metadata(run.src, run.dst, true);
6510
6511 + cedrus_dst_format_set(dev, &ctx->dst_fmt);
6512 +
6513 dev->dec_ops[ctx->current_codec]->setup(ctx, &run);
6514
6515 /* Complete request(s) controls if needed. */
6516 diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
6517 index eeee3efd247b..966f9f3ed9d3 100644
6518 --- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
6519 +++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
6520 @@ -286,7 +286,6 @@ static int cedrus_s_fmt_vid_cap(struct file *file, void *priv,
6521 struct v4l2_format *f)
6522 {
6523 struct cedrus_ctx *ctx = cedrus_file2ctx(file);
6524 - struct cedrus_dev *dev = ctx->dev;
6525 struct vb2_queue *vq;
6526 int ret;
6527
6528 @@ -300,8 +299,6 @@ static int cedrus_s_fmt_vid_cap(struct file *file, void *priv,
6529
6530 ctx->dst_fmt = f->fmt.pix;
6531
6532 - cedrus_dst_format_set(dev, &ctx->dst_fmt);
6533 -
6534 return 0;
6535 }
6536
6537 diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
6538 index 28bdbd7b4ab2..f1d230c5a8ef 100644
6539 --- a/drivers/tty/serial/8250/8250_core.c
6540 +++ b/drivers/tty/serial/8250/8250_core.c
6541 @@ -1026,7 +1026,7 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
6542 gpios = mctrl_gpio_init(&uart->port, 0);
6543 if (IS_ERR(gpios)) {
6544 ret = PTR_ERR(gpios);
6545 - goto out_unlock;
6546 + goto err;
6547 } else {
6548 uart->gpios = gpios;
6549 }
6550 @@ -1075,8 +1075,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
6551 serial8250_apply_quirks(uart);
6552 ret = uart_add_one_port(&serial8250_reg,
6553 &uart->port);
6554 - if (ret == 0)
6555 - ret = uart->port.line;
6556 + if (ret)
6557 + goto err;
6558 +
6559 + ret = uart->port.line;
6560 } else {
6561 dev_info(uart->port.dev,
6562 "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
6563 @@ -1098,10 +1100,14 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
6564 }
6565 }
6566
6567 -out_unlock:
6568 mutex_unlock(&serial_mutex);
6569
6570 return ret;
6571 +
6572 +err:
6573 + uart->port.dev = NULL;
6574 + mutex_unlock(&serial_mutex);
6575 + return ret;
6576 }
6577 EXPORT_SYMBOL(serial8250_register_8250_port);
6578
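
[The real fix in the 8250 hunk is the new err: label: on any registration failure the slot's port.dev is reset to NULL before the mutex is dropped, so the half-claimed uart slot can be reused. A compact sketch of that unwind shape, with an illustrative struct and failure step:

    #include <errno.h>
    #include <stdlib.h>

    struct uart_slot { void *dev; int line; };

    /* Every failure path funnels through one label that releases the
     * claim on the slot, mirroring serial8250_register_8250_port(). */
    static int register_uart(struct uart_slot *u, void *dev)
    {
        void *gpios;
        int ret;

        u->dev = dev; /* claim the slot */

        gpios = malloc(16); /* stand-in for mctrl_gpio_init() */
        if (!gpios) {
            ret = -ENOMEM;
            goto err;
        }

        free(gpios);
        return u->line;

    err:
        u->dev = NULL; /* leave the slot unclaimed on failure */
        return ret;
    }

    int main(void)
    {
        struct uart_slot u = { .line = 0 };
        return register_uart(&u, &u) < 0;
    }
]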
6579 diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
6580 index 8a01d034f9d1..7cad66eb39ff 100644
6581 --- a/drivers/tty/serial/8250/8250_pci.c
6582 +++ b/drivers/tty/serial/8250/8250_pci.c
6583 @@ -1871,12 +1871,6 @@ pci_moxa_setup(struct serial_private *priv,
6584 #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
6585 #define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
6586
6587 -#define PCI_VENDOR_ID_PERICOM 0x12D8
6588 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
6589 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
6590 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
6591 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
6592 -
6593 #define PCI_VENDOR_ID_ACCESIO 0x494f
6594 #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051
6595 #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053
6596 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
6597 index c7d51b51898f..f5608ad68ae1 100644
6598 --- a/drivers/tty/serial/kgdboc.c
6599 +++ b/drivers/tty/serial/kgdboc.c
6600 @@ -20,6 +20,7 @@
6601 #include <linux/vt_kern.h>
6602 #include <linux/input.h>
6603 #include <linux/module.h>
6604 +#include <linux/platform_device.h>
6605
6606 #define MAX_CONFIG_LEN 40
6607
6608 @@ -27,6 +28,7 @@ static struct kgdb_io kgdboc_io_ops;
6609
6610 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
6611 static int configured = -1;
6612 +static DEFINE_MUTEX(config_mutex);
6613
6614 static char config[MAX_CONFIG_LEN];
6615 static struct kparam_string kps = {
6616 @@ -38,6 +40,8 @@ static int kgdboc_use_kms; /* 1 if we use kernel mode switching */
6617 static struct tty_driver *kgdb_tty_driver;
6618 static int kgdb_tty_line;
6619
6620 +static struct platform_device *kgdboc_pdev;
6621 +
6622 #ifdef CONFIG_KDB_KEYBOARD
6623 static int kgdboc_reset_connect(struct input_handler *handler,
6624 struct input_dev *dev,
6625 @@ -133,11 +137,13 @@ static void kgdboc_unregister_kbd(void)
6626
6627 static void cleanup_kgdboc(void)
6628 {
6629 + if (configured != 1)
6630 + return;
6631 +
6632 if (kgdb_unregister_nmi_console())
6633 return;
6634 kgdboc_unregister_kbd();
6635 - if (configured == 1)
6636 - kgdb_unregister_io_module(&kgdboc_io_ops);
6637 + kgdb_unregister_io_module(&kgdboc_io_ops);
6638 }
6639
6640 static int configure_kgdboc(void)
6641 @@ -200,20 +206,79 @@ nmi_con_failed:
6642 kgdb_unregister_io_module(&kgdboc_io_ops);
6643 noconfig:
6644 kgdboc_unregister_kbd();
6645 - config[0] = 0;
6646 configured = 0;
6647 - cleanup_kgdboc();
6648
6649 return err;
6650 }
6651
6652 +static int kgdboc_probe(struct platform_device *pdev)
6653 +{
6654 + int ret = 0;
6655 +
6656 + mutex_lock(&config_mutex);
6657 + if (configured != 1) {
6658 + ret = configure_kgdboc();
6659 +
6660 + /* Convert "no device" to "defer" so we'll keep trying */
6661 + if (ret == -ENODEV)
6662 + ret = -EPROBE_DEFER;
6663 + }
6664 + mutex_unlock(&config_mutex);
6665 +
6666 + return ret;
6667 +}
6668 +
6669 +static struct platform_driver kgdboc_platform_driver = {
6670 + .probe = kgdboc_probe,
6671 + .driver = {
6672 + .name = "kgdboc",
6673 + .suppress_bind_attrs = true,
6674 + },
6675 +};
6676 +
6677 static int __init init_kgdboc(void)
6678 {
6679 - /* Already configured? */
6680 - if (configured == 1)
6681 + int ret;
6682 +
6683 + /*
6684 + * kgdboc is a little bit of an odd "platform_driver". It can be
6685 + * up and running long before the platform_driver object is
6686 + * created and thus doesn't actually store anything in it. There's
6687 + * only one instance of kgdb so anything is stored as global state.
6688 + * The platform_driver is only created so that we can leverage the
6689 + * kernel's mechanisms (like -EPROBE_DEFER) to call us when our
6690 + * underlying tty is ready. Here we init our platform driver and
6691 + * then create the single kgdboc instance.
6692 + */
6693 + ret = platform_driver_register(&kgdboc_platform_driver);
6694 + if (ret)
6695 + return ret;
6696 +
6697 + kgdboc_pdev = platform_device_alloc("kgdboc", PLATFORM_DEVID_NONE);
6698 + if (!kgdboc_pdev) {
6699 + ret = -ENOMEM;
6700 + goto err_did_register;
6701 + }
6702 +
6703 + ret = platform_device_add(kgdboc_pdev);
6704 + if (!ret)
6705 return 0;
6706
6707 - return configure_kgdboc();
6708 + platform_device_put(kgdboc_pdev);
6709 +
6710 +err_did_register:
6711 + platform_driver_unregister(&kgdboc_platform_driver);
6712 + return ret;
6713 +}
6714 +
6715 +static void exit_kgdboc(void)
6716 +{
6717 + mutex_lock(&config_mutex);
6718 + cleanup_kgdboc();
6719 + mutex_unlock(&config_mutex);
6720 +
6721 + platform_device_unregister(kgdboc_pdev);
6722 + platform_driver_unregister(&kgdboc_platform_driver);
6723 }
6724
6725 static int kgdboc_get_char(void)
6726 @@ -236,24 +301,20 @@ static int param_set_kgdboc_var(const char *kmessage,
6727 const struct kernel_param *kp)
6728 {
6729 size_t len = strlen(kmessage);
6730 + int ret = 0;
6731
6732 if (len >= MAX_CONFIG_LEN) {
6733 pr_err("config string too long\n");
6734 return -ENOSPC;
6735 }
6736
6737 - /* Only copy in the string if the init function has not run yet */
6738 - if (configured < 0) {
6739 - strcpy(config, kmessage);
6740 - return 0;
6741 - }
6742 -
6743 if (kgdb_connected) {
6744 pr_err("Cannot reconfigure while KGDB is connected.\n");
6745 -
6746 return -EBUSY;
6747 }
6748
6749 + mutex_lock(&config_mutex);
6750 +
6751 strcpy(config, kmessage);
6752 /* Chop out \n char as a result of echo */
6753 if (len && config[len - 1] == '\n')
6754 @@ -262,8 +323,30 @@ static int param_set_kgdboc_var(const char *kmessage,
6755 if (configured == 1)
6756 cleanup_kgdboc();
6757
6758 - /* Go and configure with the new params. */
6759 - return configure_kgdboc();
6760 + /*
6761 + * Configure with the new params as long as init already ran.
6762 + * Note that we can get called before init if someone loads us
6763 + * with "modprobe kgdboc kgdboc=..." or if they happen to use
6764 + * the odd syntax of "kgdboc.kgdboc=..." on the kernel command line.
6765 + */
6766 + if (configured >= 0)
6767 + ret = configure_kgdboc();
6768 +
6769 + /*
6770 + * If we couldn't configure then clear out the config. Note that
6771 + * specifying an invalid config on the kernel command line vs.
6772 + * through sysfs has slightly different behavior. If we fail
6773 + * to configure what was specified on the kernel command line
6774 + * we'll leave it in the 'config' and return -EPROBE_DEFER from
6775 + * our probe. When specified through sysfs, userspace is
6776 + * responsible for loading the tty driver before setting up.
6777 + */
6778 + if (ret)
6779 + config[0] = '\0';
6780 +
6781 + mutex_unlock(&config_mutex);
6782 +
6783 + return ret;
6784 }
6785
6786 static int dbg_restore_graphics;
6787 @@ -326,15 +409,8 @@ __setup("kgdboc=", kgdboc_option_setup);
6788 /* This is only available if kgdboc is a built in for early debugging */
6789 static int __init kgdboc_early_init(char *opt)
6790 {
6791 - /* save the first character of the config string because the
6792 - * init routine can destroy it.
6793 - */
6794 - char save_ch;
6795 -
6796 kgdboc_option_setup(opt);
6797 - save_ch = config[0];
6798 - init_kgdboc();
6799 - config[0] = save_ch;
6800 + configure_kgdboc();
6801 return 0;
6802 }
6803
6804 @@ -342,7 +418,7 @@ early_param("ekgdboc", kgdboc_early_init);
6805 #endif /* CONFIG_KGDB_SERIAL_CONSOLE */
6806
6807 module_init(init_kgdboc);
6808 -module_exit(cleanup_kgdboc);
6809 +module_exit(exit_kgdboc);
6810 module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
6811 MODULE_PARM_DESC(kgdboc, "<serial_device>[,baud]");
6812 MODULE_DESCRIPTION("KGDB Console TTY Driver");
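
[The kgdboc rework above hinges on one trick: a driver with no hardware registers a dummy platform device purely so its probe can return -EPROBE_DEFER until the tty it depends on exists, letting the driver core retry automatically. A minimal module-shaped sketch of that pattern, using hypothetical names rather than the actual kgdboc symbols:

    #include <linux/errno.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_device *demo_pdev;
    static bool demo_ready; /* would be "is the underlying tty up?" */

    static int demo_probe(struct platform_device *pdev)
    {
        /* -EPROBE_DEFER tells the driver core to retry this probe
         * whenever some other driver binds successfully. */
        if (!demo_ready)
            return -EPROBE_DEFER;
        return 0;
    }

    static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .driver = {
            .name = "demo-defer",
            .suppress_bind_attrs = true,
        },
    };

    static int __init demo_init(void)
    {
        int ret;

        ret = platform_driver_register(&demo_driver);
        if (ret)
            return ret;

        /* Manufacture the single device instance by hand, as kgdboc
         * does; nothing else will ever create it for us. */
        demo_pdev = platform_device_alloc("demo-defer", PLATFORM_DEVID_NONE);
        if (!demo_pdev) {
            platform_driver_unregister(&demo_driver);
            return -ENOMEM;
        }

        ret = platform_device_add(demo_pdev);
        if (ret) {
            platform_device_put(demo_pdev);
            platform_driver_unregister(&demo_driver);
        }
        return ret;
    }

    static void __exit demo_exit(void)
    {
        platform_device_unregister(demo_pdev);
        platform_driver_unregister(&demo_driver);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
]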
6813 diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
6814 index 4164045866b3..6bac5c18cf6d 100644
6815 --- a/drivers/w1/masters/omap_hdq.c
6816 +++ b/drivers/w1/masters/omap_hdq.c
6817 @@ -176,7 +176,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
6818 /* check irqstatus */
6819 if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
6820 dev_dbg(hdq_data->dev, "timeout waiting for"
6821 - " TXCOMPLETE/RXCOMPLETE, %x", *status);
6822 + " TXCOMPLETE/RXCOMPLETE, %x\n", *status);
6823 ret = -ETIMEDOUT;
6824 goto out;
6825 }
6826 @@ -187,7 +187,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
6827 OMAP_HDQ_FLAG_CLEAR, &tmp_status);
6828 if (ret) {
6829 dev_dbg(hdq_data->dev, "timeout waiting GO bit"
6830 - " return to zero, %x", tmp_status);
6831 + " return to zero, %x\n", tmp_status);
6832 }
6833
6834 out:
6835 @@ -203,7 +203,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq)
6836 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
6837 hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
6838 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
6839 - dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
6840 + dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);
6841
6842 if (hdq_data->hdq_irqstatus &
6843 (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
6844 @@ -311,7 +311,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
6845 tmp_status = hdq_data->hdq_irqstatus;
6846 /* check irqstatus */
6847 if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
6848 - dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
6849 + dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
6850 tmp_status);
6851 ret = -ETIMEDOUT;
6852 goto out;
6853 @@ -338,7 +338,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
6854 &tmp_status);
6855 if (ret)
6856 dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
6857 - " return to zero, %x", tmp_status);
6858 + " return to zero, %x\n", tmp_status);
6859
6860 out:
6861 mutex_unlock(&hdq_data->hdq_mutex);
6862 diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
6863 index 2fead6c3c687..c2dd94e1b274 100644
6864 --- a/fs/btrfs/block-group.c
6865 +++ b/fs/btrfs/block-group.c
6866 @@ -1167,7 +1167,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
6867 free_extent_map(em);
6868
6869 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
6870 - num_items, 1);
6871 + num_items);
6872 }
6873
6874 /*
6875 diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
6876 index d07bd41a7c1e..343400d49bd1 100644
6877 --- a/fs/btrfs/block-rsv.c
6878 +++ b/fs/btrfs/block-rsv.c
6879 @@ -5,6 +5,7 @@
6880 #include "block-rsv.h"
6881 #include "space-info.h"
6882 #include "transaction.h"
6883 +#include "block-group.h"
6884
6885 static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
6886 struct btrfs_block_rsv *block_rsv,
6887 @@ -313,6 +314,8 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
6888 else
6889 block_rsv->full = 0;
6890
6891 + if (block_rsv->size >= sinfo->total_bytes)
6892 + sinfo->force_alloc = CHUNK_ALLOC_FORCE;
6893 spin_unlock(&block_rsv->lock);
6894 spin_unlock(&sinfo->lock);
6895 }
6896 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
6897 index 169075550a5a..6d2c277c6e0a 100644
6898 --- a/fs/btrfs/ctree.h
6899 +++ b/fs/btrfs/ctree.h
6900 @@ -2465,6 +2465,7 @@ enum btrfs_reserve_flush_enum {
6901 BTRFS_RESERVE_FLUSH_LIMIT,
6902 BTRFS_RESERVE_FLUSH_EVICT,
6903 BTRFS_RESERVE_FLUSH_ALL,
6904 + BTRFS_RESERVE_FLUSH_ALL_STEAL,
6905 };
6906
6907 enum btrfs_flush_state {
6908 diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
6909 index f62a179f85bb..2b8f29c07668 100644
6910 --- a/fs/btrfs/file-item.c
6911 +++ b/fs/btrfs/file-item.c
6912 @@ -798,10 +798,12 @@ again:
6913 nritems = btrfs_header_nritems(path->nodes[0]);
6914 if (!nritems || (path->slots[0] >= nritems - 1)) {
6915 ret = btrfs_next_leaf(root, path);
6916 - if (ret == 1)
6917 + if (ret < 0) {
6918 + goto out;
6919 + } else if (ret > 0) {
6920 found_next = 1;
6921 - if (ret != 0)
6922 goto insert;
6923 + }
6924 slot = path->slots[0];
6925 }
6926 btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
6927 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
6928 index 94b0df3fb3c8..127cdecbe872 100644
6929 --- a/fs/btrfs/inode.c
6930 +++ b/fs/btrfs/inode.c
6931 @@ -49,6 +49,7 @@
6932 #include "qgroup.h"
6933 #include "delalloc-space.h"
6934 #include "block-group.h"
6935 +#include "space-info.h"
6936
6937 struct btrfs_iget_args {
6938 struct btrfs_key *location;
6939 @@ -1132,7 +1133,7 @@ out_unlock:
6940 */
6941 if (extent_reserved) {
6942 extent_clear_unlock_delalloc(inode, start,
6943 - start + cur_alloc_size,
6944 + start + cur_alloc_size - 1,
6945 locked_page,
6946 clear_bits,
6947 page_ops);
6948 @@ -1322,6 +1323,66 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
6949 return 1;
6950 }
6951
6952 +static int fallback_to_cow(struct inode *inode, struct page *locked_page,
6953 + const u64 start, const u64 end,
6954 + int *page_started, unsigned long *nr_written)
6955 +{
6956 + const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
6957 + const u64 range_bytes = end + 1 - start;
6958 + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6959 + u64 range_start = start;
6960 + u64 count;
6961 +
6962 + /*
6963 + * If EXTENT_NORESERVE is set, it means that when the buffered write was
6964 + * made we did not have enough available data space and therefore did not
6965 + * reserve data space for it, since we thought we could do NOCOW for the
6966 + * respective file range (either there is a prealloc extent or the inode
6967 + * has the NOCOW bit set).
6968 + *
6969 + * However, when we need to fall back to COW mode (because, for example, the
6970 + * block group for the corresponding extent was turned to RO mode by a
6971 + * scrub or relocation) we need to do the following:
6972 + *
6973 + * 1) We increment the bytes_may_use counter of the data space info.
6974 + * If COW succeeds, it allocates a new data extent and after doing
6975 + * that it decrements the space info's bytes_may_use counter and
6976 + * increments its bytes_reserved counter by the same amount (we do
6977 + * this at btrfs_add_reserved_bytes()). So we need to increment the
6978 + * bytes_may_use counter to compensate (when space is reserved at
6979 + * buffered write time, the bytes_may_use counter is incremented);
6980 + *
6981 + * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
6982 + * that if the COW path fails for any reason, it decrements (through
6983 + * extent_clear_unlock_delalloc()) the bytes_may_use counter of the
6984 + * data space info, which we incremented in the step above.
6985 + *
6986 + * If we need to fall back to COW and the inode corresponds to a free
6987 + * space cache inode, we must also increment bytes_may_use of the data
6988 + * space_info for the same reason. Space caches always get a prealloc
6989 + * extent for them, however scrub or balance may have set the block
6990 + * group that contains that extent to RO mode.
6991 + */
6992 + count = count_range_bits(io_tree, &range_start, end, range_bytes,
6993 + EXTENT_NORESERVE, 0);
6994 + if (count > 0 || is_space_ino) {
6995 + const u64 bytes = is_space_ino ? range_bytes : count;
6996 + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
6997 + struct btrfs_space_info *sinfo = fs_info->data_sinfo;
6998 +
6999 + spin_lock(&sinfo->lock);
7000 + btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
7001 + spin_unlock(&sinfo->lock);
7002 +
7003 + if (count > 0)
7004 + clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
7005 + 0, 0, NULL);
7006 + }
7007 +
7008 + return cow_file_range(inode, locked_page, start, end, page_started,
7009 + nr_written, 1);
7010 +}
7011 +
7012 /*
7013 * when nocow writeback calls back. This checks for snapshots or COW copies
7014 * of the extents that exist in the file, and COWs the file as required.
7015 @@ -1569,9 +1630,9 @@ out_check:
7016 * NOCOW, following one which needs to be COW'ed
7017 */
7018 if (cow_start != (u64)-1) {
7019 - ret = cow_file_range(inode, locked_page,
7020 - cow_start, found_key.offset - 1,
7021 - page_started, nr_written, 1);
7022 + ret = fallback_to_cow(inode, locked_page, cow_start,
7023 + found_key.offset - 1,
7024 + page_started, nr_written);
7025 if (ret) {
7026 if (nocow)
7027 btrfs_dec_nocow_writers(fs_info,
7028 @@ -1660,8 +1721,8 @@ out_check:
7029
7030 if (cow_start != (u64)-1) {
7031 cur_offset = end;
7032 - ret = cow_file_range(inode, locked_page, cow_start, end,
7033 - page_started, nr_written, 1);
7034 + ret = fallback_to_cow(inode, locked_page, cow_start, end,
7035 + page_started, nr_written);
7036 if (ret)
7037 goto error;
7038 }
7039 @@ -4250,7 +4311,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
7040 * 1 for the inode ref
7041 * 1 for the inode
7042 */
7043 - return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
7044 + return btrfs_start_transaction_fallback_global_rsv(root, 5);
7045 }
7046
7047 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
7048 @@ -8534,7 +8595,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
7049
7050 /* bio split */
7051 ASSERT(geom.len <= INT_MAX);
7052 - atomic_inc(&dip->pending_bios);
7053 do {
7054 clone_len = min_t(int, submit_len, geom.len);
7055
7056 @@ -8584,7 +8644,8 @@ submit:
7057 if (!status)
7058 return 0;
7059
7060 - bio_put(bio);
7061 + if (bio != orig_bio)
7062 + bio_put(bio);
7063 out_err:
7064 dip->errors = 1;
7065 /*
7066 @@ -8625,7 +8686,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
7067 bio->bi_private = dip;
7068 dip->orig_bio = bio;
7069 dip->dio_bio = dio_bio;
7070 - atomic_set(&dip->pending_bios, 0);
7071 + atomic_set(&dip->pending_bios, 1);
7072 io_bio = btrfs_io_bio(bio);
7073 io_bio->logical = file_offset;
7074
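
[The bookkeeping behind fallback_to_cow() is worth spelling out: COW allocation debits the space_info's bytes_may_use counter, but a NOCOW write never credited it, so falling back without compensation would underflow the counter. A toy model of that invariant, with illustrative struct and function names:

    #include <assert.h>

    /* Toy space_info: bytes_may_use is space promised to pending writes. */
    struct space_info { long long bytes_may_use; };

    /* Reserving at buffered-write time credits the counter... */
    static void credit(struct space_info *s, long long b) { s->bytes_may_use += b; }

    /* ...and placing the COW extent debits it again. */
    static void cow_alloc(struct space_info *s, long long b) { s->bytes_may_use -= b; }

    int main(void)
    {
        struct space_info s = { 0 };
        long long len = 4096;

        /* NOCOW ranges skipped the credit at write time, so
         * fallback_to_cow() adds it before calling cow_file_range();
         * otherwise the debit below would drive the counter negative,
         * which is the underflow being fixed. */
        credit(&s, len);
        cow_alloc(&s, len);
        assert(s.bytes_may_use == 0);
        return 0;
    }
]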
7075 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
7076 index 590defdf8860..b94f6f99e90d 100644
7077 --- a/fs/btrfs/qgroup.c
7078 +++ b/fs/btrfs/qgroup.c
7079 @@ -2636,6 +2636,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
7080 struct btrfs_root *quota_root;
7081 struct btrfs_qgroup *srcgroup;
7082 struct btrfs_qgroup *dstgroup;
7083 + bool need_rescan = false;
7084 u32 level_size = 0;
7085 u64 nums;
7086
7087 @@ -2779,6 +2780,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
7088 goto unlock;
7089 }
7090 ++i_qgroups;
7091 +
7092 + /*
7093 + * If we're doing a snapshot, and adding the snapshot to a new
7094 + * qgroup, the numbers are guaranteed to be incorrect.
7095 + */
7096 + if (srcid)
7097 + need_rescan = true;
7098 }
7099
7100 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
7101 @@ -2798,6 +2806,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
7102
7103 dst->rfer = src->rfer - level_size;
7104 dst->rfer_cmpr = src->rfer_cmpr - level_size;
7105 +
7106 + /* Manually tweaking numbers certainly needs a rescan */
7107 + need_rescan = true;
7108 }
7109 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
7110 struct btrfs_qgroup *src;
7111 @@ -2816,6 +2827,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
7112
7113 dst->excl = src->excl + level_size;
7114 dst->excl_cmpr = src->excl_cmpr + level_size;
7115 + need_rescan = true;
7116 }
7117
7118 unlock:
7119 @@ -2823,6 +2835,8 @@ unlock:
7120 out:
7121 if (!committing)
7122 mutex_unlock(&fs_info->qgroup_ioctl_lock);
7123 + if (need_rescan)
7124 + fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
7125 return ret;
7126 }
7127
7128 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
7129 index 3eb0fec2488a..6ad216e8178e 100644
7130 --- a/fs/btrfs/send.c
7131 +++ b/fs/btrfs/send.c
7132 @@ -23,6 +23,7 @@
7133 #include "btrfs_inode.h"
7134 #include "transaction.h"
7135 #include "compression.h"
7136 +#include "xattr.h"
7137
7138 /*
7139 * Maximum number of references an extent can have in order for us to attempt to
7140 @@ -4536,6 +4537,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
7141 struct fs_path *p;
7142 struct posix_acl_xattr_header dummy_acl;
7143
7144 + /* Capabilities are emitted by finish_inode_if_needed */
7145 + if (!strncmp(name, XATTR_NAME_CAPS, name_len))
7146 + return 0;
7147 +
7148 p = fs_path_alloc();
7149 if (!p)
7150 return -ENOMEM;
7151 @@ -5098,6 +5103,64 @@ static int send_extent_data(struct send_ctx *sctx,
7152 return 0;
7153 }
7154
7155 +/*
7156 + * Search for a capability xattr related to sctx->cur_ino. If the capability is
7157 + * found, call the send_set_xattr() function to emit it.
7158 + *
7159 + * Return 0 if there isn't a capability, or when the capability was emitted
7160 + * successfully, or < 0 if an error occurred.
7161 + */
7162 +static int send_capabilities(struct send_ctx *sctx)
7163 +{
7164 + struct fs_path *fspath = NULL;
7165 + struct btrfs_path *path;
7166 + struct btrfs_dir_item *di;
7167 + struct extent_buffer *leaf;
7168 + unsigned long data_ptr;
7169 + char *buf = NULL;
7170 + int buf_len;
7171 + int ret = 0;
7172 +
7173 + path = alloc_path_for_send();
7174 + if (!path)
7175 + return -ENOMEM;
7176 +
7177 + di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
7178 + XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
7179 + if (!di) {
7180 + /* There is no xattr for this inode */
7181 + goto out;
7182 + } else if (IS_ERR(di)) {
7183 + ret = PTR_ERR(di);
7184 + goto out;
7185 + }
7186 +
7187 + leaf = path->nodes[0];
7188 + buf_len = btrfs_dir_data_len(leaf, di);
7189 +
7190 + fspath = fs_path_alloc();
7191 + buf = kmalloc(buf_len, GFP_KERNEL);
7192 + if (!fspath || !buf) {
7193 + ret = -ENOMEM;
7194 + goto out;
7195 + }
7196 +
7197 + ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
7198 + if (ret < 0)
7199 + goto out;
7200 +
7201 + data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
7202 + read_extent_buffer(leaf, buf, data_ptr, buf_len);
7203 +
7204 + ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
7205 + strlen(XATTR_NAME_CAPS), buf, buf_len);
7206 +out:
7207 + kfree(buf);
7208 + fs_path_free(fspath);
7209 + btrfs_free_path(path);
7210 + return ret;
7211 +}
7212 +
7213 static int clone_range(struct send_ctx *sctx,
7214 struct clone_root *clone_root,
7215 const u64 disk_byte,
7216 @@ -6001,6 +6064,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
7217 goto out;
7218 }
7219
7220 + ret = send_capabilities(sctx);
7221 + if (ret < 0)
7222 + goto out;
7223 +
7224 /*
7225 * If other directory inodes depended on our current directory
7226 * inode's move/rename, now do their move/rename operations.
7227 diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
7228 index e8a4b0ebe97f..7889a59a57fa 100644
7229 --- a/fs/btrfs/space-info.c
7230 +++ b/fs/btrfs/space-info.c
7231 @@ -462,6 +462,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
7232 struct reserve_ticket *ticket = NULL;
7233 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
7234 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
7235 + struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
7236 struct btrfs_trans_handle *trans;
7237 u64 bytes_needed;
7238 u64 reclaim_bytes = 0;
7239 @@ -524,6 +525,11 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
7240 spin_lock(&delayed_refs_rsv->lock);
7241 reclaim_bytes += delayed_refs_rsv->reserved;
7242 spin_unlock(&delayed_refs_rsv->lock);
7243 +
7244 + spin_lock(&trans_rsv->lock);
7245 + reclaim_bytes += trans_rsv->reserved;
7246 + spin_unlock(&trans_rsv->lock);
7247 +
7248 if (reclaim_bytes >= bytes_needed)
7249 goto commit;
7250 bytes_needed -= reclaim_bytes;
7251 @@ -683,6 +689,34 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
7252 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
7253 }
7254
7255 +static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
7256 + struct btrfs_space_info *space_info,
7257 + struct reserve_ticket *ticket)
7258 +{
7259 + struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
7260 + u64 min_bytes;
7261 +
7262 + if (global_rsv->space_info != space_info)
7263 + return false;
7264 +
7265 + spin_lock(&global_rsv->lock);
7266 + min_bytes = div_factor(global_rsv->size, 5);
7267 + if (global_rsv->reserved < min_bytes + ticket->bytes) {
7268 + spin_unlock(&global_rsv->lock);
7269 + return false;
7270 + }
7271 + global_rsv->reserved -= ticket->bytes;
7272 + ticket->bytes = 0;
7273 + list_del_init(&ticket->list);
7274 + wake_up(&ticket->wait);
7275 + space_info->tickets_id++;
7276 + if (global_rsv->reserved < global_rsv->size)
7277 + global_rsv->full = 0;
7278 + spin_unlock(&global_rsv->lock);
7279 +
7280 + return true;
7281 +}
7282 +
7283 /*
7284 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
7285 * @fs_info - fs_info for this fs
7286 @@ -715,6 +749,10 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
7287 ticket = list_first_entry(&space_info->tickets,
7288 struct reserve_ticket, list);
7289
7290 + if (ticket->steal &&
7291 + steal_from_global_rsv(fs_info, space_info, ticket))
7292 + return true;
7293 +
7294 /*
7295 * may_commit_transaction will avoid committing the transaction
7296 * if it doesn't feel like the space reclaimed by the commit
7297 @@ -934,6 +972,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
7298
7299 switch (flush) {
7300 case BTRFS_RESERVE_FLUSH_ALL:
7301 + case BTRFS_RESERVE_FLUSH_ALL_STEAL:
7302 wait_reserve_ticket(fs_info, space_info, ticket);
7303 break;
7304 case BTRFS_RESERVE_FLUSH_LIMIT:
7305 @@ -1033,7 +1072,9 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
7306 ticket.bytes = orig_bytes;
7307 ticket.error = 0;
7308 init_waitqueue_head(&ticket.wait);
7309 - if (flush == BTRFS_RESERVE_FLUSH_ALL) {
7310 + ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
7311 + if (flush == BTRFS_RESERVE_FLUSH_ALL ||
7312 + flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
7313 list_add_tail(&ticket.list, &space_info->tickets);
7314 if (!space_info->flush) {
7315 space_info->flush = 1;
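
[steal_from_global_rsv() above grants a ticket out of the global reserve only when at least half of the reserve's target size would remain (div_factor(size, 5) is size * 5 / 10 in btrfs). The arithmetic, reduced to a toy with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct rsv { unsigned long long size, reserved; };

    /* Grant "want" bytes only if reserved space would not drop below
     * half of the reserve's target size, mirroring the hunk's check. */
    static bool steal(struct rsv *r, unsigned long long want)
    {
        unsigned long long keep = r->size * 5 / 10;

        if (r->reserved < keep + want)
            return false;
        r->reserved -= want;
        return true;
    }

    int main(void)
    {
        struct rsv r = { .size = 100, .reserved = 80 };

        printf("%d\n", steal(&r, 20)); /* 1: 60 bytes remain, >= 50 */
        printf("%d\n", steal(&r, 20)); /* 0: would leave 40, < 50 */
        return 0;
    }
]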
7316 diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
7317 index 8867e84aa33d..8b9a1d8fefcb 100644
7318 --- a/fs/btrfs/space-info.h
7319 +++ b/fs/btrfs/space-info.h
7320 @@ -72,6 +72,7 @@ struct btrfs_space_info {
7321 struct reserve_ticket {
7322 u64 bytes;
7323 int error;
7324 + bool steal;
7325 struct list_head list;
7326 wait_queue_head_t wait;
7327 };
7328 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
7329 index cdca0f656594..54589e940f9a 100644
7330 --- a/fs/btrfs/transaction.c
7331 +++ b/fs/btrfs/transaction.c
7332 @@ -21,6 +21,7 @@
7333 #include "dev-replace.h"
7334 #include "qgroup.h"
7335 #include "block-group.h"
7336 +#include "space-info.h"
7337
7338 #define BTRFS_ROOT_TRANS_TAG 0
7339
7340 @@ -451,6 +452,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
7341 u64 num_bytes = 0;
7342 u64 qgroup_reserved = 0;
7343 bool reloc_reserved = false;
7344 + bool do_chunk_alloc = false;
7345 int ret;
7346
7347 /* Send isn't supposed to start transactions. */
7348 @@ -491,7 +493,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
7349 * refill that amount for whatever is missing in the reserve.
7350 */
7351 num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
7352 - if (delayed_refs_rsv->full == 0) {
7353 + if (flush == BTRFS_RESERVE_FLUSH_ALL &&
7354 + delayed_refs_rsv->full == 0) {
7355 delayed_refs_bytes = num_bytes;
7356 num_bytes <<= 1;
7357 }
7358 @@ -512,6 +515,9 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
7359 delayed_refs_bytes);
7360 num_bytes -= delayed_refs_bytes;
7361 }
7362 +
7363 + if (rsv->space_info->force_alloc)
7364 + do_chunk_alloc = true;
7365 } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
7366 !delayed_refs_rsv->full) {
7367 /*
7368 @@ -593,6 +599,19 @@ got_it:
7369 if (!current->journal_info)
7370 current->journal_info = h;
7371
7372 + /*
7373 + * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
7374 + * ALLOC_FORCE the first run through, and then we won't allocate for
7375 + * anybody else who races in later. We don't care about the return
7376 + * value here.
7377 + */
7378 + if (do_chunk_alloc && num_bytes) {
7379 + u64 flags = h->block_rsv->space_info->flags;
7380 +
7381 + btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
7382 + CHUNK_ALLOC_NO_FORCE);
7383 + }
7384 +
7385 /*
7386 * btrfs_record_root_in_trans() needs to alloc new extents, and may
7387 * call btrfs_join_transaction() while we're also starting a
7388 @@ -627,43 +646,10 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
7389
7390 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
7391 struct btrfs_root *root,
7392 - unsigned int num_items,
7393 - int min_factor)
7394 + unsigned int num_items)
7395 {
7396 - struct btrfs_fs_info *fs_info = root->fs_info;
7397 - struct btrfs_trans_handle *trans;
7398 - u64 num_bytes;
7399 - int ret;
7400 -
7401 - /*
7402 - * We have two callers: unlink and block group removal. The
7403 - * former should succeed even if we will temporarily exceed
7404 - * quota and the latter operates on the extent root so
7405 - * qgroup enforcement is ignored anyway.
7406 - */
7407 - trans = start_transaction(root, num_items, TRANS_START,
7408 - BTRFS_RESERVE_FLUSH_ALL, false);
7409 - if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
7410 - return trans;
7411 -
7412 - trans = btrfs_start_transaction(root, 0);
7413 - if (IS_ERR(trans))
7414 - return trans;
7415 -
7416 - num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
7417 - ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
7418 - num_bytes, min_factor);
7419 - if (ret) {
7420 - btrfs_end_transaction(trans);
7421 - return ERR_PTR(ret);
7422 - }
7423 -
7424 - trans->block_rsv = &fs_info->trans_block_rsv;
7425 - trans->bytes_reserved = num_bytes;
7426 - trace_btrfs_space_reservation(fs_info, "transaction",
7427 - trans->transid, num_bytes, 1);
7428 -
7429 - return trans;
7430 + return start_transaction(root, num_items, TRANS_START,
7431 + BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
7432 }
7433
7434 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
7435 diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
7436 index 2c5a6f6e5bb0..b15c31d23148 100644
7437 --- a/fs/btrfs/transaction.h
7438 +++ b/fs/btrfs/transaction.h
7439 @@ -181,8 +181,7 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
7440 unsigned int num_items);
7441 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
7442 struct btrfs_root *root,
7443 - unsigned int num_items,
7444 - int min_factor);
7445 + unsigned int num_items);
7446 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
7447 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
7448 struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
7449 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
7450 index 3e64f49c394b..c8b0e5005f02 100644
7451 --- a/fs/btrfs/volumes.c
7452 +++ b/fs/btrfs/volumes.c
7453 @@ -1223,6 +1223,8 @@ again:
7454 &device->dev_state)) {
7455 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
7456 &device->dev_state) &&
7457 + !test_bit(BTRFS_DEV_STATE_MISSING,
7458 + &device->dev_state) &&
7459 (!latest_dev ||
7460 device->generation > latest_dev->generation)) {
7461 latest_dev = device;
7462 @@ -2769,8 +2771,18 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
7463 ret = btrfs_commit_transaction(trans);
7464 }
7465
7466 - /* Update ctime/mtime for libblkid */
7467 + /*
7468 + * Now that we have written a new super block to this device, check all
7469 + * other fs_devices lists to see if device_path alienates any other
7470 + * scanned device.
7471 + * We can ignore the return value as it typically returns -EINVAL and
7472 + * only succeeds if the device was an alien.
7473 + */
7474 + btrfs_forget_devices(device_path);
7475 +
7476 + /* Update ctime/mtime for blkid or udev */
7477 update_dev_time(device_path);
7478 +
7479 return ret;
7480
7481 error_sysfs:
7482 diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
7483 index 98bd0e9ee7df..ca78fd709845 100644
7484 --- a/fs/ext4/ext4_extents.h
7485 +++ b/fs/ext4/ext4_extents.h
7486 @@ -170,10 +170,13 @@ struct partial_cluster {
7487 (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
7488 #define EXT_LAST_INDEX(__hdr__) \
7489 (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
7490 -#define EXT_MAX_EXTENT(__hdr__) \
7491 - (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
7492 +#define EXT_MAX_EXTENT(__hdr__) \
7493 + ((le16_to_cpu((__hdr__)->eh_max)) ? \
7494 + ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \
7495 + : 0)
7496 #define EXT_MAX_INDEX(__hdr__) \
7497 - (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
7498 + ((le16_to_cpu((__hdr__)->eh_max)) ? \
7499 + ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0)
7500
7501 static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode)
7502 {
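
[The EXT_MAX_EXTENT/EXT_MAX_INDEX change guards against a crafted or corrupted header with eh_max == 0: the old macros then evaluated to "first entry minus one", an out-of-bounds pointer, while the guarded form yields a null pointer that extent-walk loops treat as an empty list. A toy demonstration with a simplified struct, not the on-disk layout:

    #include <stddef.h>
    #include <stdio.h>

    struct ext { int v; };
    struct hdr { unsigned short eh_max; struct ext entries[4]; };

    #define FIRST_EXT(h) (&(h)->entries[0])
    /* Guarded form from the hunk: eh_max == 0 yields NULL instead of
     * FIRST_EXT(h) - 1, a pointer before the start of the array. */
    #define MAX_EXT(h) ((h)->eh_max ? FIRST_EXT(h) + (h)->eh_max - 1 : NULL)

    int main(void)
    {
        struct hdr h = { .eh_max = 0 }; /* corrupted header */

        /* Walks bounded by MAX_EXT() now see an empty extent list
         * rather than starting from an underflowed pointer. */
        printf("last extent: %p\n", (void *)MAX_EXT(&h));
        return 0;
    }
]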
7503 diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
7504 index 5508baa11bb6..8a28d47bd502 100644
7505 --- a/fs/ext4/fsync.c
7506 +++ b/fs/ext4/fsync.c
7507 @@ -44,30 +44,28 @@
7508 */
7509 static int ext4_sync_parent(struct inode *inode)
7510 {
7511 - struct dentry *dentry = NULL;
7512 - struct inode *next;
7513 + struct dentry *dentry, *next;
7514 int ret = 0;
7515
7516 if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
7517 return 0;
7518 - inode = igrab(inode);
7519 + dentry = d_find_any_alias(inode);
7520 + if (!dentry)
7521 + return 0;
7522 while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
7523 ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
7524 - dentry = d_find_any_alias(inode);
7525 - if (!dentry)
7526 - break;
7527 - next = igrab(d_inode(dentry->d_parent));
7528 +
7529 + next = dget_parent(dentry);
7530 dput(dentry);
7531 - if (!next)
7532 - break;
7533 - iput(inode);
7534 - inode = next;
7535 + dentry = next;
7536 + inode = dentry->d_inode;
7537 +
7538 /*
7539 * The directory inode may have gone through rmdir by now. But
7540 * the inode itself and its blocks are still allocated (we hold
7541 - * a reference to the inode so it didn't go through
7542 - * ext4_evict_inode()) and so we are safe to flush metadata
7543 - * blocks and the inode.
7544 + * a reference to the inode via its dentry), so it didn't go
7545 + * through ext4_evict_inode() and so we are safe to flush
7546 + * metadata blocks and the inode.
7547 */
7548 ret = sync_mapping_buffers(inode->i_mapping);
7549 if (ret)
7550 @@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode *inode)
7551 if (ret)
7552 break;
7553 }
7554 - iput(inode);
7555 + dput(dentry);
7556 return ret;
7557 }
7558
7559 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
7560 index 491f9ee4040e..894a61010ae9 100644
7561 --- a/fs/ext4/xattr.c
7562 +++ b/fs/ext4/xattr.c
7563 @@ -1820,8 +1820,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
7564 if (EXT4_I(inode)->i_file_acl) {
7565 /* The inode already has an extended attribute block. */
7566 bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
7567 - if (IS_ERR(bs->bh))
7568 - return PTR_ERR(bs->bh);
7569 + if (IS_ERR(bs->bh)) {
7570 + error = PTR_ERR(bs->bh);
7571 + bs->bh = NULL;
7572 + return error;
7573 + }
7574 ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
7575 atomic_read(&(bs->bh->b_count)),
7576 le32_to_cpu(BHDR(bs->bh)->h_refcount));
7577 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
7578 index 3edde3d6d089..a26ea1e6ba88 100644
7579 --- a/fs/f2fs/f2fs.h
7580 +++ b/fs/f2fs/f2fs.h
7581 @@ -138,6 +138,7 @@ struct f2fs_mount_info {
7582 int alloc_mode; /* segment allocation policy */
7583 int fsync_mode; /* fsync policy */
7584 bool test_dummy_encryption; /* test dummy encryption */
7585 + block_t unusable_cap_perc; /* percentage for cap */
7586 block_t unusable_cap; /* Amount of space allowed to be
7587 * unusable when disabling checkpoint
7588 */
7589 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
7590 index 5e1d4d9243a9..e36543c9f2b7 100644
7591 --- a/fs/f2fs/super.c
7592 +++ b/fs/f2fs/super.c
7593 @@ -277,6 +277,22 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
7594 F2FS_OPTION(sbi).s_resgid));
7595 }
7596
7597 +static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
7598 +{
7599 + if (!F2FS_OPTION(sbi).unusable_cap_perc)
7600 + return;
7601 +
7602 + if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
7603 + F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
7604 + else
7605 + F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
7606 + F2FS_OPTION(sbi).unusable_cap_perc;
7607 +
7608 + f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
7609 + F2FS_OPTION(sbi).unusable_cap,
7610 + F2FS_OPTION(sbi).unusable_cap_perc);
7611 +}
7612 +
7613 static void init_once(void *foo)
7614 {
7615 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
7616 @@ -790,12 +806,7 @@ static int parse_options(struct super_block *sb, char *options)
7617 return -EINVAL;
7618 if (arg < 0 || arg > 100)
7619 return -EINVAL;
7620 - if (arg == 100)
7621 - F2FS_OPTION(sbi).unusable_cap =
7622 - sbi->user_block_count;
7623 - else
7624 - F2FS_OPTION(sbi).unusable_cap =
7625 - (sbi->user_block_count / 100) * arg;
7626 + F2FS_OPTION(sbi).unusable_cap_perc = arg;
7627 set_opt(sbi, DISABLE_CHECKPOINT);
7628 break;
7629 case Opt_checkpoint_disable_cap:
7630 @@ -1735,6 +1746,7 @@ skip:
7631 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
7632
7633 limit_reserve_root(sbi);
7634 + adjust_unusable_cap_perc(sbi);
7635 *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
7636 return 0;
7637 restore_gc:
7638 @@ -3397,6 +3409,7 @@ try_onemore:
7639 sbi->reserved_blocks = 0;
7640 sbi->current_reserved_blocks = 0;
7641 limit_reserve_root(sbi);
7642 + adjust_unusable_cap_perc(sbi);
7643
7644 for (i = 0; i < NR_INODE_TYPE; i++) {
7645 INIT_LIST_HEAD(&sbi->inode_list[i]);
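
[The f2fs change records the checkpoint=disable percentage at option-parse time and converts it to a block count only once user_block_count is known (and again on remount). The conversion itself, with its 100% special case, reduces to a few lines; the function name here is hypothetical:

    #include <stdio.h>

    /* Recompute the unusable-block cap from a stored percentage, as
     * adjust_unusable_cap_perc() does. perc == 100 must bypass the
     * (blocks / 100) * perc form, which truncates. */
    static unsigned long long cap_from_perc(unsigned long long blocks,
                                            unsigned int perc)
    {
        if (!perc)
            return 0; /* option unset: keep any absolute cap as-is */
        if (perc == 100)
            return blocks;
        return (blocks / 100) * perc;
    }

    int main(void)
    {
        printf("%llu\n", cap_from_perc(1000005, 10));  /* 100000 */
        printf("%llu\n", cap_from_perc(1000005, 100)); /* 1000005, not 1000000 */
        return 0;
    }
]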
7646 diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
7647 index 4f443703065e..0c71acc1b831 100644
7648 --- a/fs/xfs/xfs_bmap_util.c
7649 +++ b/fs/xfs/xfs_bmap_util.c
7650 @@ -1760,7 +1760,7 @@ xfs_swap_extents(
7651 if (xfs_inode_has_cow_data(tip)) {
7652 error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
7653 if (error)
7654 - return error;
7655 + goto out_unlock;
7656 }
7657
7658 /*
7659 diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
7660 index 0abba171aa89..1264ac63e4e5 100644
7661 --- a/fs/xfs/xfs_buf.c
7662 +++ b/fs/xfs/xfs_buf.c
7663 @@ -1162,8 +1162,10 @@ xfs_buf_ioend(
7664 bp->b_ops->verify_read(bp);
7665 }
7666
7667 - if (!bp->b_error)
7668 + if (!bp->b_error) {
7669 + bp->b_flags &= ~XBF_WRITE_FAIL;
7670 bp->b_flags |= XBF_DONE;
7671 + }
7672
7673 if (bp->b_iodone)
7674 (*(bp->b_iodone))(bp);
7675 @@ -1223,7 +1225,7 @@ xfs_bwrite(
7676
7677 bp->b_flags |= XBF_WRITE;
7678 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
7679 - XBF_WRITE_FAIL | XBF_DONE);
7680 + XBF_DONE);
7681
7682 error = xfs_buf_submit(bp);
7683 if (error)
7684 @@ -1929,7 +1931,7 @@ xfs_buf_delwri_submit_buffers(
7685 * synchronously. Otherwise, drop the buffer from the delwri
7686 * queue and submit async.
7687 */
7688 - bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
7689 + bp->b_flags &= ~_XBF_DELWRI_Q;
7690 bp->b_flags |= XBF_WRITE;
7691 if (wait_list) {
7692 bp->b_flags &= ~XBF_ASYNC;
7693 diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
7694 index aeb95e7391c1..3cbf248af51f 100644
7695 --- a/fs/xfs/xfs_dquot.c
7696 +++ b/fs/xfs/xfs_dquot.c
7697 @@ -1116,13 +1116,12 @@ xfs_qm_dqflush(
7698 dqb = bp->b_addr + dqp->q_bufoffset;
7699 ddqp = &dqb->dd_diskdq;
7700
7701 - /*
7702 - * A simple sanity check in case we got a corrupted dquot.
7703 - */
7704 - fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0);
7705 + /* sanity check the in-core structure before we flush */
7706 + fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id),
7707 + 0);
7708 if (fa) {
7709 xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
7710 - be32_to_cpu(ddqp->d_id), fa);
7711 + be32_to_cpu(dqp->q_core.d_id), fa);
7712 xfs_buf_relse(bp);
7713 xfs_dqfunlock(dqp);
7714 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
7715 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
7716 index b072aeb1fd78..4d6fe87fd38f 100644
7717 --- a/include/linux/kgdb.h
7718 +++ b/include/linux/kgdb.h
7719 @@ -323,7 +323,7 @@ extern void gdbstub_exit(int status);
7720 extern int kgdb_single_step;
7721 extern atomic_t kgdb_active;
7722 #define in_dbg_master() \
7723 - (raw_smp_processor_id() == atomic_read(&kgdb_active))
7724 + (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active)))
7725 extern bool dbg_is_early;
7726 extern void __init dbg_late_init(void);
7727 extern void kgdb_panic(const char *msg);
7728 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
7729 index 8b5f758942a2..85804ba62215 100644
7730 --- a/include/linux/mmzone.h
7731 +++ b/include/linux/mmzone.h
7732 @@ -709,6 +709,8 @@ typedef struct pglist_data {
7733 /*
7734 * Must be held any time you expect node_start_pfn,
7735 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
7736 + * Also synchronizes pgdat->first_deferred_pfn during deferred page
7737 + * init.
7738 *
7739 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
7740 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
7741 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
7742 index 228f66347620..0ad57693f392 100644
7743 --- a/include/linux/pci_ids.h
7744 +++ b/include/linux/pci_ids.h
7745 @@ -148,6 +148,8 @@
7746
7747 /* Vendors and devices. Sort key: vendor first, device next. */
7748
7749 +#define PCI_VENDOR_ID_LOONGSON 0x0014
7750 +
7751 #define PCI_VENDOR_ID_TTTECH 0x0357
7752 #define PCI_DEVICE_ID_TTTECH_MC322 0x000a
7753
7754 @@ -548,7 +550,9 @@
7755 #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
7756 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
7757 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
7758 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
7759 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
7760 +#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
7761 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
7762 #define PCI_DEVICE_ID_AMD_LANCE 0x2000
7763 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
7764 @@ -1829,6 +1833,12 @@
7765 #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
7766 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
7767
7768 +#define PCI_VENDOR_ID_PERICOM 0x12D8
7769 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
7770 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
7771 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
7772 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
7773 +
7774 #define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0
7775 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031
7776 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021
7777 @@ -3008,6 +3018,7 @@
7778 #define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
7779 #define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
7780 #define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
7781 +#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
7782 #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
7783
7784 #define PCI_VENDOR_ID_SCALEMP 0x8686
7785 diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
7786 index c49257a3b510..a132d875d351 100644
7787 --- a/include/linux/sched/mm.h
7788 +++ b/include/linux/sched/mm.h
7789 @@ -49,6 +49,8 @@ static inline void mmdrop(struct mm_struct *mm)
7790 __mmdrop(mm);
7791 }
7792
7793 +void mmdrop(struct mm_struct *mm);
7794 +
7795 /*
7796 * This has to be called after a get_task_mm()/mmget_not_zero()
7797 * followed by taking the mmap_sem for writing before modifying the
7798 diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
7799 index a3adbe593505..4bdb5e4bbd6a 100644
7800 --- a/include/linux/skmsg.h
7801 +++ b/include/linux/skmsg.h
7802 @@ -457,4 +457,12 @@ static inline void psock_progs_drop(struct sk_psock_progs *progs)
7803 psock_set_prog(&progs->skb_verdict, NULL);
7804 }
7805
7806 +int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);
7807 +
7808 +static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
7809 +{
7810 + if (!psock)
7811 + return false;
7812 + return psock->parser.enabled;
7813 +}
7814 #endif /* _LINUX_SKMSG_H */
7815 diff --git a/include/linux/string.h b/include/linux/string.h
7816 index b6ccdc2c7f02..b2264355272d 100644
7817 --- a/include/linux/string.h
7818 +++ b/include/linux/string.h
7819 @@ -269,6 +269,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob
7820 void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
7821
7822 #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
7823 +
7824 +#ifdef CONFIG_KASAN
7825 +extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
7826 +extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
7827 +extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
7828 +extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
7829 +extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
7830 +extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
7831 +extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
7832 +extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
7833 +extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
7834 +extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
7835 +#else
7836 +#define __underlying_memchr __builtin_memchr
7837 +#define __underlying_memcmp __builtin_memcmp
7838 +#define __underlying_memcpy __builtin_memcpy
7839 +#define __underlying_memmove __builtin_memmove
7840 +#define __underlying_memset __builtin_memset
7841 +#define __underlying_strcat __builtin_strcat
7842 +#define __underlying_strcpy __builtin_strcpy
7843 +#define __underlying_strlen __builtin_strlen
7844 +#define __underlying_strncat __builtin_strncat
7845 +#define __underlying_strncpy __builtin_strncpy
7846 +#endif
7847 +
7848 __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
7849 {
7850 size_t p_size = __builtin_object_size(p, 0);
7851 @@ -276,14 +301,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
7852 __write_overflow();
7853 if (p_size < size)
7854 fortify_panic(__func__);
7855 - return __builtin_strncpy(p, q, size);
7856 + return __underlying_strncpy(p, q, size);
7857 }
7858
7859 __FORTIFY_INLINE char *strcat(char *p, const char *q)
7860 {
7861 size_t p_size = __builtin_object_size(p, 0);
7862 if (p_size == (size_t)-1)
7863 - return __builtin_strcat(p, q);
7864 + return __underlying_strcat(p, q);
7865 if (strlcat(p, q, p_size) >= p_size)
7866 fortify_panic(__func__);
7867 return p;
7868 @@ -297,7 +322,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
7869 /* Work around gcc excess stack consumption issue */
7870 if (p_size == (size_t)-1 ||
7871 (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
7872 - return __builtin_strlen(p);
7873 + return __underlying_strlen(p);
7874 ret = strnlen(p, p_size);
7875 if (p_size <= ret)
7876 fortify_panic(__func__);
7877 @@ -330,7 +355,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
7878 __write_overflow();
7879 if (len >= p_size)
7880 fortify_panic(__func__);
7881 - __builtin_memcpy(p, q, len);
7882 + __underlying_memcpy(p, q, len);
7883 p[len] = '\0';
7884 }
7885 return ret;
7886 @@ -343,12 +368,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
7887 size_t p_size = __builtin_object_size(p, 0);
7888 size_t q_size = __builtin_object_size(q, 0);
7889 if (p_size == (size_t)-1 && q_size == (size_t)-1)
7890 - return __builtin_strncat(p, q, count);
7891 + return __underlying_strncat(p, q, count);
7892 p_len = strlen(p);
7893 copy_len = strnlen(q, count);
7894 if (p_size < p_len + copy_len + 1)
7895 fortify_panic(__func__);
7896 - __builtin_memcpy(p + p_len, q, copy_len);
7897 + __underlying_memcpy(p + p_len, q, copy_len);
7898 p[p_len + copy_len] = '\0';
7899 return p;
7900 }
7901 @@ -360,7 +385,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
7902 __write_overflow();
7903 if (p_size < size)
7904 fortify_panic(__func__);
7905 - return __builtin_memset(p, c, size);
7906 + return __underlying_memset(p, c, size);
7907 }
7908
7909 __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
7910 @@ -375,7 +400,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
7911 }
7912 if (p_size < size || q_size < size)
7913 fortify_panic(__func__);
7914 - return __builtin_memcpy(p, q, size);
7915 + return __underlying_memcpy(p, q, size);
7916 }
7917
7918 __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
7919 @@ -390,7 +415,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
7920 }
7921 if (p_size < size || q_size < size)
7922 fortify_panic(__func__);
7923 - return __builtin_memmove(p, q, size);
7924 + return __underlying_memmove(p, q, size);
7925 }
7926
7927 extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
7928 @@ -416,7 +441,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
7929 }
7930 if (p_size < size || q_size < size)
7931 fortify_panic(__func__);
7932 - return __builtin_memcmp(p, q, size);
7933 + return __underlying_memcmp(p, q, size);
7934 }
7935
7936 __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
7937 @@ -426,7 +451,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
7938 __read_overflow();
7939 if (p_size < size)
7940 fortify_panic(__func__);
7941 - return __builtin_memchr(p, c, size);
7942 + return __underlying_memchr(p, c, size);
7943 }
7944
7945 void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
7946 @@ -457,11 +482,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
7947 size_t p_size = __builtin_object_size(p, 0);
7948 size_t q_size = __builtin_object_size(q, 0);
7949 if (p_size == (size_t)-1 && q_size == (size_t)-1)
7950 - return __builtin_strcpy(p, q);
7951 + return __underlying_strcpy(p, q);
7952 memcpy(p, q, strlen(q) + 1);
7953 return p;
7954 }
7955
7956 +/* Don't use these outside the FORTIFY_SOURCE implementation */
7957 +#undef __underlying_memchr
7958 +#undef __underlying_memcmp
7959 +#undef __underlying_memcpy
7960 +#undef __underlying_memmove
7961 +#undef __underlying_memset
7962 +#undef __underlying_strcat
7963 +#undef __underlying_strcpy
7964 +#undef __underlying_strlen
7965 +#undef __underlying_strncat
7966 +#undef __underlying_strncpy
7967 #endif
7968
7969 /**
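
The string.h hunk exists because FORTIFY_SOURCE previously bottomed out in __builtin_* calls, which KASAN cannot intercept; under CONFIG_KASAN the __underlying_* names are __RENAME'd to the real, instrumented C functions instead, and the trailing #undef block keeps the aliases private to this header. A minimal userspace sketch of the wrapper pattern, assuming GCC/clang builtins; fortified_memcpy and the explicit p_size parameter are illustrative, not the kernel implementation:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel alias; under KASAN the kernel points this at the
 * real (instrumented) memcpy via __RENAME instead of the builtin. */
#define __underlying_memcpy __builtin_memcpy

/* p_size plays the role of __builtin_object_size(p, 0); (size_t)-1 means
 * "unknown size", in which case the check is skipped. */
static void *fortified_memcpy(void *p, const void *q, size_t size, size_t p_size)
{
        if (p_size != (size_t)-1 && p_size < size) {
                fprintf(stderr, "fortify: %zu-byte write into %zu-byte object\n",
                        size, p_size);
                abort();
        }
        return __underlying_memcpy(p, q, size);
}

int main(void)
{
        char dst[8];
        fortified_memcpy(dst, "hello", 6, sizeof(dst)); /* within bounds: ok */
        return 0;
}
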
7970 diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
7971 index d4326d6662a4..b5a4eb14f809 100644
7972 --- a/include/linux/sunrpc/gss_api.h
7973 +++ b/include/linux/sunrpc/gss_api.h
7974 @@ -85,6 +85,7 @@ struct pf_desc {
7975 u32 service;
7976 char *name;
7977 char *auth_domain_name;
7978 + struct auth_domain *domain;
7979 bool datatouch;
7980 };
7981
7982 diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
7983 index a4528b26c8aa..d229d27ab19e 100644
7984 --- a/include/linux/sunrpc/svcauth_gss.h
7985 +++ b/include/linux/sunrpc/svcauth_gss.h
7986 @@ -21,7 +21,8 @@ int gss_svc_init(void);
7987 void gss_svc_shutdown(void);
7988 int gss_svc_init_net(struct net *net);
7989 void gss_svc_shutdown_net(struct net *net);
7990 -int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
7991 +struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor,
7992 + char *name);
7993 u32 svcauth_gss_flavor(struct auth_domain *dom);
7994
7995 #endif /* __KERNEL__ */
7996 diff --git a/include/net/tls.h b/include/net/tls.h
7997 index db26e3ec918f..0a065bdffa39 100644
7998 --- a/include/net/tls.h
7999 +++ b/include/net/tls.h
8000 @@ -590,6 +590,15 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
8001 return !!tls_sw_ctx_tx(ctx);
8002 }
8003
8004 +static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
8005 +{
8006 + struct tls_context *ctx = tls_get_ctx(sk);
8007 +
8008 + if (!ctx)
8009 + return false;
8010 + return !!tls_sw_ctx_rx(ctx);
8011 +}
8012 +
8013 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
8014 void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
8015
8016 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
8017 index 52641d8ca9e8..e735bc4075dc 100644
8018 --- a/include/uapi/linux/kvm.h
8019 +++ b/include/uapi/linux/kvm.h
8020 @@ -189,9 +189,11 @@ struct kvm_hyperv_exit {
8021 #define KVM_EXIT_HYPERV_SYNIC 1
8022 #define KVM_EXIT_HYPERV_HCALL 2
8023 __u32 type;
8024 + __u32 pad1;
8025 union {
8026 struct {
8027 __u32 msr;
8028 + __u32 pad2;
8029 __u64 control;
8030 __u64 evt_page;
8031 __u64 msg_page;
8032 diff --git a/kernel/audit.c b/kernel/audit.c
8033 index fcfbb3476ccd..05ae208ad442 100644
8034 --- a/kernel/audit.c
8035 +++ b/kernel/audit.c
8036 @@ -879,7 +879,7 @@ main_queue:
8037 return 0;
8038 }
8039
8040 -int audit_send_list(void *_dest)
8041 +int audit_send_list_thread(void *_dest)
8042 {
8043 struct audit_netlink_list *dest = _dest;
8044 struct sk_buff *skb;
8045 @@ -923,19 +923,30 @@ out_kfree_skb:
8046 return NULL;
8047 }
8048
8049 +static void audit_free_reply(struct audit_reply *reply)
8050 +{
8051 + if (!reply)
8052 + return;
8053 +
8054 + if (reply->skb)
8055 + kfree_skb(reply->skb);
8056 + if (reply->net)
8057 + put_net(reply->net);
8058 + kfree(reply);
8059 +}
8060 +
8061 static int audit_send_reply_thread(void *arg)
8062 {
8063 struct audit_reply *reply = (struct audit_reply *)arg;
8064 - struct sock *sk = audit_get_sk(reply->net);
8065
8066 audit_ctl_lock();
8067 audit_ctl_unlock();
8068
8069 /* Ignore failure. It'll only happen if the sender goes away,
8070 because our timeout is set to infinite. */
8071 - netlink_unicast(sk, reply->skb, reply->portid, 0);
8072 - put_net(reply->net);
8073 - kfree(reply);
8074 + netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0);
8075 + reply->skb = NULL;
8076 + audit_free_reply(reply);
8077 return 0;
8078 }
8079
8080 @@ -949,35 +960,32 @@ static int audit_send_reply_thread(void *arg)
8081 * @payload: payload data
8082 * @size: payload size
8083 *
8084 - * Allocates an skb, builds the netlink message, and sends it to the port id.
8085 - * No failure notifications.
8086 + * Allocates a skb, builds the netlink message, and sends it to the port id.
8087 */
8088 static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
8089 int multi, const void *payload, int size)
8090 {
8091 - struct net *net = sock_net(NETLINK_CB(request_skb).sk);
8092 - struct sk_buff *skb;
8093 struct task_struct *tsk;
8094 - struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
8095 - GFP_KERNEL);
8096 + struct audit_reply *reply;
8097
8098 + reply = kzalloc(sizeof(*reply), GFP_KERNEL);
8099 if (!reply)
8100 return;
8101
8102 - skb = audit_make_reply(seq, type, done, multi, payload, size);
8103 - if (!skb)
8104 - goto out;
8105 -
8106 - reply->net = get_net(net);
8107 + reply->skb = audit_make_reply(seq, type, done, multi, payload, size);
8108 + if (!reply->skb)
8109 + goto err;
8110 + reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
8111 reply->portid = NETLINK_CB(request_skb).portid;
8112 - reply->skb = skb;
8113
8114 tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
8115 - if (!IS_ERR(tsk))
8116 - return;
8117 - kfree_skb(skb);
8118 -out:
8119 - kfree(reply);
8120 + if (IS_ERR(tsk))
8121 + goto err;
8122 +
8123 + return;
8124 +
8125 +err:
8126 + audit_free_reply(reply);
8127 }
8128
8129 /*
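
The audit.c rework funnels every unwind path through audit_free_reply(), which tolerates a partially built reply (NULL skb or net), and the sender thread clears reply->skb after netlink_unicast() consumes it so the shared helper cannot double-free. A hedged sketch of that ownership discipline in plain C; the struct and send stub are stand-ins:

#include <stdlib.h>

struct reply {
        void *skb;      /* message buffer; may still be NULL on error paths */
        void *net;      /* refcounted handle; may still be NULL on error paths */
};

/* Safe on partially constructed replies: only frees what was ever set. */
static void reply_free(struct reply *r)
{
        if (!r)
                return;
        free(r->skb);
        free(r->net);           /* stands in for put_net() */
        free(r);
}

static void reply_send(struct reply *r)
{
        free(r->skb);           /* stands in for netlink_unicast(), which
                                 * consumes the buffer unconditionally */
        r->skb = NULL;          /* ownership moved: the common free path
                                 * must not touch it again */
        reply_free(r);
}

int main(void)
{
        struct reply *r = calloc(1, sizeof(*r));

        if (!r)
                return 1;
        r->skb = malloc(32);
        r->net = malloc(8);
        if (!r->skb || !r->net) {
                reply_free(r);  /* error path: works even half-built */
                return 1;
        }
        reply_send(r);
        return 0;
}
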
8130 diff --git a/kernel/audit.h b/kernel/audit.h
8131 index 6fb7160412d4..ddc22878433d 100644
8132 --- a/kernel/audit.h
8133 +++ b/kernel/audit.h
8134 @@ -229,7 +229,7 @@ struct audit_netlink_list {
8135 struct sk_buff_head q;
8136 };
8137
8138 -int audit_send_list(void *_dest);
8139 +int audit_send_list_thread(void *_dest);
8140
8141 extern int selinux_audit_rule_update(void);
8142
8143 diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
8144 index 026e34da4ace..a10e2997aa6c 100644
8145 --- a/kernel/auditfilter.c
8146 +++ b/kernel/auditfilter.c
8147 @@ -1161,11 +1161,8 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz)
8148 */
8149 int audit_list_rules_send(struct sk_buff *request_skb, int seq)
8150 {
8151 - u32 portid = NETLINK_CB(request_skb).portid;
8152 - struct net *net = sock_net(NETLINK_CB(request_skb).sk);
8153 struct task_struct *tsk;
8154 struct audit_netlink_list *dest;
8155 - int err = 0;
8156
8157 /* We can't just spew out the rules here because we might fill
8158 * the available socket buffer space and deadlock waiting for
8159 @@ -1173,25 +1170,26 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq)
8160 * happen if we're actually running in the context of auditctl
8161 * trying to _send_ the stuff */
8162
8163 - dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
8164 + dest = kmalloc(sizeof(*dest), GFP_KERNEL);
8165 if (!dest)
8166 return -ENOMEM;
8167 - dest->net = get_net(net);
8168 - dest->portid = portid;
8169 + dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
8170 + dest->portid = NETLINK_CB(request_skb).portid;
8171 skb_queue_head_init(&dest->q);
8172
8173 mutex_lock(&audit_filter_mutex);
8174 audit_list_rules(seq, &dest->q);
8175 mutex_unlock(&audit_filter_mutex);
8176
8177 - tsk = kthread_run(audit_send_list, dest, "audit_send_list");
8178 + tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list");
8179 if (IS_ERR(tsk)) {
8180 skb_queue_purge(&dest->q);
8181 + put_net(dest->net);
8182 kfree(dest);
8183 - err = PTR_ERR(tsk);
8184 + return PTR_ERR(tsk);
8185 }
8186
8187 - return err;
8188 + return 0;
8189 }
8190
8191 int audit_comparator(u32 left, u32 op, u32 right)
8192 diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
8193 index 946cfdd3b2cc..e7af1ac69d75 100644
8194 --- a/kernel/bpf/syscall.c
8195 +++ b/kernel/bpf/syscall.c
8196 @@ -1118,7 +1118,8 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
8197 map = __bpf_map_get(f);
8198 if (IS_ERR(map))
8199 return PTR_ERR(map);
8200 - if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
8201 + if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
8202 + !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
8203 err = -EPERM;
8204 goto err_put;
8205 }
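
The syscall.c fix reflects that BPF_MAP_LOOKUP_AND_DELETE_ELEM is a combined read-and-mutate operation, so a map fd opened write-only must be rejected too. The shape of the check, with illustrative flag values rather than the kernel's:

#include <stdio.h>

#define FMODE_CAN_READ  0x1     /* illustrative values, not the kernel's */
#define FMODE_CAN_WRITE 0x2

static int lookup_and_delete_allowed(unsigned int mode)
{
        /* the combined op both reads the value and mutates the map */
        if (!(mode & FMODE_CAN_READ) || !(mode & FMODE_CAN_WRITE))
                return 0;       /* -EPERM in the kernel */
        return 1;
}

int main(void)
{
        printf("write-only: %d\n", lookup_and_delete_allowed(FMODE_CAN_WRITE));
        printf("read-write: %d\n",
               lookup_and_delete_allowed(FMODE_CAN_READ | FMODE_CAN_WRITE));
        return 0;
}
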
8206 diff --git a/kernel/cpu.c b/kernel/cpu.c
8207 index d7890c1285bf..7527825ac7da 100644
8208 --- a/kernel/cpu.c
8209 +++ b/kernel/cpu.c
8210 @@ -3,6 +3,7 @@
8211 *
8212 * This code is licenced under the GPL.
8213 */
8214 +#include <linux/sched/mm.h>
8215 #include <linux/proc_fs.h>
8216 #include <linux/smp.h>
8217 #include <linux/init.h>
8218 @@ -564,6 +565,21 @@ static int bringup_cpu(unsigned int cpu)
8219 return bringup_wait_for_ap(cpu);
8220 }
8221
8222 +static int finish_cpu(unsigned int cpu)
8223 +{
8224 + struct task_struct *idle = idle_thread_get(cpu);
8225 + struct mm_struct *mm = idle->active_mm;
8226 +
8227 + /*
8228 + * idle_task_exit() will have switched to &init_mm, now
8229 + * clean up any remaining active_mm state.
8230 + */
8231 + if (mm != &init_mm)
8232 + idle->active_mm = &init_mm;
8233 + mmdrop(mm);
8234 + return 0;
8235 +}
8236 +
8237 /*
8238 * Hotplug state machine related functions
8239 */
8240 @@ -1434,7 +1450,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
8241 [CPUHP_BRINGUP_CPU] = {
8242 .name = "cpu:bringup",
8243 .startup.single = bringup_cpu,
8244 - .teardown.single = NULL,
8245 + .teardown.single = finish_cpu,
8246 .cant_stop = true,
8247 },
8248 /* Final state before CPU kills itself */
8249 diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
8250 index cbca6879ab7d..44a259338e33 100644
8251 --- a/kernel/cpu_pm.c
8252 +++ b/kernel/cpu_pm.c
8253 @@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
8254 */
8255 int cpu_pm_enter(void)
8256 {
8257 - int nr_calls;
8258 + int nr_calls = 0;
8259 int ret = 0;
8260
8261 ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
8262 @@ -131,7 +131,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
8263 */
8264 int cpu_cluster_pm_enter(void)
8265 {
8266 - int nr_calls;
8267 + int nr_calls = 0;
8268 int ret = 0;
8269
8270 ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
8271 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
8272 index f76d6f77dd5e..7d54c7c28054 100644
8273 --- a/kernel/debug/debug_core.c
8274 +++ b/kernel/debug/debug_core.c
8275 @@ -501,6 +501,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
8276
8277 if (exception_level > 1) {
8278 dump_stack();
8279 + kgdb_io_module_registered = false;
8280 panic("Recursive entry to debugger");
8281 }
8282
8283 @@ -634,6 +635,8 @@ return_normal:
8284 if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
8285 goto kgdb_restore;
8286
8287 + atomic_inc(&ignore_console_lock_warning);
8288 +
8289 /* Call the I/O driver's pre_exception routine */
8290 if (dbg_io_ops->pre_exception)
8291 dbg_io_ops->pre_exception();
8292 @@ -706,6 +709,8 @@ cpu_master_loop:
8293 if (dbg_io_ops->post_exception)
8294 dbg_io_ops->post_exception();
8295
8296 + atomic_dec(&ignore_console_lock_warning);
8297 +
8298 if (!kgdb_single_step) {
8299 raw_spin_unlock(&dbg_slave_lock);
8300 /* Wait till all the CPUs have quit from the debugger. */
8301 diff --git a/kernel/exit.c b/kernel/exit.c
8302 index 22dfaac9e48c..fa46977b9c07 100644
8303 --- a/kernel/exit.c
8304 +++ b/kernel/exit.c
8305 @@ -713,8 +713,12 @@ void __noreturn do_exit(long code)
8306 struct task_struct *tsk = current;
8307 int group_dead;
8308
8309 - profile_task_exit(tsk);
8310 - kcov_task_exit(tsk);
8311 + /*
8312 + * We can get here from a kernel oops, sometimes with preemption off.
8313 + * Start by checking for critical errors.
8314 + * Then fix up important state like USER_DS and preemption.
8315 + * Then do everything else.
8316 + */
8317
8318 WARN_ON(blk_needs_flush_plug(tsk));
8319
8320 @@ -732,6 +736,16 @@ void __noreturn do_exit(long code)
8321 */
8322 set_fs(USER_DS);
8323
8324 + if (unlikely(in_atomic())) {
8325 + pr_info("note: %s[%d] exited with preempt_count %d\n",
8326 + current->comm, task_pid_nr(current),
8327 + preempt_count());
8328 + preempt_count_set(PREEMPT_ENABLED);
8329 + }
8330 +
8331 + profile_task_exit(tsk);
8332 + kcov_task_exit(tsk);
8333 +
8334 ptrace_event(PTRACE_EVENT_EXIT, code);
8335
8336 validate_creds_for_do_exit(tsk);
8337 @@ -749,13 +763,6 @@ void __noreturn do_exit(long code)
8338
8339 exit_signals(tsk); /* sets PF_EXITING */
8340
8341 - if (unlikely(in_atomic())) {
8342 - pr_info("note: %s[%d] exited with preempt_count %d\n",
8343 - current->comm, task_pid_nr(current),
8344 - preempt_count());
8345 - preempt_count_set(PREEMPT_ENABLED);
8346 - }
8347 -
8348 /* sync mm's RSS info before statistics gathering */
8349 if (tsk->mm)
8350 sync_mm_rss(tsk->mm);
8351 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
8352 index e99d326fa569..361cbc2dc966 100644
8353 --- a/kernel/sched/core.c
8354 +++ b/kernel/sched/core.c
8355 @@ -6177,13 +6177,14 @@ void idle_task_exit(void)
8356 struct mm_struct *mm = current->active_mm;
8357
8358 BUG_ON(cpu_online(smp_processor_id()));
8359 + BUG_ON(current != this_rq()->idle);
8360
8361 if (mm != &init_mm) {
8362 switch_mm(mm, &init_mm, current);
8363 - current->active_mm = &init_mm;
8364 finish_arch_post_lock_switch();
8365 }
8366 - mmdrop(mm);
8367 +
8368 + /* finish_cpu(), as run on the BP, will clean up the active_mm state */
8369 }
8370
8371 /*
8372 @@ -7373,6 +7374,8 @@ static DEFINE_MUTEX(cfs_constraints_mutex);
8373
8374 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
8375 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
8376 +/* More than 203 days if BW_SHIFT equals 20. */
8377 +static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
8378
8379 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8380
8381 @@ -7400,6 +7403,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8382 if (period > max_cfs_quota_period)
8383 return -EINVAL;
8384
8385 + /*
8386 + * Bound the quota to defend against overflow during the bandwidth shift.
8387 + */
8388 + if (quota != RUNTIME_INF && quota > max_cfs_runtime)
8389 + return -EINVAL;
8390 +
8391 /*
8392 * Prevent race between setting of cfs_rq->runtime_enabled and
8393 * unthrottle_offline_cfs_rqs().
8394 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
8395 index 8a0e6bdba50d..2f81e4ae844e 100644
8396 --- a/kernel/sched/fair.c
8397 +++ b/kernel/sched/fair.c
8398 @@ -4942,6 +4942,8 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
8399 if (!overrun)
8400 break;
8401
8402 + idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
8403 +
8404 if (++count > 3) {
8405 u64 new, old = ktime_to_ns(cfs_b->period);
8406
8407 @@ -4971,8 +4973,6 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
8408 /* reset count so we don't come right back in here */
8409 count = 0;
8410 }
8411 -
8412 - idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
8413 }
8414 if (idle)
8415 cfs_b->period_active = 0;
8416 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
8417 index 7bf917e4d63a..5b04bba4500d 100644
8418 --- a/kernel/sched/rt.c
8419 +++ b/kernel/sched/rt.c
8420 @@ -9,6 +9,8 @@
8421
8422 int sched_rr_timeslice = RR_TIMESLICE;
8423 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
8424 +/* More than 4 hours if BW_SHIFT equals 20. */
8425 +static const u64 max_rt_runtime = MAX_BW;
8426
8427 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
8428
8429 @@ -2513,6 +2515,12 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
8430 if (rt_period == 0)
8431 return -EINVAL;
8432
8433 + /*
8434 + * Bound the quota to defend against overflow during the bandwidth shift.
8435 + */
8436 + if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
8437 + return -EINVAL;
8438 +
8439 mutex_lock(&rt_constraints_mutex);
8440 read_lock(&tasklist_lock);
8441 err = __rt_schedulable(tg, rt_period, rt_runtime);
8442 @@ -2634,7 +2642,9 @@ static int sched_rt_global_validate(void)
8443 return -EINVAL;
8444
8445 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
8446 - (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
8447 + ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
8448 + ((u64)sysctl_sched_rt_runtime *
8449 + NSEC_PER_USEC > max_rt_runtime)))
8450 return -EINVAL;
8451
8452 return 0;
8453 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
8454 index c7e7481968bf..570659f1c6e2 100644
8455 --- a/kernel/sched/sched.h
8456 +++ b/kernel/sched/sched.h
8457 @@ -1889,6 +1889,8 @@ extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
8458 #define BW_SHIFT 20
8459 #define BW_UNIT (1 << BW_SHIFT)
8460 #define RATIO_SHIFT 8
8461 +#define MAX_BW_BITS (64 - BW_SHIFT)
8462 +#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
8463 unsigned long to_ratio(u64 period, u64 runtime);
8464
8465 extern void init_entity_runnable_average(struct sched_entity *se);
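
The figures in the two new comments follow directly from the macros: with BW_SHIFT = 20, MAX_BW = 2^44 - 1. RT runtime is stored in nanoseconds, so its cap is about 4.9 hours, while the CFS quota cap is MAX_BW microseconds, about 203 days once converted to nanoseconds. A standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT      20
#define MAX_BW_BITS   (64 - BW_SHIFT)
#define MAX_BW        ((1ULL << MAX_BW_BITS) - 1)
#define NSEC_PER_USEC 1000ULL

int main(void)
{
        uint64_t max_rt_ns  = MAX_BW;                  /* rt_runtime is in ns */
        uint64_t max_cfs_ns = MAX_BW * NSEC_PER_USEC;  /* cfs quota is in us */

        printf("max rt runtime: %.2f hours\n", max_rt_ns / 1e9 / 3600);
        printf("max cfs quota:  %.2f days\n",  max_cfs_ns / 1e9 / 86400);
        return 0;   /* prints ~4.89 hours and ~203.61 days */
}
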
8466 diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
8467 index 891e1c3549c4..afbd99987cf8 100644
8468 --- a/lib/mpi/longlong.h
8469 +++ b/lib/mpi/longlong.h
8470 @@ -653,7 +653,7 @@ do { \
8471 ************** MIPS/64 **************
8472 ***************************************/
8473 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
8474 -#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
8475 +#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC)
8476 /*
8477 * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
8478 * code below, so we special case MIPS64r6 until the compiler can do better.
8479 diff --git a/lib/test_kasan.c b/lib/test_kasan.c
8480 index bd3d9ef7d39e..83344c9c38f4 100644
8481 --- a/lib/test_kasan.c
8482 +++ b/lib/test_kasan.c
8483 @@ -22,6 +22,14 @@
8484
8485 #include <asm/page.h>
8486
8487 +/*
8488 + * We assign some test results to these globals to make sure the tests
8489 + * are not eliminated as dead code.
8490 + */
8491 +
8492 +int kasan_int_result;
8493 +void *kasan_ptr_result;
8494 +
8495 /*
8496 * Note: test functions are marked noinline so that their names appear in
8497 * reports.
8498 @@ -603,7 +611,7 @@ static noinline void __init kasan_memchr(void)
8499 if (!ptr)
8500 return;
8501
8502 - memchr(ptr, '1', size + 1);
8503 + kasan_ptr_result = memchr(ptr, '1', size + 1);
8504 kfree(ptr);
8505 }
8506
8507 @@ -619,7 +627,7 @@ static noinline void __init kasan_memcmp(void)
8508 return;
8509
8510 memset(arr, 0, sizeof(arr));
8511 - memcmp(ptr, arr, size+1);
8512 + kasan_int_result = memcmp(ptr, arr, size + 1);
8513 kfree(ptr);
8514 }
8515
8516 @@ -642,22 +650,22 @@ static noinline void __init kasan_strings(void)
8517 * will likely point to zeroed byte.
8518 */
8519 ptr += 16;
8520 - strchr(ptr, '1');
8521 + kasan_ptr_result = strchr(ptr, '1');
8522
8523 pr_info("use-after-free in strrchr\n");
8524 - strrchr(ptr, '1');
8525 + kasan_ptr_result = strrchr(ptr, '1');
8526
8527 pr_info("use-after-free in strcmp\n");
8528 - strcmp(ptr, "2");
8529 + kasan_int_result = strcmp(ptr, "2");
8530
8531 pr_info("use-after-free in strncmp\n");
8532 - strncmp(ptr, "2", 1);
8533 + kasan_int_result = strncmp(ptr, "2", 1);
8534
8535 pr_info("use-after-free in strlen\n");
8536 - strlen(ptr);
8537 + kasan_int_result = strlen(ptr);
8538
8539 pr_info("use-after-free in strnlen\n");
8540 - strnlen(ptr, 1);
8541 + kasan_int_result = strnlen(ptr, 1);
8542 }
8543
8544 static noinline void __init kasan_bitops(void)
8545 @@ -724,11 +732,12 @@ static noinline void __init kasan_bitops(void)
8546 __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
8547
8548 pr_info("out-of-bounds in test_bit\n");
8549 - (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
8550 + kasan_int_result = test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
8551
8552 #if defined(clear_bit_unlock_is_negative_byte)
8553 pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
8554 - clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
8555 + kasan_int_result = clear_bit_unlock_is_negative_byte(BITS_PER_LONG +
8556 + BITS_PER_BYTE, bits);
8557 #endif
8558 kfree(bits);
8559 }
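
The test_kasan.c assignments exist because the compiler may delete a call whose result is unused, and with it the out-of-bounds access the test is trying to provoke. Storing into a non-static global is the usual sink; a standalone form of the idiom (assumes an optimizing build, e.g. -O2):

#include <string.h>

/* Non-static globals: the optimizer must assume another translation unit
 * reads them, so the stores (and the calls feeding them) stay live. */
int sink_int;
const void *sink_ptr;

void probe(const char *buf, size_t n)
{
        /* without the assignments, both calls are candidates for dead-code
         * elimination and the memory accesses under test never happen */
        sink_ptr = memchr(buf, '1', n);
        sink_int = strcmp(buf, "2");
}
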
8560 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
8561 index 7ec5710afc99..da9040a6838f 100644
8562 --- a/mm/huge_memory.c
8563 +++ b/mm/huge_memory.c
8564 @@ -2301,6 +2301,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
8565 {
8566 spinlock_t *ptl;
8567 struct mmu_notifier_range range;
8568 + bool was_locked = false;
8569 + pmd_t _pmd;
8570
8571 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
8572 address & HPAGE_PMD_MASK,
8573 @@ -2313,11 +2315,32 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
8574 * pmd against. Otherwise we can end up replacing wrong page.
8575 */
8576 VM_BUG_ON(freeze && !page);
8577 - if (page && page != pmd_page(*pmd))
8578 - goto out;
8579 + if (page) {
8580 + VM_WARN_ON_ONCE(!PageLocked(page));
8581 + was_locked = true;
8582 + if (page != pmd_page(*pmd))
8583 + goto out;
8584 + }
8585
8586 +repeat:
8587 if (pmd_trans_huge(*pmd)) {
8588 - page = pmd_page(*pmd);
8589 + if (!page) {
8590 + page = pmd_page(*pmd);
8591 + if (unlikely(!trylock_page(page))) {
8592 + get_page(page);
8593 + _pmd = *pmd;
8594 + spin_unlock(ptl);
8595 + lock_page(page);
8596 + spin_lock(ptl);
8597 + if (unlikely(!pmd_same(*pmd, _pmd))) {
8598 + unlock_page(page);
8599 + put_page(page);
8600 + page = NULL;
8601 + goto repeat;
8602 + }
8603 + put_page(page);
8604 + }
8605 + }
8606 if (PageMlocked(page))
8607 clear_page_mlock(page);
8608 } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
8609 @@ -2325,6 +2348,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
8610 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
8611 out:
8612 spin_unlock(ptl);
8613 + if (!was_locked && page)
8614 + unlock_page(page);
8615 /*
8616 * No need to double call mmu_notifier->invalidate_range() callback.
8617 * There are 3 cases to consider inside __split_huge_pmd_locked():
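
When __split_huge_pmd() is called without a pinned page, it now takes the page lock itself, and because trylock can fail under the page-table spinlock it falls back to the classic drop, sleep-lock, retake, revalidate dance: release the ptl, sleep on the page lock, retake the ptl, and retry if the pmd changed in between. A hedged pthread sketch of that shape, with a generation counter standing in for pmd_same():

#include <pthread.h>

static pthread_spinlock_t ptl;   /* page-table lock stand-in; see init() */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long generation; /* bumped whenever the guarded state changes */

void init(void)
{
        pthread_spin_init(&ptl, PTHREAD_PROCESS_PRIVATE);
}

void locked_op(void (*op)(void))
{
        unsigned long seen;

        pthread_spin_lock(&ptl);
retry:
        if (pthread_mutex_trylock(&page_lock) != 0) {
                seen = generation;
                pthread_spin_unlock(&ptl);      /* cannot sleep under a spinlock */
                pthread_mutex_lock(&page_lock); /* sleeping acquire */
                pthread_spin_lock(&ptl);
                if (generation != seen) {       /* state changed while unlocked */
                        pthread_mutex_unlock(&page_lock);
                        goto retry;
                }
        }
        op();                                   /* both locks held, state valid */
        pthread_mutex_unlock(&page_lock);
        pthread_spin_unlock(&ptl);
}
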
8618 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
8619 index 98d5c940facd..8686fe760f34 100644
8620 --- a/mm/page_alloc.c
8621 +++ b/mm/page_alloc.c
8622 @@ -1640,7 +1640,6 @@ static void __init deferred_free_pages(unsigned long pfn,
8623 } else if (!(pfn & nr_pgmask)) {
8624 deferred_free_range(pfn - nr_free, nr_free);
8625 nr_free = 1;
8626 - touch_nmi_watchdog();
8627 } else {
8628 nr_free++;
8629 }
8630 @@ -1670,7 +1669,6 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
8631 continue;
8632 } else if (!page || !(pfn & nr_pgmask)) {
8633 page = pfn_to_page(pfn);
8634 - touch_nmi_watchdog();
8635 } else {
8636 page++;
8637 }
8638 @@ -1793,6 +1791,13 @@ static int __init deferred_init_memmap(void *data)
8639 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
8640 pgdat->first_deferred_pfn = ULONG_MAX;
8641
8642 + /*
8643 + * Once we unlock here, the zone cannot be grown anymore, thus if an
8644 + * interrupt thread must allocate this early in boot, the zone must be
8645 + * pre-grown before deferred page initialization starts.
8646 + */
8647 + pgdat_resize_unlock(pgdat, &flags);
8648 +
8649 /* Only the highest zone is deferred so find it */
8650 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
8651 zone = pgdat->node_zones + zid;
8652 @@ -1810,11 +1815,11 @@ static int __init deferred_init_memmap(void *data)
8653 * that we can avoid introducing any issues with the buddy
8654 * allocator.
8655 */
8656 - while (spfn < epfn)
8657 + while (spfn < epfn) {
8658 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
8659 + cond_resched();
8660 + }
8661 zone_empty:
8662 - pgdat_resize_unlock(pgdat, &flags);
8663 -
8664 /* Sanity check that the next zone really is unpopulated */
8665 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
8666
8667 @@ -1856,17 +1861,6 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
8668
8669 pgdat_resize_lock(pgdat, &flags);
8670
8671 - /*
8672 - * If deferred pages have been initialized while we were waiting for
8673 - * the lock, return true, as the zone was grown. The caller will retry
8674 - * this zone. We won't return to this function since the caller also
8675 - * has this static branch.
8676 - */
8677 - if (!static_branch_unlikely(&deferred_pages)) {
8678 - pgdat_resize_unlock(pgdat, &flags);
8679 - return true;
8680 - }
8681 -
8682 /*
8683 * If someone grew this zone while we were waiting for spinlock, return
8684 * true, as there might be enough pages already.
8685 @@ -1895,6 +1889,7 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
8686 first_deferred_pfn = spfn;
8687
8688 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
8689 + touch_nmi_watchdog();
8690
8691 /* We should only stop along section boundaries */
8692 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
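
Two coordinated changes above: deferred_init_memmap() now drops pgdat_resize_lock() before the long pfn walk (after which the zone can no longer be grown), and the walk yields with cond_resched() instead of petting the NMI watchdog from inside the helpers; deferred_grow_zone(), which still runs under the lock, gains a touch_nmi_watchdog() instead. A hedged sketch of the reshaped loop, with pthread and sched_yield() standing in for the kernel primitives:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t resize_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pages;

static void deferred_init(void)
{
        pthread_mutex_lock(&resize_lock);
        /* ... consume the state the lock really protects (first pfn) ... */
        pthread_mutex_unlock(&resize_lock); /* not held across the long walk */

        for (long chunk = 0; chunk < 4096; chunk++) {
                pages += 512;       /* stands in for deferred_init_maxorder() */
                sched_yield();      /* stands in for cond_resched() */
        }
}

int main(void)
{
        deferred_init();
        printf("initialised %lu pages\n", pages);
        return 0;
}
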
8693 diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
8694 index 2614a9caee00..a39af0eefad3 100644
8695 --- a/net/batman-adv/bat_v_elp.c
8696 +++ b/net/batman-adv/bat_v_elp.c
8697 @@ -120,20 +120,7 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
8698 rtnl_lock();
8699 ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings);
8700 rtnl_unlock();
8701 -
8702 - /* Virtual interface drivers such as tun / tap interfaces, VLAN, etc
8703 - * tend to initialize the interface throughput with some value for the
8704 - * sake of having a throughput number to export via ethtool. This
8705 - * exported throughput leaves batman-adv to conclude the interface
8706 - * throughput is genuine (reflecting reality), thus no measurements
8707 - * are necessary.
8708 - *
8709 - * Based on the observation that those interface types also tend to set
8710 - * the link auto-negotiation to 'off', batman-adv shall check this
8711 - * setting to differentiate between genuine link throughput information
8712 - * and placeholders installed by virtual interfaces.
8713 - */
8714 - if (ret == 0 && link_settings.base.autoneg == AUTONEG_ENABLE) {
8715 + if (ret == 0) {
8716 /* link characteristics might change over time */
8717 if (link_settings.base.duplex == DUPLEX_FULL)
8718 hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX;
8719 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
8720 index c1d3a303d97f..88cd410e5728 100644
8721 --- a/net/bluetooth/hci_event.c
8722 +++ b/net/bluetooth/hci_event.c
8723 @@ -4216,6 +4216,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
8724 case 0x11: /* Unsupported Feature or Parameter Value */
8725 case 0x1c: /* SCO interval rejected */
8726 case 0x1a: /* Unsupported Remote Feature */
8727 + case 0x1e: /* Invalid LMP Parameters */
8728 case 0x1f: /* Unspecified error */
8729 case 0x20: /* Unsupported LMP Parameter value */
8730 if (conn->out) {
8731 diff --git a/net/core/skmsg.c b/net/core/skmsg.c
8732 index ded2d5227678..0536ea9298e4 100644
8733 --- a/net/core/skmsg.c
8734 +++ b/net/core/skmsg.c
8735 @@ -7,6 +7,7 @@
8736
8737 #include <net/sock.h>
8738 #include <net/tcp.h>
8739 +#include <net/tls.h>
8740
8741 static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
8742 {
8743 @@ -686,13 +687,75 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
8744 return container_of(parser, struct sk_psock, parser);
8745 }
8746
8747 -static void sk_psock_verdict_apply(struct sk_psock *psock,
8748 - struct sk_buff *skb, int verdict)
8749 +static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
8750 {
8751 struct sk_psock *psock_other;
8752 struct sock *sk_other;
8753 bool ingress;
8754
8755 + sk_other = tcp_skb_bpf_redirect_fetch(skb);
8756 + if (unlikely(!sk_other)) {
8757 + kfree_skb(skb);
8758 + return;
8759 + }
8760 + psock_other = sk_psock(sk_other);
8761 + if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
8762 + !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
8763 + kfree_skb(skb);
8764 + return;
8765 + }
8766 +
8767 + ingress = tcp_skb_bpf_ingress(skb);
8768 + if ((!ingress && sock_writeable(sk_other)) ||
8769 + (ingress &&
8770 + atomic_read(&sk_other->sk_rmem_alloc) <=
8771 + sk_other->sk_rcvbuf)) {
8772 + if (!ingress)
8773 + skb_set_owner_w(skb, sk_other);
8774 + skb_queue_tail(&psock_other->ingress_skb, skb);
8775 + schedule_work(&psock_other->work);
8776 + } else {
8777 + kfree_skb(skb);
8778 + }
8779 +}
8780 +
8781 +static void sk_psock_tls_verdict_apply(struct sk_psock *psock,
8782 + struct sk_buff *skb, int verdict)
8783 +{
8784 + switch (verdict) {
8785 + case __SK_REDIRECT:
8786 + sk_psock_skb_redirect(psock, skb);
8787 + break;
8788 + case __SK_PASS:
8789 + case __SK_DROP:
8790 + default:
8791 + break;
8792 + }
8793 +}
8794 +
8795 +int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
8796 +{
8797 + struct bpf_prog *prog;
8798 + int ret = __SK_PASS;
8799 +
8800 + rcu_read_lock();
8801 + prog = READ_ONCE(psock->progs.skb_verdict);
8802 + if (likely(prog)) {
8803 + tcp_skb_bpf_redirect_clear(skb);
8804 + ret = sk_psock_bpf_run(psock, prog, skb);
8805 + ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
8806 + }
8807 + rcu_read_unlock();
8808 + sk_psock_tls_verdict_apply(psock, skb, ret);
8809 + return ret;
8810 +}
8811 +EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
8812 +
8813 +static void sk_psock_verdict_apply(struct sk_psock *psock,
8814 + struct sk_buff *skb, int verdict)
8815 +{
8816 + struct sock *sk_other;
8817 +
8818 switch (verdict) {
8819 case __SK_PASS:
8820 sk_other = psock->sk;
8821 @@ -711,25 +774,8 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
8822 }
8823 goto out_free;
8824 case __SK_REDIRECT:
8825 - sk_other = tcp_skb_bpf_redirect_fetch(skb);
8826 - if (unlikely(!sk_other))
8827 - goto out_free;
8828 - psock_other = sk_psock(sk_other);
8829 - if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
8830 - !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED))
8831 - goto out_free;
8832 - ingress = tcp_skb_bpf_ingress(skb);
8833 - if ((!ingress && sock_writeable(sk_other)) ||
8834 - (ingress &&
8835 - atomic_read(&sk_other->sk_rmem_alloc) <=
8836 - sk_other->sk_rcvbuf)) {
8837 - if (!ingress)
8838 - skb_set_owner_w(skb, sk_other);
8839 - skb_queue_tail(&psock_other->ingress_skb, skb);
8840 - schedule_work(&psock_other->work);
8841 - break;
8842 - }
8843 - /* fall-through */
8844 + sk_psock_skb_redirect(psock, skb);
8845 + break;
8846 case __SK_DROP:
8847 /* fall-through */
8848 default:
8849 @@ -783,9 +829,13 @@ static void sk_psock_strp_data_ready(struct sock *sk)
8850 rcu_read_lock();
8851 psock = sk_psock(sk);
8852 if (likely(psock)) {
8853 - write_lock_bh(&sk->sk_callback_lock);
8854 - strp_data_ready(&psock->parser.strp);
8855 - write_unlock_bh(&sk->sk_callback_lock);
8856 + if (tls_sw_has_ctx_rx(sk)) {
8857 + psock->parser.saved_data_ready(sk);
8858 + } else {
8859 + write_lock_bh(&sk->sk_callback_lock);
8860 + strp_data_ready(&psock->parser.strp);
8861 + write_unlock_bh(&sk->sk_callback_lock);
8862 + }
8863 }
8864 rcu_read_unlock();
8865 }
8866 diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
8867 index c3c93e95b46e..243e8107f456 100644
8868 --- a/net/netfilter/nft_nat.c
8869 +++ b/net/netfilter/nft_nat.c
8870 @@ -129,7 +129,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
8871 priv->type = NF_NAT_MANIP_DST;
8872 break;
8873 default:
8874 - return -EINVAL;
8875 + return -EOPNOTSUPP;
8876 }
8877
8878 if (tb[NFTA_NAT_FAMILY] == NULL)
8879 @@ -196,7 +196,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
8880 if (tb[NFTA_NAT_FLAGS]) {
8881 priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS]));
8882 if (priv->flags & ~NF_NAT_RANGE_MASK)
8883 - return -EINVAL;
8884 + return -EOPNOTSUPP;
8885 }
8886
8887 return nf_ct_netns_get(ctx->net, family);
8888 diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
8889 index 8fa924c8e282..9314999bf095 100644
8890 --- a/net/sunrpc/auth_gss/gss_mech_switch.c
8891 +++ b/net/sunrpc/auth_gss/gss_mech_switch.c
8892 @@ -36,6 +36,8 @@ gss_mech_free(struct gss_api_mech *gm)
8893
8894 for (i = 0; i < gm->gm_pf_num; i++) {
8895 pf = &gm->gm_pfs[i];
8896 + if (pf->domain)
8897 + auth_domain_put(pf->domain);
8898 kfree(pf->auth_domain_name);
8899 pf->auth_domain_name = NULL;
8900 }
8901 @@ -58,6 +60,7 @@ make_auth_domain_name(char *name)
8902 static int
8903 gss_mech_svc_setup(struct gss_api_mech *gm)
8904 {
8905 + struct auth_domain *dom;
8906 struct pf_desc *pf;
8907 int i, status;
8908
8909 @@ -67,10 +70,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm)
8910 status = -ENOMEM;
8911 if (pf->auth_domain_name == NULL)
8912 goto out;
8913 - status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor,
8914 - pf->auth_domain_name);
8915 - if (status)
8916 + dom = svcauth_gss_register_pseudoflavor(
8917 + pf->pseudoflavor, pf->auth_domain_name);
8918 + if (IS_ERR(dom)) {
8919 + status = PTR_ERR(dom);
8920 goto out;
8921 + }
8922 + pf->domain = dom;
8923 }
8924 return 0;
8925 out:
8926 diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
8927 index d9f7439e2431..fd91274e834d 100644
8928 --- a/net/sunrpc/auth_gss/svcauth_gss.c
8929 +++ b/net/sunrpc/auth_gss/svcauth_gss.c
8930 @@ -800,7 +800,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
8931
8932 EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
8933
8934 -int
8935 +struct auth_domain *
8936 svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
8937 {
8938 struct gss_domain *new;
8939 @@ -817,21 +817,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
8940 new->h.flavour = &svcauthops_gss;
8941 new->pseudoflavor = pseudoflavor;
8942
8943 - stat = 0;
8944 test = auth_domain_lookup(name, &new->h);
8945 - if (test != &new->h) { /* Duplicate registration */
8946 + if (test != &new->h) {
8947 + pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
8948 + name);
8949 + stat = -EADDRINUSE;
8950 auth_domain_put(test);
8951 - kfree(new->h.name);
8952 - goto out_free_dom;
8953 + goto out_free_name;
8954 }
8955 - return 0;
8956 + return test;
8957
8958 +out_free_name:
8959 + kfree(new->h.name);
8960 out_free_dom:
8961 kfree(new);
8962 out:
8963 - return stat;
8964 + return ERR_PTR(stat);
8965 }
8966 -
8967 EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
8968
8969 static inline int
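
svcauth_gss_register_pseudoflavor() now hands the caller either the registered auth_domain or an errno packed into the pointer, so gss_mech_svc_setup() can store the reference that gss_mech_free() later drops. The ERR_PTR convention it relies on reserves the top few thousand addresses for small negative errnos; a standalone rendition:

#include <stdio.h>

#define MAX_ERRNO 4095  /* errnos occupy the top 4095 addresses */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for the new registration API: a valid pointer on success,
 * ERR_PTR(-errno) on failure, never NULL. */
static void *register_flavor(int fail)
{
        static int domain;      /* stands in for a struct auth_domain */

        if (fail)
                return ERR_PTR(-98);    /* e.g. -EADDRINUSE on duplicates */
        return &domain;
}

int main(void)
{
        void *dom = register_flavor(1);

        if (IS_ERR(dom))
                printf("registration failed: %ld\n", PTR_ERR(dom));
        return 0;
}
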
8970 diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
8971 index fbf6a496ee8b..70b203e5d5fd 100644
8972 --- a/net/tls/tls_sw.c
8973 +++ b/net/tls/tls_sw.c
8974 @@ -1737,6 +1737,7 @@ int tls_sw_recvmsg(struct sock *sk,
8975 long timeo;
8976 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
8977 bool is_peek = flags & MSG_PEEK;
8978 + bool bpf_strp_enabled;
8979 int num_async = 0;
8980 int pending;
8981
8982 @@ -1747,6 +1748,7 @@ int tls_sw_recvmsg(struct sock *sk,
8983
8984 psock = sk_psock_get(sk);
8985 lock_sock(sk);
8986 + bpf_strp_enabled = sk_psock_strp_enabled(psock);
8987
8988 /* Process pending decrypted records. It must be non-zero-copy */
8989 err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
8990 @@ -1800,11 +1802,12 @@ int tls_sw_recvmsg(struct sock *sk,
8991
8992 if (to_decrypt <= len && !is_kvec && !is_peek &&
8993 ctx->control == TLS_RECORD_TYPE_DATA &&
8994 - prot->version != TLS_1_3_VERSION)
8995 + prot->version != TLS_1_3_VERSION &&
8996 + !bpf_strp_enabled)
8997 zc = true;
8998
8999 /* Do not use async mode if record is non-data */
9000 - if (ctx->control == TLS_RECORD_TYPE_DATA)
9001 + if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
9002 async_capable = ctx->async_capable;
9003 else
9004 async_capable = false;
9005 @@ -1854,6 +1857,19 @@ int tls_sw_recvmsg(struct sock *sk,
9006 goto pick_next_record;
9007
9008 if (!zc) {
9009 + if (bpf_strp_enabled) {
9010 + err = sk_psock_tls_strp_read(psock, skb);
9011 + if (err != __SK_PASS) {
9012 + rxm->offset = rxm->offset + rxm->full_len;
9013 + rxm->full_len = 0;
9014 + if (err == __SK_DROP)
9015 + consume_skb(skb);
9016 + ctx->recv_pkt = NULL;
9017 + __strp_unpause(&ctx->strp);
9018 + continue;
9019 + }
9020 + }
9021 +
9022 if (rxm->full_len > len) {
9023 retain_skb = true;
9024 chunk = len;
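
Across skmsg.c and tls_sw.c the flow is: tls_sw_recvmsg() samples sk_psock_strp_enabled() once under the socket lock, and while a BPF verdict program is attached it disables zero-copy and async decrypt so every decrypted record can be shown to sk_psock_tls_strp_read() before reaching userspace. The per-record dispatch reduces to a small state machine; a hedged sketch, with the verdict values and handlers as stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum verdict { SK_PASS, SK_REDIRECT, SK_DROP };

/* stand-in for the attached BPF verdict program */
static enum verdict bpf_verdict(const char *record)
{
        return record[0] == 'r' ? SK_REDIRECT :
               record[0] == 'd' ? SK_DROP : SK_PASS;
}

/* one decrypted TLS record: only SK_PASS reaches the local reader */
static void rx_record(const char *record, bool bpf_strp_enabled)
{
        if (bpf_strp_enabled) {
                switch (bpf_verdict(record)) {
                case SK_REDIRECT:
                        printf("queued to peer socket: %s\n", record);
                        return;
                case SK_DROP:
                        printf("dropped: %s\n", record);
                        return;
                case SK_PASS:
                        break;
                }
        }
        printf("delivered to reader: %s\n", record);
}

int main(void)
{
        rx_record("payload", true);
        rx_record("redirect-me", true);
        rx_record("drop-me", true);
        return 0;
}
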
9025 diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
9026 index fbc2ee6d46fc..ee6bd945f3d6 100644
9027 --- a/security/integrity/evm/evm_crypto.c
9028 +++ b/security/integrity/evm/evm_crypto.c
9029 @@ -243,7 +243,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
9030
9031 /* Portable EVM signatures must include an IMA hash */
9032 if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
9033 - return -EPERM;
9034 + error = -EPERM;
9035 out:
9036 kfree(xattr_value);
9037 kfree(desc);
9038 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
9039 index 3689081aaf38..be469fce19e1 100644
9040 --- a/security/integrity/ima/ima.h
9041 +++ b/security/integrity/ima/ima.h
9042 @@ -36,7 +36,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
9043 #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
9044 #define IMA_EVENT_NAME_LEN_MAX 255
9045
9046 -#define IMA_HASH_BITS 9
9047 +#define IMA_HASH_BITS 10
9048 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
9049
9050 #define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
9051 @@ -52,6 +52,7 @@ extern int ima_policy_flag;
9052 extern int ima_hash_algo;
9053 extern int ima_appraise;
9054 extern struct tpm_chip *ima_tpm_chip;
9055 +extern const char boot_aggregate_name[];
9056
9057 /* IMA event related data */
9058 struct ima_event_data {
9059 @@ -140,7 +141,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len,
9060 int ima_calc_field_array_hash(struct ima_field_data *field_data,
9061 struct ima_template_desc *desc, int num_fields,
9062 struct ima_digest_data *hash);
9063 -int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
9064 +int ima_calc_boot_aggregate(struct ima_digest_data *hash);
9065 void ima_add_violation(struct file *file, const unsigned char *filename,
9066 struct integrity_iint_cache *iint,
9067 const char *op, const char *cause);
9068 @@ -175,9 +176,10 @@ struct ima_h_table {
9069 };
9070 extern struct ima_h_table ima_htable;
9071
9072 -static inline unsigned long ima_hash_key(u8 *digest)
9073 +static inline unsigned int ima_hash_key(u8 *digest)
9074 {
9075 - return hash_long(*digest, IMA_HASH_BITS);
9076 + /* there is no point in taking a hash of part of a digest */
9077 + return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
9078 }
9079
9080 #define __ima_hooks(hook) \
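
The ima_hash_key() fix stops feeding a single digest byte to hash_long(), which left most of the table unused, and instead indexes with the first two bytes directly: with IMA_HASH_BITS now 10 the table has 1024 buckets, and two bytes of an already-uniform digest spread across them evenly. A standalone check of the new bucket function:

#include <stdio.h>

#define IMA_HASH_BITS           10
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)

/* There is no point hashing part of a digest: it is already uniform,
 * so two raw bytes give a well-spread 10-bit bucket index. */
static unsigned int ima_hash_key(const unsigned char *digest)
{
        return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
}

int main(void)
{
        unsigned char d1[] = { 0xab, 0xcd }, d2[] = { 0xff, 0x03 };

        printf("%u %u\n", ima_hash_key(d1), ima_hash_key(d2)); /* 427 1023 */
        return 0;
}
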
9081 diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
9082 index ad6cbbccc8d9..d5ad7b2539c7 100644
9083 --- a/security/integrity/ima/ima_crypto.c
9084 +++ b/security/integrity/ima/ima_crypto.c
9085 @@ -645,7 +645,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len,
9086 return calc_buffer_shash(buf, len, hash);
9087 }
9088
9089 -static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
9090 +static void ima_pcrread(u32 idx, struct tpm_digest *d)
9091 {
9092 if (!ima_tpm_chip)
9093 return;
9094 @@ -655,18 +655,29 @@ static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
9095 }
9096
9097 /*
9098 - * Calculate the boot aggregate hash
9099 + * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
9100 + * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
9101 + * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
9102 + * allowing firmware to configure and enable different banks.
9103 + *
9104 + * Knowing which TPM bank is read to calculate the boot_aggregate digest
9105 + * needs to be conveyed to a verifier. For this reason, use the same
9106 + * hash algorithm for reading the TPM PCRs as for calculating the boot
9107 + * aggregate digest as stored in the measurement list.
9108 */
9109 -static int __init ima_calc_boot_aggregate_tfm(char *digest,
9110 - struct crypto_shash *tfm)
9111 +static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
9112 + struct crypto_shash *tfm)
9113 {
9114 - struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} };
9115 + struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
9116 int rc;
9117 u32 i;
9118 SHASH_DESC_ON_STACK(shash, tfm);
9119
9120 shash->tfm = tfm;
9121
9122 + pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
9123 + d.alg_id);
9124 +
9125 rc = crypto_shash_init(shash);
9126 if (rc != 0)
9127 return rc;
9128 @@ -675,24 +686,48 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest,
9129 for (i = TPM_PCR0; i < TPM_PCR8; i++) {
9130 ima_pcrread(i, &d);
9131 /* now accumulate with current aggregate */
9132 - rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE);
9133 + rc = crypto_shash_update(shash, d.digest,
9134 + crypto_shash_digestsize(tfm));
9135 }
9136 if (!rc)
9137 crypto_shash_final(shash, digest);
9138 return rc;
9139 }
9140
9141 -int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
9142 +int ima_calc_boot_aggregate(struct ima_digest_data *hash)
9143 {
9144 struct crypto_shash *tfm;
9145 - int rc;
9146 + u16 crypto_id, alg_id;
9147 + int rc, i, bank_idx = -1;
9148 +
9149 + for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
9150 + crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
9151 + if (crypto_id == hash->algo) {
9152 + bank_idx = i;
9153 + break;
9154 + }
9155 +
9156 + if (crypto_id == HASH_ALGO_SHA256)
9157 + bank_idx = i;
9158 +
9159 + if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
9160 + bank_idx = i;
9161 + }
9162 +
9163 + if (bank_idx == -1) {
9164 + pr_err("No suitable TPM algorithm for boot aggregate\n");
9165 + return 0;
9166 + }
9167 +
9168 + hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;
9169
9170 tfm = ima_alloc_tfm(hash->algo);
9171 if (IS_ERR(tfm))
9172 return PTR_ERR(tfm);
9173
9174 hash->length = crypto_shash_digestsize(tfm);
9175 - rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
9176 + alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
9177 + rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);
9178
9179 ima_free_tfm(tfm);
9180
9181 diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
9182 index 5d55ade5f3b9..a94177042eaa 100644
9183 --- a/security/integrity/ima/ima_init.c
9184 +++ b/security/integrity/ima/ima_init.c
9185 @@ -21,13 +21,13 @@
9186 #include "ima.h"
9187
9188 /* name for boot aggregate entry */
9189 -static const char boot_aggregate_name[] = "boot_aggregate";
9190 +const char boot_aggregate_name[] = "boot_aggregate";
9191 struct tpm_chip *ima_tpm_chip;
9192
9193 /* Add the boot aggregate to the IMA measurement list and extend
9194 * the PCR register.
9195 *
9196 - * Calculate the boot aggregate, a SHA1 over tpm registers 0-7,
9197 + * Calculate the boot aggregate, a hash over tpm registers 0-7,
9198 * assuming a TPM chip exists, and zeroes if the TPM chip does not
9199 * exist. Add the boot aggregate measurement to the measurement
9200 * list and extend the PCR register.
9201 @@ -51,15 +51,27 @@ static int __init ima_add_boot_aggregate(void)
9202 int violation = 0;
9203 struct {
9204 struct ima_digest_data hdr;
9205 - char digest[TPM_DIGEST_SIZE];
9206 + char digest[TPM_MAX_DIGEST_SIZE];
9207 } hash;
9208
9209 memset(iint, 0, sizeof(*iint));
9210 memset(&hash, 0, sizeof(hash));
9211 iint->ima_hash = &hash.hdr;
9212 - iint->ima_hash->algo = HASH_ALGO_SHA1;
9213 - iint->ima_hash->length = SHA1_DIGEST_SIZE;
9214 -
9215 + iint->ima_hash->algo = ima_hash_algo;
9216 + iint->ima_hash->length = hash_digest_size[ima_hash_algo];
9217 +
9218 + /*
9219 + * With TPM 2.0 hash agility, TPM chips could support multiple TPM
9220 + * PCR banks, allowing firmware to configure and enable different
9221 + * banks. The SHA1 bank is not necessarily enabled.
9222 + *
9223 + * Use the same hash algorithm for reading the TPM PCRs as for
9224 + * calculating the boot aggregate digest. Preference is given to
9225 + * the configured IMA default hash algorithm. Otherwise, use the
9226 + * TCG required banks - SHA256 for TPM 2.0, SHA1 for TPM 1.2.
9227 + * Ultimately select SHA1 also for TPM 2.0 if the SHA256 PCR bank
9228 + * is not found.
9229 + */
9230 if (ima_tpm_chip) {
9231 result = ima_calc_boot_aggregate(&hash.hdr);
9232 if (result < 0) {
9233 diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
9234 index 60027c643ecd..a768f37a0a4d 100644
9235 --- a/security/integrity/ima/ima_main.c
9236 +++ b/security/integrity/ima/ima_main.c
9237 @@ -712,6 +712,9 @@ static int __init init_ima(void)
9238 error = ima_init();
9239 }
9240
9241 + if (error)
9242 + return error;
9243 +
9244 error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier);
9245 if (error)
9246 pr_warn("Couldn't register LSM notifier, error %d\n", error);
9247 diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
9248 index ee9aec5e98f0..558a7607bf93 100644
9249 --- a/security/integrity/ima/ima_policy.c
9250 +++ b/security/integrity/ima/ima_policy.c
9251 @@ -204,7 +204,7 @@ static struct ima_rule_entry *arch_policy_entry __ro_after_init;
9252 static LIST_HEAD(ima_default_rules);
9253 static LIST_HEAD(ima_policy_rules);
9254 static LIST_HEAD(ima_temp_rules);
9255 -static struct list_head *ima_rules;
9256 +static struct list_head *ima_rules = &ima_default_rules;
9257
9258 static int ima_policy __initdata;
9259
9260 @@ -591,9 +591,12 @@ static void add_rules(struct ima_rule_entry *entries, int count,
9261 list_add_tail(&entry->list, &ima_policy_rules);
9262 }
9263 if (entries[i].action == APPRAISE) {
9264 - temp_ima_appraise |= ima_appraise_flag(entries[i].func);
9265 - if (entries[i].func == POLICY_CHECK)
9266 - temp_ima_appraise |= IMA_APPRAISE_POLICY;
9267 + if (entries != build_appraise_rules)
9268 + temp_ima_appraise |=
9269 + ima_appraise_flag(entries[i].func);
9270 + else
9271 + build_ima_appraise |=
9272 + ima_appraise_flag(entries[i].func);
9273 }
9274 }
9275 }
9276 @@ -712,7 +715,6 @@ void __init ima_init_policy(void)
9277 ARRAY_SIZE(default_appraise_rules),
9278 IMA_DEFAULT_POLICY);
9279
9280 - ima_rules = &ima_default_rules;
9281 ima_update_policy_flag();
9282 }
9283
9284 diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
9285 index 32ae05d88257..1be146e17d9f 100644
9286 --- a/security/integrity/ima/ima_template_lib.c
9287 +++ b/security/integrity/ima/ima_template_lib.c
9288 @@ -288,6 +288,24 @@ int ima_eventdigest_init(struct ima_event_data *event_data,
9289 goto out;
9290 }
9291
9292 + if ((const char *)event_data->filename == boot_aggregate_name) {
9293 + if (ima_tpm_chip) {
9294 + hash.hdr.algo = HASH_ALGO_SHA1;
9295 + result = ima_calc_boot_aggregate(&hash.hdr);
9296 +
9297 + /* algo can change depending on available PCR banks */
9298 + if (!result && hash.hdr.algo != HASH_ALGO_SHA1)
9299 + result = -EINVAL;
9300 +
9301 + if (result < 0)
9302 + memset(&hash, 0, sizeof(hash));
9303 + }
9304 +
9305 + cur_digest = hash.hdr.digest;
9306 + cur_digestsize = hash_digest_size[HASH_ALGO_SHA1];
9307 + goto out;
9308 + }
9309 +
9310 if (!event_data->file) /* missing info to re-calculate the digest */
9311 return -EINVAL;
9312
9313 diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
9314 index b2f87015d6e9..3f38583bed06 100644
9315 --- a/security/lockdown/lockdown.c
9316 +++ b/security/lockdown/lockdown.c
9317 @@ -177,7 +177,7 @@ static int __init lockdown_secfs_init(void)
9318 {
9319 struct dentry *dentry;
9320
9321 - dentry = securityfs_create_file("lockdown", 0600, NULL, NULL,
9322 + dentry = securityfs_create_file("lockdown", 0644, NULL, NULL,
9323 &lockdown_ops);
9324 return PTR_ERR_OR_ZERO(dentry);
9325 }
9326 diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
9327 index 1260f5fb766e..dd7aabd94a92 100644
9328 --- a/security/selinux/ss/policydb.c
9329 +++ b/security/selinux/ss/policydb.c
9330 @@ -2496,6 +2496,7 @@ int policydb_read(struct policydb *p, void *fp)
9331 if (rc)
9332 goto bad;
9333
9334 + rc = -ENOMEM;
9335 p->type_attr_map_array = kvcalloc(p->p_types.nprim,
9336 sizeof(*p->type_attr_map_array),
9337 GFP_KERNEL);
9338 diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py
9339 index 7e344a78a627..b8c082c9fd7d 100644
9340 --- a/tools/cgroup/iocost_monitor.py
9341 +++ b/tools/cgroup/iocost_monitor.py
9342 @@ -112,14 +112,14 @@ class IocStat:
9343
9344 def dict(self, now):
9345 return { 'device' : devname,
9346 - 'timestamp' : str(now),
9347 - 'enabled' : str(int(self.enabled)),
9348 - 'running' : str(int(self.running)),
9349 - 'period_ms' : str(self.period_ms),
9350 - 'period_at' : str(self.period_at),
9351 - 'period_vtime_at' : str(self.vperiod_at),
9352 - 'busy_level' : str(self.busy_level),
9353 - 'vrate_pct' : str(self.vrate_pct), }
9354 + 'timestamp' : now,
9355 + 'enabled' : self.enabled,
9356 + 'running' : self.running,
9357 + 'period_ms' : self.period_ms,
9358 + 'period_at' : self.period_at,
9359 + 'period_vtime_at' : self.vperiod_at,
9360 + 'busy_level' : self.busy_level,
9361 + 'vrate_pct' : self.vrate_pct, }
9362
9363 def table_preamble_str(self):
9364 state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF'
9365 @@ -179,19 +179,19 @@ class IocgStat:
9366
9367 def dict(self, now, path):
9368 out = { 'cgroup' : path,
9369 - 'timestamp' : str(now),
9370 - 'is_active' : str(int(self.is_active)),
9371 - 'weight' : str(self.weight),
9372 - 'weight_active' : str(self.active),
9373 - 'weight_inuse' : str(self.inuse),
9374 - 'hweight_active_pct' : str(self.hwa_pct),
9375 - 'hweight_inuse_pct' : str(self.hwi_pct),
9376 - 'inflight_pct' : str(self.inflight_pct),
9377 - 'debt_ms' : str(self.debt_ms),
9378 - 'use_delay' : str(self.use_delay),
9379 - 'delay_ms' : str(self.delay_ms),
9380 - 'usage_pct' : str(self.usage),
9381 - 'address' : str(hex(self.address)) }
9382 + 'timestamp' : now,
9383 + 'is_active' : self.is_active,
9384 + 'weight' : self.weight,
9385 + 'weight_active' : self.active,
9386 + 'weight_inuse' : self.inuse,
9387 + 'hweight_active_pct' : self.hwa_pct,
9388 + 'hweight_inuse_pct' : self.hwi_pct,
9389 + 'inflight_pct' : self.inflight_pct,
9390 + 'debt_ms' : self.debt_ms,
9391 + 'use_delay' : self.use_delay,
9392 + 'delay_ms' : self.delay_ms,
9393 + 'usage_pct' : self.usage,
9394 + 'address' : self.address }
9395 for i in range(len(self.usages)):
9396 out[f'usage_pct_{i}'] = str(self.usages[i])
9397 return out
9398 diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
9399 index bd021a0eeef8..4cc69675c2a9 100644
9400 --- a/tools/lib/api/fs/fs.c
9401 +++ b/tools/lib/api/fs/fs.c
9402 @@ -90,6 +90,7 @@ struct fs {
9403 const char * const *mounts;
9404 char path[PATH_MAX];
9405 bool found;
9406 + bool checked;
9407 long magic;
9408 };
9409
9410 @@ -111,31 +112,37 @@ static struct fs fs__entries[] = {
9411 .name = "sysfs",
9412 .mounts = sysfs__fs_known_mountpoints,
9413 .magic = SYSFS_MAGIC,
9414 + .checked = false,
9415 },
9416 [FS__PROCFS] = {
9417 .name = "proc",
9418 .mounts = procfs__known_mountpoints,
9419 .magic = PROC_SUPER_MAGIC,
9420 + .checked = false,
9421 },
9422 [FS__DEBUGFS] = {
9423 .name = "debugfs",
9424 .mounts = debugfs__known_mountpoints,
9425 .magic = DEBUGFS_MAGIC,
9426 + .checked = false,
9427 },
9428 [FS__TRACEFS] = {
9429 .name = "tracefs",
9430 .mounts = tracefs__known_mountpoints,
9431 .magic = TRACEFS_MAGIC,
9432 + .checked = false,
9433 },
9434 [FS__HUGETLBFS] = {
9435 .name = "hugetlbfs",
9436 .mounts = hugetlbfs__known_mountpoints,
9437 .magic = HUGETLBFS_MAGIC,
9438 + .checked = false,
9439 },
9440 [FS__BPF_FS] = {
9441 .name = "bpf",
9442 .mounts = bpf_fs__known_mountpoints,
9443 .magic = BPF_FS_MAGIC,
9444 + .checked = false,
9445 },
9446 };
9447
9448 @@ -158,6 +165,7 @@ static bool fs__read_mounts(struct fs *fs)
9449 }
9450
9451 fclose(fp);
9452 + fs->checked = true;
9453 return fs->found = found;
9454 }
9455
9456 @@ -220,6 +228,7 @@ static bool fs__env_override(struct fs *fs)
9457 return false;
9458
9459 fs->found = true;
9460 + fs->checked = true;
9461 strncpy(fs->path, override_path, sizeof(fs->path) - 1);
9462 fs->path[sizeof(fs->path) - 1] = '\0';
9463 return true;
9464 @@ -246,6 +255,14 @@ static const char *fs__mountpoint(int idx)
9465 if (fs->found)
9466 return (const char *)fs->path;
9467
9468 + /* the mount point was already checked for but did not
9469 + * exist, so return NULL to avoid scanning again.
9470 + * This makes the found and not found paths cost equivalent
9471 + * in case of multiple calls.
9472 + */
9473 + if (fs->checked)
9474 + return NULL;
9475 +
9476 return fs__get_mountpoint(fs);
9477 }
9478
9479 diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
9480 index 92d03b8396b1..3b70003e7cfb 100644
9481 --- a/tools/lib/api/fs/fs.h
9482 +++ b/tools/lib/api/fs/fs.h
9483 @@ -18,6 +18,18 @@
9484 const char *name##__mount(void); \
9485 bool name##__configured(void); \
9486
9487 +/*
9488 + * The xxxx__mountpoint() entry points find the first matching mount point for
9489 + * each of the filesystems listed below, where xxxx is the filesystem type.
9490 + *
9491 + * The interface is as follows:
9492 + *
9493 + * - If a mount point is found on first call, it is cached and used for all
9494 + * subsequent calls.
9495 + *
9496 + * - If a mount point is not found, NULL is returned on first call and all
9497 + * subsequent calls.
9498 + */
9499 FS(sysfs)
9500 FS(procfs)
9501 FS(debugfs)
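The contract documented above means callers may repeat the lookup cheaply whether or not it succeeded the first time. A hedged usage sketch, assuming the tools/lib/api/fs/fs.h header from the perf tree:

    #include <stdio.h>
    #include "api/fs/fs.h"

    int main(void)
    {
            /* first call scans the known mount points (or an env override);
             * the new 'checked' flag caches the outcome either way */
            const char *mnt = sysfs__mountpoint();

            if (!mnt) {
                    fprintf(stderr, "sysfs not mounted\n");
                    return 1;
            }
            /* second call returns the cached path without rescanning */
            printf("sysfs at %s\n", sysfs__mountpoint());
            return 0;
    }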
9502 diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c
9503 index 6122272943e6..9ef9f6201d8b 100644
9504 --- a/tools/lib/bpf/hashmap.c
9505 +++ b/tools/lib/bpf/hashmap.c
9506 @@ -56,7 +56,14 @@ struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
9507
9508 void hashmap__clear(struct hashmap *map)
9509 {
9510 + struct hashmap_entry *cur, *tmp;
9511 + int bkt;
9512 +
9513 + hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
9514 + free(cur);
9515 + }
9516 free(map->buckets);
9517 + map->buckets = NULL;
9518 map->cap = map->cap_bits = map->sz = 0;
9519 }
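Before this change hashmap__clear() freed only the bucket array, leaking every chained entry created by insertion, and left map->buckets dangling. A generic sketch of the fixed cleanup, with illustrative types rather than the libbpf ones:

    #include <stdlib.h>

    struct entry { struct entry *next; };
    struct map { struct entry **buckets; size_t cap; };

    void map_clear(struct map *m)
    {
            for (size_t b = 0; m->buckets && b < m->cap; b++) {
                    struct entry *cur = m->buckets[b];

                    while (cur) {            /* free each chained entry... */
                            struct entry *tmp = cur->next;
                            free(cur);
                            cur = tmp;
                    }
            }
            free(m->buckets);                /* ...then the bucket array */
            m->buckets = NULL;               /* safe to clear or free again */
            m->cap = 0;
    }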
9520
9521 diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
9522 index 281cc65276e0..2a1dbf52fc9a 100644
9523 --- a/tools/lib/bpf/libbpf.c
9524 +++ b/tools/lib/bpf/libbpf.c
9525 @@ -5358,9 +5358,12 @@ void perf_buffer__free(struct perf_buffer *pb)
9526 if (!pb)
9527 return;
9528 if (pb->cpu_bufs) {
9529 - for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
9530 + for (i = 0; i < pb->cpu_cnt; i++) {
9531 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
9532
9533 + if (!cpu_buf)
9534 + continue;
9535 +
9536 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
9537 perf_buffer__free_cpu_buf(pb, cpu_buf);
9538 }
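The old loop condition stopped at the first NULL slot, so per-CPU buffers sitting after a hole (e.g. an offline CPU or a partially failed setup) were leaked. A minimal sketch of the corrected sparse-array walk, with illustrative names:

    #include <stdlib.h>

    void free_all(void **bufs, int cnt)
    {
            for (int i = 0; i < cnt; i++) {
                    if (!bufs[i])
                            continue;        /* a hole must not end the walk */
                    free(bufs[i]);
                    bufs[i] = NULL;
            }
    }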
9539 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
9540 index fcc6cd404f56..48b234d8f251 100644
9541 --- a/tools/objtool/check.c
9542 +++ b/tools/objtool/check.c
9543 @@ -865,6 +865,12 @@ static int add_special_section_alts(struct objtool_file *file)
9544 }
9545
9546 if (special_alt->group) {
9547 + if (!special_alt->orig_len) {
9548 + WARN_FUNC("empty alternative entry",
9549 + orig_insn->sec, orig_insn->offset);
9550 + continue;
9551 + }
9552 +
9553 ret = handle_group_alt(file, special_alt, orig_insn,
9554 &new_insn);
9555 if (ret)
9556 diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
9557 index 26bc5923e6b5..2f05f59e9758 100644
9558 --- a/tools/perf/builtin-probe.c
9559 +++ b/tools/perf/builtin-probe.c
9560 @@ -364,6 +364,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
9561
9562 for (k = 0; k < pev->ntevs; k++) {
9563 struct probe_trace_event *tev = &pev->tevs[k];
9564 + /* Skipped events have no event name */
9565 + if (!tev->event)
9566 + continue;
9567
9568 /* We use tev's name for showing new events */
9569 show_perf_probe_event(tev->group, tev->event, pev,
9570 diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
9571 index e11ddf86f2b3..ab2e130dc07a 100644
9572 --- a/tools/perf/util/dso.c
9573 +++ b/tools/perf/util/dso.c
9574 @@ -47,6 +47,7 @@ char dso__symtab_origin(const struct dso *dso)
9575 [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
9576 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
9577 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
9578 + [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
9579 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
9580 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
9581 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
9582 @@ -129,6 +130,21 @@ int dso__read_binary_type_filename(const struct dso *dso,
9583 snprintf(filename + len, size - len, "%s", dso->long_name);
9584 break;
9585
9586 + case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
9587 + /*
9588 + * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
9589 + * /usr/lib/debug/lib when it is expected to be in
9590 + * /usr/lib/debug/usr/lib
9591 + */
9592 + if (strlen(dso->long_name) < 9 ||
9593 + strncmp(dso->long_name, "/usr/lib/", 9)) {
9594 + ret = -1;
9595 + break;
9596 + }
9597 + len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
9598 + snprintf(filename + len, size - len, "%s", dso->long_name + 4);
9599 + break;
9600 +
9601 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
9602 {
9603 const char *last_slash;
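The long_name + 4 above skips the leading "/usr", so an object under /usr/lib is looked up under /usr/lib/debug/lib, matching the mixed-up layout the comment describes. A worked sketch with an illustrative path:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *long_name = "/usr/lib/x86_64-linux-gnu/libc-2.31.so";
            char filename[4096];

            if (strncmp(long_name, "/usr/lib/", 9) != 0)
                    return 1;        /* only /usr/lib objects qualify */

            /* long_name + 4 drops "/usr", leaving "/lib/..." */
            snprintf(filename, sizeof(filename), "%s%s",
                     "/usr/lib/debug", long_name + 4);

            /* prints /usr/lib/debug/lib/x86_64-linux-gnu/libc-2.31.so */
            puts(filename);
            return 0;
    }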
9604 diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
9605 index e4dddb76770d..69bb77d19164 100644
9606 --- a/tools/perf/util/dso.h
9607 +++ b/tools/perf/util/dso.h
9608 @@ -30,6 +30,7 @@ enum dso_binary_type {
9609 DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
9610 DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
9611 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
9612 + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
9613 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
9614 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
9615 DSO_BINARY_TYPE__GUEST_KMODULE,
9616 diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
9617 index 92b07be0b48b..a5cb1a3a1064 100644
9618 --- a/tools/perf/util/probe-event.c
9619 +++ b/tools/perf/util/probe-event.c
9620 @@ -102,7 +102,7 @@ void exit_probe_symbol_maps(void)
9621 symbol__exit();
9622 }
9623
9624 -static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
9625 +static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap)
9626 {
9627 /* kmap->ref_reloc_sym should be set if host_machine is initialized */
9628 struct kmap *kmap;
9629 @@ -114,6 +114,10 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
9630 kmap = map__kmap(map);
9631 if (!kmap)
9632 return NULL;
9633 +
9634 + if (pmap)
9635 + *pmap = map;
9636 +
9637 return kmap->ref_reloc_sym;
9638 }
9639
9640 @@ -125,7 +129,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
9641 struct map *map;
9642
9643 /* ref_reloc_sym is just a label. Need a special fix */
9644 - reloc_sym = kernel_get_ref_reloc_sym();
9645 + reloc_sym = kernel_get_ref_reloc_sym(NULL);
9646 if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
9647 *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
9648 else {
9649 @@ -232,21 +236,22 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
9650 static bool kprobe_blacklist__listed(unsigned long address);
9651 static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
9652 {
9653 - u64 etext_addr = 0;
9654 - int ret;
9655 -
9656 - /* Get the address of _etext for checking non-probable text symbol */
9657 - ret = kernel_get_symbol_address_by_name("_etext", &etext_addr,
9658 - false, false);
9659 + struct map *map;
9660 + bool ret = false;
9661
9662 - if (ret == 0 && etext_addr < address)
9663 - pr_warning("%s is out of .text, skip it.\n", symbol);
9664 - else if (kprobe_blacklist__listed(address))
9665 + map = kernel_get_module_map(NULL);
9666 + if (map) {
9667 + ret = address <= map->start || map->end < address;
9668 + if (ret)
9669 + pr_warning("%s is out of .text, skip it.\n", symbol);
9670 + map__put(map);
9671 + }
9672 + if (!ret && kprobe_blacklist__listed(address)) {
9673 pr_warning("%s is blacklisted function, skip it.\n", symbol);
9674 - else
9675 - return false;
9676 + ret = true;
9677 + }
9678
9679 - return true;
9680 + return ret;
9681 }
9682
9683 /*
9684 @@ -745,6 +750,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
9685 int ntevs)
9686 {
9687 struct ref_reloc_sym *reloc_sym;
9688 + struct map *map;
9689 char *tmp;
9690 int i, skipped = 0;
9691
9692 @@ -753,7 +759,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
9693 return post_process_offline_probe_trace_events(tevs, ntevs,
9694 symbol_conf.vmlinux_name);
9695
9696 - reloc_sym = kernel_get_ref_reloc_sym();
9697 + reloc_sym = kernel_get_ref_reloc_sym(&map);
9698 if (!reloc_sym) {
9699 pr_warning("Relocated base symbol is not found!\n");
9700 return -EINVAL;
9701 @@ -764,9 +770,13 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
9702 continue;
9703 if (tevs[i].point.retprobe && !kretprobe_offset_is_supported())
9704 continue;
9705 - /* If we found a wrong one, mark it by NULL symbol */
9706 + /*
9707 + * If we found a wrong one, mark it with a NULL symbol.
9708 + * Since addresses in debuginfo are the same as in objdump,
9709 + * we need to convert them to in-memory addresses.
9710 + */
9711 if (kprobe_warn_out_range(tevs[i].point.symbol,
9712 - tevs[i].point.address)) {
9713 + map__objdump_2mem(map, tevs[i].point.address))) {
9714 tmp = NULL;
9715 skipped++;
9716 } else {
9717 @@ -2922,7 +2932,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
9718 /* Note that the symbols in the kmodule are not relocated */
9719 if (!pev->uprobes && !pev->target &&
9720 (!pp->retprobe || kretprobe_offset_is_supported())) {
9721 - reloc_sym = kernel_get_ref_reloc_sym();
9722 + reloc_sym = kernel_get_ref_reloc_sym(NULL);
9723 if (!reloc_sym) {
9724 pr_warning("Relocated base symbol is not found!\n");
9725 ret = -EINVAL;
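kernel_get_ref_reloc_sym() now also hands back the kernel map so the caller can translate debuginfo addresses (objdump-style, file-relative) into runtime addresses before range-checking them. A hedged sketch of that kind of translation under a KASLR-style relocation; the arithmetic is illustrative, not the exact body of map__objdump_2mem():

    /* illustrative only: a loaded image shifted by 'reloc' bytes */
    unsigned long objdump_to_mem(unsigned long objdump_addr,
                                 unsigned long reloc)
    {
            return objdump_addr + reloc;
    }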
9726 diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
9727 index aaf3b24fffa4..dc9d495e3d6a 100644
9728 --- a/tools/perf/util/probe-finder.c
9729 +++ b/tools/perf/util/probe-finder.c
9730 @@ -101,6 +101,7 @@ enum dso_binary_type distro_dwarf_types[] = {
9731 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
9732 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
9733 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
9734 + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
9735 DSO_BINARY_TYPE__NOT_FOUND,
9736 };
9737
9738 diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
9739 index a8f80e427674..901ad7f6f4dc 100644
9740 --- a/tools/perf/util/symbol.c
9741 +++ b/tools/perf/util/symbol.c
9742 @@ -79,6 +79,7 @@ static enum dso_binary_type binary_type_symtab[] = {
9743 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
9744 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
9745 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
9746 + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
9747 DSO_BINARY_TYPE__NOT_FOUND,
9748 };
9749
9750 @@ -1220,6 +1221,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
9751
9752 m->end = old_map->start;
9753 list_add_tail(&m->node, &merged);
9754 + new_map->pgoff += old_map->end - new_map->start;
9755 new_map->start = old_map->end;
9756 }
9757 } else {
9758 @@ -1240,6 +1242,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
9759 * |new......| -> |new...|
9760 * |old....| -> |old....|
9761 */
9762 + new_map->pgoff += old_map->end - new_map->start;
9763 new_map->start = old_map->end;
9764 }
9765 }
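Clipping new_map->start forward without advancing pgoff by the same delta would shift every later address-to-file-offset translation; the two added lines keep them consistent. A worked example with illustrative numbers:

    #include <stdio.h>

    struct map { unsigned long start, end, pgoff; };

    int main(void)
    {
            struct map new_map = { 0x1000, 0x5000, 0x0 };
            unsigned long old_end = 0x2000;  /* old map covers up to here */

            new_map.pgoff += old_end - new_map.start;  /* 0x0 -> 0x1000 */
            new_map.start  = old_end;                  /* 0x1000 -> 0x2000 */

            /* address 0x3000 still maps to file offset
             * pgoff + (0x3000 - start) = 0x2000, exactly as before the clip */
            printf("start=%#lx pgoff=%#lx\n", new_map.start, new_map.pgoff);
            return 0;
    }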
9766 @@ -1530,6 +1533,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
9767 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
9768 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
9769 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
9770 + case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
9771 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
9772 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
9773 return !kmod && dso->kernel == DSO_TYPE_USER;
9774 diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
9775 index 5dc109f4c097..b9601f13cf03 100644
9776 --- a/tools/testing/selftests/bpf/config
9777 +++ b/tools/testing/selftests/bpf/config
9778 @@ -25,6 +25,7 @@ CONFIG_XDP_SOCKETS=y
9779 CONFIG_FTRACE_SYSCALLS=y
9780 CONFIG_IPV6_TUNNEL=y
9781 CONFIG_IPV6_GRE=y
9782 +CONFIG_IPV6_SEG6_BPF=y
9783 CONFIG_NET_FOU=m
9784 CONFIG_NET_FOU_IP_TUNNELS=y
9785 CONFIG_IPV6_FOU=m
9786 diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
9787 index 92563898867c..9f3634c9971d 100644
9788 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
9789 +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
9790 @@ -523,6 +523,7 @@ void test_flow_dissector(void)
9791 CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
9792 }
9793
9794 + close(tap_fd);
9795 bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
9796 bpf_object__close(obj);
9797 }
9798 diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
9799 index 3bf18364c67c..8cb3469dd11f 100644
9800 --- a/tools/testing/selftests/bpf/test_progs.c
9801 +++ b/tools/testing/selftests/bpf/test_progs.c
9802 @@ -293,6 +293,7 @@ int extract_build_id(char *build_id, size_t size)
9803 len = size;
9804 memcpy(build_id, line, len);
9805 build_id[len] = '\0';
9806 + free(line);
9807 return 0;
9808 err:
9809 fclose(fp);