Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0219-5.4.120-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 152583 bytes
-add missing
1 diff --git a/Documentation/arm/memory.rst b/Documentation/arm/memory.rst
2 index 0521b4ce5c961..34bb23c44a710 100644
3 --- a/Documentation/arm/memory.rst
4 +++ b/Documentation/arm/memory.rst
5 @@ -45,9 +45,14 @@ fffe8000 fffeffff DTCM mapping area for platforms with
6 fffe0000 fffe7fff ITCM mapping area for platforms with
7 ITCM mounted inside the CPU.
8
9 -ffc00000 ffefffff Fixmap mapping region. Addresses provided
10 +ffc80000 ffefffff Fixmap mapping region. Addresses provided
11 by fix_to_virt() will be located here.
12
13 +ffc00000 ffc7ffff Guard region
14 +
15 +ff800000 ffbfffff Permanent, fixed read-only mapping of the
16 + firmware provided DT blob
17 +
18 fee00000 feffffff Mapping of PCI I/O space. This is a static
19 mapping within the vmalloc space.
20
21 diff --git a/Makefile b/Makefile
22 index 9b7780de5f6bb..8b116f6fdcfc2 100644
23 --- a/Makefile
24 +++ b/Makefile
25 @@ -1,7 +1,7 @@
26 # SPDX-License-Identifier: GPL-2.0
27 VERSION = 5
28 PATCHLEVEL = 4
29 -SUBLEVEL = 119
30 +SUBLEVEL = 120
31 EXTRAVERSION =
32 NAME = Kleptomaniac Octopus
33
34 diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
35 index bcd1920ae75a3..956f78ecf1938 100644
36 --- a/arch/arc/include/asm/page.h
37 +++ b/arch/arc/include/asm/page.h
38 @@ -7,6 +7,18 @@
39
40 #include <uapi/asm/page.h>
41
42 +#ifdef CONFIG_ARC_HAS_PAE40
43 +
44 +#define MAX_POSSIBLE_PHYSMEM_BITS 40
45 +#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
46 +
47 +#else /* CONFIG_ARC_HAS_PAE40 */
48 +
49 +#define MAX_POSSIBLE_PHYSMEM_BITS 32
50 +#define PAGE_MASK_PHYS PAGE_MASK
51 +
52 +#endif /* CONFIG_ARC_HAS_PAE40 */
53 +
54 #ifndef __ASSEMBLY__
55
56 #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
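
The PAE40 hunk above is a mask-width fix: ARC's PAGE_MASK is derived from a 32-bit unsigned long, so applying it to a 40-bit phys_addr_t zero-extends the mask and silently clears address bits 39:32, while the new PAGE_MASK_PHYS ORs in 0xff00000000 to keep them. A minimal userspace sketch of the failure mode, with made-up addresses and the masks modelled as plain integers (not the kernel headers):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* PAGE_MASK as a 32-bit arch sees it: only 32 bits wide. */
        uint32_t page_mask = ~(uint32_t)(4096 - 1);                /* 0xfffff000 */
        uint64_t page_mask_phys = 0xff00000000ull | page_mask;     /* 0xfffffff000 */
        uint64_t paddr = 0x0a12345abcull;                          /* 40-bit physical address */

        /* Zero-extension of the 32-bit mask wipes bits 39:32: */
        printf("paddr & PAGE_MASK      = %#" PRIx64 "\n", paddr & page_mask);      /* 0x12345000 */
        printf("paddr & PAGE_MASK_PHYS = %#" PRIx64 "\n", paddr & page_mask_phys); /* 0xa12345000 */
        return 0;
    }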
57 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
58 index 6bdcf9b495b83..a1987d07d08c1 100644
59 --- a/arch/arc/include/asm/pgtable.h
60 +++ b/arch/arc/include/asm/pgtable.h
61 @@ -108,8 +108,8 @@
62 #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
63
64 /* Set of bits not changed in pte_modify */
65 -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
66 -
67 +#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
68 + _PAGE_SPECIAL)
69 /* More Abbrevaited helpers */
70 #define PAGE_U_NONE __pgprot(___DEF)
71 #define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
72 @@ -133,13 +133,7 @@
73 #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
74 #define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
75
76 -#ifdef CONFIG_ARC_HAS_PAE40
77 -#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
78 -#define MAX_POSSIBLE_PHYSMEM_BITS 40
79 -#else
80 -#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
81 -#define MAX_POSSIBLE_PHYSMEM_BITS 32
82 -#endif
83 +#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
84
85 /**************************************************************************
86 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
87 diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
88 index 2a97e2718a219..2a4ad619abfba 100644
89 --- a/arch/arc/include/uapi/asm/page.h
90 +++ b/arch/arc/include/uapi/asm/page.h
91 @@ -33,5 +33,4 @@
92
93 #define PAGE_MASK (~(PAGE_SIZE-1))
94
95 -
96 #endif /* _UAPI__ASM_ARC_PAGE_H */
97 diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
98 index ea74a1eee5d9d..b37ca852a9f7e 100644
99 --- a/arch/arc/kernel/entry.S
100 +++ b/arch/arc/kernel/entry.S
101 @@ -165,7 +165,7 @@ tracesys:
102
103 ; Do the Sys Call as we normally would.
104 ; Validate the Sys Call number
105 - cmp r8, NR_syscalls
106 + cmp r8, NR_syscalls - 1
107 mov.hi r0, -ENOSYS
108 bhi tracesys_exit
109
110 @@ -243,7 +243,7 @@ ENTRY(EV_Trap)
111 ;============ Normal syscall case
112
113 ; syscall num shd not exceed the total system calls avail
114 - cmp r8, NR_syscalls
115 + cmp r8, NR_syscalls - 1
116 mov.hi r0, -ENOSYS
117 bhi .Lret_from_system_call
118
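
The two one-line changes above close an off-by-one: "cmp r8, NR_syscalls" with bhi (unsigned branch-if-higher) only rejects numbers strictly greater than NR_syscalls, so the out-of-range value r8 == NR_syscalls passed the check. Comparing against NR_syscalls - 1 rejects it. A plain-C rendering of both checks (demonstration only, values are arbitrary):

    #include <stdio.h>

    #define NR_syscalls 3u

    /* Mirrors "cmp r8, limit; bhi reject": accepted iff num <= limit. */
    static int accepted(unsigned int num, unsigned int limit)
    {
        return num <= limit;
    }

    int main(void)
    {
        unsigned int num = NR_syscalls;  /* first invalid syscall number */

        printf("old check (limit = NR_syscalls):     %s\n",
               accepted(num, NR_syscalls) ? "accepted (bug)" : "rejected");
        printf("fixed check (limit = NR_syscalls-1): %s\n",
               accepted(num, NR_syscalls - 1u) ? "accepted" : "rejected");
        return 0;
    }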
119 diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
120 index fac4adc902044..95c649fbc95af 100644
121 --- a/arch/arc/mm/ioremap.c
122 +++ b/arch/arc/mm/ioremap.c
123 @@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
124 void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
125 unsigned long flags)
126 {
127 + unsigned int off;
128 unsigned long vaddr;
129 struct vm_struct *area;
130 - phys_addr_t off, end;
131 + phys_addr_t end;
132 pgprot_t prot = __pgprot(flags);
133
134 /* Don't allow wraparound, zero size */
135 @@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
136
137 /* Mappings have to be page-aligned */
138 off = paddr & ~PAGE_MASK;
139 - paddr &= PAGE_MASK;
140 + paddr &= PAGE_MASK_PHYS;
141 size = PAGE_ALIGN(end + 1) - paddr;
142
143 /*
144 diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
145 index 10025e1993533..2430d537f2d38 100644
146 --- a/arch/arc/mm/tlb.c
147 +++ b/arch/arc/mm/tlb.c
148 @@ -597,7 +597,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
149 pte_t *ptep)
150 {
151 unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
152 - phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
153 + phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
154 struct page *page = pfn_to_page(pte_pfn(*ptep));
155
156 create_tlb(vma, vaddr, ptep);
157 diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
158 index 472c93db5dac5..763c3f65e30c6 100644
159 --- a/arch/arm/include/asm/fixmap.h
160 +++ b/arch/arm/include/asm/fixmap.h
161 @@ -2,7 +2,7 @@
162 #ifndef _ASM_FIXMAP_H
163 #define _ASM_FIXMAP_H
164
165 -#define FIXADDR_START 0xffc00000UL
166 +#define FIXADDR_START 0xffc80000UL
167 #define FIXADDR_END 0xfff00000UL
168 #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
169
170 diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
171 index 99035b5891ef4..f717d7122d9d1 100644
172 --- a/arch/arm/include/asm/memory.h
173 +++ b/arch/arm/include/asm/memory.h
174 @@ -67,6 +67,10 @@
175 */
176 #define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
177
178 +#define FDT_FIXED_BASE UL(0xff800000)
179 +#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
180 +#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
181 +
182 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
183 /*
184 * Allow 16MB-aligned ioremap pages
185 @@ -107,6 +111,7 @@ extern unsigned long vectors_base;
186 #define MODULES_VADDR PAGE_OFFSET
187
188 #define XIP_VIRT_ADDR(physaddr) (physaddr)
189 +#define FDT_VIRT_BASE(physbase) ((void *)(physbase))
190
191 #endif /* !CONFIG_MMU */
192
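
FDT_VIRT_BASE() above keeps the blob's offset within its section: the companion head.S hunk later in this patch maps the two sections covering the DT blob at FDT_FIXED_BASE, so the virtual address is that fixed base plus physbase modulo the section size. A small sketch of the arithmetic, assuming 1 MiB sections (the classic non-LPAE value) and an invented blob address:

    #include <stdio.h>

    #define FDT_FIXED_BASE 0xff800000ul
    #define SECTION_SIZE   0x00100000ul    /* 1 MiB, assumed */

    static void *fdt_virt_base(unsigned long physbase)
    {
        return (void *)(FDT_FIXED_BASE | physbase % SECTION_SIZE);
    }

    int main(void)
    {
        unsigned long dt_phys = 0x82345678ul;   /* hypothetical DT blob address */

        /* Offset in section is 0x45678, so the blob appears at 0xff845678. */
        printf("FDT_VIRT_BASE(%#lx) = %p\n", dt_phys, fdt_virt_base(dt_phys));
        return 0;
    }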
193 diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
194 index 1e36c40533c16..402e3f34c7ed8 100644
195 --- a/arch/arm/include/asm/prom.h
196 +++ b/arch/arm/include/asm/prom.h
197 @@ -9,12 +9,12 @@
198
199 #ifdef CONFIG_OF
200
201 -extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
202 +extern const struct machine_desc *setup_machine_fdt(void *dt_virt);
203 extern void __init arm_dt_init_cpu_maps(void);
204
205 #else /* CONFIG_OF */
206
207 -static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
208 +static inline const struct machine_desc *setup_machine_fdt(void *dt_virt)
209 {
210 return NULL;
211 }
212 diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h
213 index 067e12edc3419..f2819c25b6029 100644
214 --- a/arch/arm/kernel/atags.h
215 +++ b/arch/arm/kernel/atags.h
216 @@ -2,11 +2,11 @@
217 void convert_to_tag_list(struct tag *tags);
218
219 #ifdef CONFIG_ATAGS
220 -const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
221 +const struct machine_desc *setup_machine_tags(void *__atags_vaddr,
222 unsigned int machine_nr);
223 #else
224 static inline const struct machine_desc * __init __noreturn
225 -setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
226 +setup_machine_tags(void *__atags_vaddr, unsigned int machine_nr)
227 {
228 early_print("no ATAGS support: can't continue\n");
229 while (true);
230 diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
231 index ce02f92f4ab26..8288151631fc4 100644
232 --- a/arch/arm/kernel/atags_parse.c
233 +++ b/arch/arm/kernel/atags_parse.c
234 @@ -176,7 +176,7 @@ static void __init squash_mem_tags(struct tag *tag)
235 }
236
237 const struct machine_desc * __init
238 -setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
239 +setup_machine_tags(void *atags_vaddr, unsigned int machine_nr)
240 {
241 struct tag *tags = (struct tag *)&default_tags;
242 const struct machine_desc *mdesc = NULL, *p;
243 @@ -197,8 +197,8 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
244 if (!mdesc)
245 return NULL;
246
247 - if (__atags_pointer)
248 - tags = phys_to_virt(__atags_pointer);
249 + if (atags_vaddr)
250 + tags = atags_vaddr;
251 else if (mdesc->atag_offset)
252 tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
253
254 diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
255 index 39c9786984062..4e09883c276d9 100644
256 --- a/arch/arm/kernel/devtree.c
257 +++ b/arch/arm/kernel/devtree.c
258 @@ -203,12 +203,12 @@ static const void * __init arch_get_next_mach(const char *const **match)
259
260 /**
261 * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
262 - * @dt_phys: physical address of dt blob
263 + * @dt_virt: virtual address of dt blob
264 *
265 * If a dtb was passed to the kernel in r2, then use it to choose the
266 * correct machine_desc and to setup the system.
267 */
268 -const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
269 +const struct machine_desc * __init setup_machine_fdt(void *dt_virt)
270 {
271 const struct machine_desc *mdesc, *mdesc_best = NULL;
272
273 @@ -221,7 +221,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
274 mdesc_best = &__mach_desc_GENERIC_DT;
275 #endif
276
277 - if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
278 + if (!dt_virt || !early_init_dt_verify(dt_virt))
279 return NULL;
280
281 mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
282 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
283 index f1cdc1f369575..5ceed4d9ee036 100644
284 --- a/arch/arm/kernel/head.S
285 +++ b/arch/arm/kernel/head.S
286 @@ -274,11 +274,10 @@ __create_page_tables:
287 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
288 */
289 mov r0, r2, lsr #SECTION_SHIFT
290 - movs r0, r0, lsl #SECTION_SHIFT
291 - subne r3, r0, r8
292 - addne r3, r3, #PAGE_OFFSET
293 - addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
294 - orrne r6, r7, r0
295 + cmp r2, #0
296 + ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
297 + addne r3, r3, r4
298 + orrne r6, r7, r0, lsl #SECTION_SHIFT
299 strne r6, [r3], #1 << PMD_ORDER
300 addne r6, r6, #1 << SECTION_SHIFT
301 strne r6, [r3]
302 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
303 index 7021ef0b4e71b..b06d9ea07c846 100644
304 --- a/arch/arm/kernel/hw_breakpoint.c
305 +++ b/arch/arm/kernel/hw_breakpoint.c
306 @@ -883,7 +883,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
307 info->trigger = addr;
308 pr_debug("breakpoint fired: address = 0x%x\n", addr);
309 perf_bp_event(bp, regs);
310 - if (!bp->overflow_handler)
311 + if (is_default_overflow_handler(bp))
312 enable_single_step(bp, addr);
313 goto unlock;
314 }
315 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
316 index d0a464e317eac..924285d0bccd9 100644
317 --- a/arch/arm/kernel/setup.c
318 +++ b/arch/arm/kernel/setup.c
319 @@ -18,6 +18,7 @@
320 #include <linux/of_platform.h>
321 #include <linux/init.h>
322 #include <linux/kexec.h>
323 +#include <linux/libfdt.h>
324 #include <linux/of_fdt.h>
325 #include <linux/cpu.h>
326 #include <linux/interrupt.h>
327 @@ -1075,19 +1076,27 @@ void __init hyp_mode_check(void)
328
329 void __init setup_arch(char **cmdline_p)
330 {
331 - const struct machine_desc *mdesc;
332 + const struct machine_desc *mdesc = NULL;
333 + void *atags_vaddr = NULL;
334 +
335 + if (__atags_pointer)
336 + atags_vaddr = FDT_VIRT_BASE(__atags_pointer);
337
338 setup_processor();
339 - mdesc = setup_machine_fdt(__atags_pointer);
340 + if (atags_vaddr) {
341 + mdesc = setup_machine_fdt(atags_vaddr);
342 + if (mdesc)
343 + memblock_reserve(__atags_pointer,
344 + fdt_totalsize(atags_vaddr));
345 + }
346 if (!mdesc)
347 - mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
348 + mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
349 if (!mdesc) {
350 early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
351 early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
352 __atags_pointer);
353 if (__atags_pointer)
354 - early_print(" r2[]=%*ph\n", 16,
355 - phys_to_virt(__atags_pointer));
356 + early_print(" r2[]=%*ph\n", 16, atags_vaddr);
357 dump_machine_table();
358 }
359
360 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
361 index 6f19ba53fd1f2..0804a6af4a3b7 100644
362 --- a/arch/arm/mm/init.c
363 +++ b/arch/arm/mm/init.c
364 @@ -274,7 +274,6 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
365 if (mdesc->reserve)
366 mdesc->reserve();
367
368 - early_init_fdt_reserve_self();
369 early_init_fdt_scan_reserved_mem();
370
371 /* reserve memory for DMA contiguous allocations */
372 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
373 index 48c2888297dd9..ee943ac325560 100644
374 --- a/arch/arm/mm/mmu.c
375 +++ b/arch/arm/mm/mmu.c
376 @@ -39,6 +39,8 @@
377 #include "mm.h"
378 #include "tcm.h"
379
380 +extern unsigned long __atags_pointer;
381 +
382 /*
383 * empty_zero_page is a special page that is used for
384 * zero-initialized data and COW.
385 @@ -962,7 +964,7 @@ static void __init create_mapping(struct map_desc *md)
386 return;
387 }
388
389 - if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
390 + if (md->type == MT_DEVICE &&
391 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
392 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
393 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
394 @@ -1352,6 +1354,15 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
395 for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
396 pmd_clear(pmd_off_k(addr));
397
398 + if (__atags_pointer) {
399 + /* create a read-only mapping of the device tree */
400 + map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
401 + map.virtual = FDT_FIXED_BASE;
402 + map.length = FDT_FIXED_SIZE;
403 + map.type = MT_ROM;
404 + create_mapping(&map);
405 + }
406 +
407 /*
408 * Map the kernel if it is XIP.
409 * It is always first in the modulearea.
410 @@ -1512,8 +1523,7 @@ static void __init map_lowmem(void)
411 }
412
413 #ifdef CONFIG_ARM_PV_FIXUP
414 -extern unsigned long __atags_pointer;
415 -typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
416 +typedef void pgtables_remap(long long offset, unsigned long pgd);
417 pgtables_remap lpae_pgtables_remap_asm;
418
419 /*
420 @@ -1526,7 +1536,6 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
421 unsigned long pa_pgd;
422 unsigned int cr, ttbcr;
423 long long offset;
424 - void *boot_data;
425
426 if (!mdesc->pv_fixup)
427 return;
428 @@ -1543,7 +1552,6 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
429 */
430 lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
431 pa_pgd = __pa(swapper_pg_dir);
432 - boot_data = __va(__atags_pointer);
433 barrier();
434
435 pr_info("Switching physical address space to 0x%08llx\n",
436 @@ -1579,7 +1587,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
437 * needs to be assembly. It's fairly simple, as we're using the
438 * temporary tables setup by the initial assembly code.
439 */
440 - lpae_pgtables_remap(offset, pa_pgd, boot_data);
441 + lpae_pgtables_remap(offset, pa_pgd);
442
443 /* Re-enable the caches and cacheable TLB walks */
444 asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
445 diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S
446 index 769778928356e..6d081d1cdc691 100644
447 --- a/arch/arm/mm/pv-fixup-asm.S
448 +++ b/arch/arm/mm/pv-fixup-asm.S
449 @@ -39,8 +39,8 @@ ENTRY(lpae_pgtables_remap_asm)
450
451 /* Update level 2 entries for the boot data */
452 add r7, r2, #0x1000
453 - add r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
454 - bic r7, r7, #(1 << L2_ORDER) - 1
455 + movw r3, #FDT_FIXED_BASE >> (SECTION_SHIFT - L2_ORDER)
456 + add r7, r7, r3
457 ldrd r4, r5, [r7]
458 adds r4, r4, r0
459 adc r5, r5, r1
460 diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
461 index f319144260ce1..9fbf32e6e8813 100644
462 --- a/arch/ia64/include/asm/module.h
463 +++ b/arch/ia64/include/asm/module.h
464 @@ -14,16 +14,20 @@
465 struct elf64_shdr; /* forward declration */
466
467 struct mod_arch_specific {
468 + /* Used only at module load time. */
469 struct elf64_shdr *core_plt; /* core PLT section */
470 struct elf64_shdr *init_plt; /* init PLT section */
471 struct elf64_shdr *got; /* global offset table */
472 struct elf64_shdr *opd; /* official procedure descriptors */
473 struct elf64_shdr *unwind; /* unwind-table section */
474 unsigned long gp; /* global-pointer for module */
475 + unsigned int next_got_entry; /* index of next available got entry */
476
477 + /* Used at module run and cleanup time. */
478 void *core_unw_table; /* core unwind-table cookie returned by unwinder */
479 void *init_unw_table; /* init unwind-table cookie returned by unwinder */
480 - unsigned int next_got_entry; /* index of next available got entry */
481 + void *opd_addr; /* symbolize uses .opd to get to actual function */
482 + unsigned long opd_size;
483 };
484
485 #define MODULE_PROC_FAMILY "ia64"
486 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
487 index 1a42ba885188a..ee693c8cec498 100644
488 --- a/arch/ia64/kernel/module.c
489 +++ b/arch/ia64/kernel/module.c
490 @@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
491 int
492 module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
493 {
494 + struct mod_arch_specific *mas = &mod->arch;
495 +
496 DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
497 - if (mod->arch.unwind)
498 + if (mas->unwind)
499 register_unwind_table(mod);
500 +
501 + /*
502 + * ".opd" was already relocated to the final destination. Store
503 + * it's address for use in symbolizer.
504 + */
505 + mas->opd_addr = (void *)mas->opd->sh_addr;
506 + mas->opd_size = mas->opd->sh_size;
507 +
508 + /*
509 + * Module relocation was already done at this point. Section
510 + * headers are about to be deleted. Wipe out load-time context.
511 + */
512 + mas->core_plt = NULL;
513 + mas->init_plt = NULL;
514 + mas->got = NULL;
515 + mas->opd = NULL;
516 + mas->unwind = NULL;
517 + mas->gp = 0;
518 + mas->next_got_entry = 0;
519 +
520 return 0;
521 }
522
523 @@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
524
525 void *dereference_module_function_descriptor(struct module *mod, void *ptr)
526 {
527 - Elf64_Shdr *opd = mod->arch.opd;
528 + struct mod_arch_specific *mas = &mod->arch;
529
530 - if (ptr < (void *)opd->sh_addr ||
531 - ptr >= (void *)(opd->sh_addr + opd->sh_size))
532 + if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
533 return ptr;
534
535 return dereference_function_descriptor(ptr);
536 diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
537 index dc5ea57364408..ceece76fc971a 100644
538 --- a/arch/mips/include/asm/div64.h
539 +++ b/arch/mips/include/asm/div64.h
540 @@ -1,5 +1,5 @@
541 /*
542 - * Copyright (C) 2000, 2004 Maciej W. Rozycki
543 + * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki
544 * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
545 *
546 * This file is subject to the terms and conditions of the GNU General Public
547 @@ -9,25 +9,18 @@
548 #ifndef __ASM_DIV64_H
549 #define __ASM_DIV64_H
550
551 -#include <asm-generic/div64.h>
552 -
553 -#if BITS_PER_LONG == 64
554 +#include <asm/bitsperlong.h>
555
556 -#include <linux/types.h>
557 +#if BITS_PER_LONG == 32
558
559 /*
560 * No traps on overflows for any of these...
561 */
562
563 -#define __div64_32(n, base) \
564 -({ \
565 +#define do_div64_32(res, high, low, base) ({ \
566 unsigned long __cf, __tmp, __tmp2, __i; \
567 unsigned long __quot32, __mod32; \
568 - unsigned long __high, __low; \
569 - unsigned long long __n; \
570 \
571 - __high = *__n >> 32; \
572 - __low = __n; \
573 __asm__( \
574 " .set push \n" \
575 " .set noat \n" \
576 @@ -51,18 +44,48 @@
577 " subu %0, %0, %z6 \n" \
578 " addiu %2, %2, 1 \n" \
579 "3: \n" \
580 - " bnez %4, 0b\n\t" \
581 - " srl %5, %1, 0x1f\n\t" \
582 + " bnez %4, 0b \n" \
583 + " srl %5, %1, 0x1f \n" \
584 " .set pop" \
585 : "=&r" (__mod32), "=&r" (__tmp), \
586 "=&r" (__quot32), "=&r" (__cf), \
587 "=&r" (__i), "=&r" (__tmp2) \
588 - : "Jr" (base), "0" (__high), "1" (__low)); \
589 + : "Jr" (base), "0" (high), "1" (low)); \
590 \
591 - (__n) = __quot32; \
592 + (res) = __quot32; \
593 __mod32; \
594 })
595
596 -#endif /* BITS_PER_LONG == 64 */
597 +#define __div64_32(n, base) ({ \
598 + unsigned long __upper, __low, __high, __radix; \
599 + unsigned long long __quot; \
600 + unsigned long long __div; \
601 + unsigned long __mod; \
602 + \
603 + __div = (*n); \
604 + __radix = (base); \
605 + \
606 + __high = __div >> 32; \
607 + __low = __div; \
608 + \
609 + if (__high < __radix) { \
610 + __upper = __high; \
611 + __high = 0; \
612 + } else { \
613 + __upper = __high % __radix; \
614 + __high /= __radix; \
615 + } \
616 + \
617 + __mod = do_div64_32(__low, __upper, __low, __radix); \
618 + \
619 + __quot = __high; \
620 + __quot = __quot << 32 | __low; \
621 + (*n) = __quot; \
622 + __mod; \
623 +})
624 +
625 +#endif /* BITS_PER_LONG == 32 */
626 +
627 +#include <asm-generic/div64.h>
628
629 #endif /* __ASM_DIV64_H */
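
The reworked __div64_32 above is two-limb long division in base 2^32: if the high word is at least the divisor, divide it first, then feed its remainder together with the low word to the 32-bit asm helper. Below, the same control flow in portable C, with native 64-bit division standing in for the inline assembly (a sketch of the algorithm, not the kernel macro):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the asm helper: divides (upper:low) by radix, leaves the
     * 32-bit quotient in *low and returns the remainder. The caller ensures
     * upper < radix, so the quotient always fits in 32 bits. */
    static uint32_t do_div64_32(uint32_t *low, uint32_t upper, uint32_t lo,
                                uint32_t radix)
    {
        uint64_t n = ((uint64_t)upper << 32) | lo;

        *low = (uint32_t)(n / radix);
        return (uint32_t)(n % radix);
    }

    static uint32_t div64_32(uint64_t *n, uint32_t base)
    {
        uint32_t high = *n >> 32, low = (uint32_t)*n, upper, mod;

        if (high < base) {          /* high quotient digit is zero */
            upper = high;
            high = 0;
        } else {                    /* divide the high word first */
            upper = high % base;
            high /= base;
        }
        mod = do_div64_32(&low, upper, low, base);
        *n = ((uint64_t)high << 32) | low;
        return mod;
    }

    int main(void)
    {
        uint64_t n = 0x123456789abcdef0ull;
        uint32_t rem = div64_32(&n, 1000000007u);

        printf("quotient=%#" PRIx64 " remainder=%" PRIu32 "\n", n, rem);
        return 0;
    }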
630 diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
631 index 9704f3f76e63e..d7d42bd448c4a 100644
632 --- a/arch/powerpc/kernel/iommu.c
633 +++ b/arch/powerpc/kernel/iommu.c
634 @@ -1057,7 +1057,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
635
636 spin_lock_irqsave(&tbl->large_pool.lock, flags);
637 for (i = 0; i < tbl->nr_pools; i++)
638 - spin_lock(&tbl->pools[i].lock);
639 + spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
640
641 iommu_table_release_pages(tbl);
642
643 @@ -1085,7 +1085,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
644
645 spin_lock_irqsave(&tbl->large_pool.lock, flags);
646 for (i = 0; i < tbl->nr_pools; i++)
647 - spin_lock(&tbl->pools[i].lock);
648 + spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
649
650 memset(tbl->it_map, 0, sz);
651
652 diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
653 index ea6adbf6a2211..b24d860bbab9b 100644
654 --- a/arch/powerpc/kernel/smp.c
655 +++ b/arch/powerpc/kernel/smp.c
656 @@ -1254,6 +1254,9 @@ void start_secondary(void *unused)
657
658 vdso_getcpu_init();
659 #endif
660 + set_numa_node(numa_cpu_lookup_table[cpu]);
661 + set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
662 +
663 /* Update topology CPU masks */
664 add_cpu_to_masks(cpu);
665
666 @@ -1266,9 +1269,6 @@ void start_secondary(void *unused)
667 if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
668 shared_caches = true;
669
670 - set_numa_node(numa_cpu_lookup_table[cpu]);
671 - set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
672 -
673 smp_wmb();
674 notify_cpu_starting(cpu);
675 set_cpu_online(cpu, true);
676 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
677 index e8b25f74454d6..c8e260e29f2c4 100644
678 --- a/arch/powerpc/lib/feature-fixups.c
679 +++ b/arch/powerpc/lib/feature-fixups.c
680 @@ -14,6 +14,7 @@
681 #include <linux/string.h>
682 #include <linux/init.h>
683 #include <linux/sched/mm.h>
684 +#include <linux/stop_machine.h>
685 #include <asm/cputable.h>
686 #include <asm/code-patching.h>
687 #include <asm/page.h>
688 @@ -221,11 +222,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
689 : "unknown");
690 }
691
692 +static int __do_stf_barrier_fixups(void *data)
693 +{
694 + enum stf_barrier_type *types = data;
695 +
696 + do_stf_entry_barrier_fixups(*types);
697 + do_stf_exit_barrier_fixups(*types);
698 +
699 + return 0;
700 +}
701
702 void do_stf_barrier_fixups(enum stf_barrier_type types)
703 {
704 - do_stf_entry_barrier_fixups(types);
705 - do_stf_exit_barrier_fixups(types);
706 + /*
707 + * The call to the fallback entry flush, and the fallback/sync-ori exit
708 + * flush can not be safely patched in/out while other CPUs are executing
709 + * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
710 + * spin in the stop machine core with interrupts hard disabled.
711 + */
712 + stop_machine(__do_stf_barrier_fixups, &types, NULL);
713 }
714
715 void do_uaccess_flush_fixups(enum l1d_flush_type types)
716 @@ -278,8 +293,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
717 : "unknown");
718 }
719
720 -void do_entry_flush_fixups(enum l1d_flush_type types)
721 +static int __do_entry_flush_fixups(void *data)
722 {
723 + enum l1d_flush_type types = *(enum l1d_flush_type *)data;
724 unsigned int instrs[3], *dest;
725 long *start, *end;
726 int i;
727 @@ -330,6 +346,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
728 : "ori type" :
729 (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
730 : "unknown");
731 +
732 + return 0;
733 +}
734 +
735 +void do_entry_flush_fixups(enum l1d_flush_type types)
736 +{
737 + /*
738 + * The call to the fallback flush can not be safely patched in/out while
739 + * other CPUs are executing it. So call __do_entry_flush_fixups() on one
740 + * CPU while all other CPUs spin in the stop machine core with interrupts
741 + * hard disabled.
742 + */
743 + stop_machine(__do_entry_flush_fixups, &types, NULL);
744 }
745
746 void do_rfi_flush_fixups(enum l1d_flush_type types)
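
Both fixups above move the actual patching into a stop_machine() callback: the fallback flush sequences cannot be rewritten while another CPU may be executing them, so every other CPU is parked with interrupts hard-disabled while one CPU patches. A userspace analogue of that rendezvous using pthread barriers (illustration of the pattern only, not the kernel API; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    #define NTHREADS 4

    static pthread_barrier_t enter, leave;
    static int patch_target[2] = { 1, 2 };   /* stands in for patchable code */

    static void *worker(void *arg)
    {
        long id = (long)arg;

        pthread_barrier_wait(&enter);        /* all "CPUs" quiesce here */
        if (id == 0) {                       /* exactly one does the patching */
            patch_target[0] = 3;
            patch_target[1] = 4;
        }
        pthread_barrier_wait(&leave);        /* nobody sees a half-done patch */
        printf("thread %ld sees %d %d\n", id, patch_target[0], patch_target[1]);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NTHREADS];

        pthread_barrier_init(&enter, NULL, NTHREADS);
        pthread_barrier_init(&leave, NULL, NTHREADS);
        for (long i = 0; i < NTHREADS; i++)
            pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < NTHREADS; i++)
            pthread_join(t[i], NULL);
        return 0;
    }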
747 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
748 index bbda646b63b54..210e6f563eb41 100644
749 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
750 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
751 @@ -91,9 +91,6 @@ static void rtas_stop_self(void)
752
753 BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
754
755 - printk("cpu %u (hwid %u) Ready to die...\n",
756 - smp_processor_id(), hard_smp_processor_id());
757 -
758 rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
759
760 panic("Alas, I survived.\n");
761 diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
762 index 5c9ec78422c22..098c04adbaaf6 100644
763 --- a/arch/riscv/kernel/smp.c
764 +++ b/arch/riscv/kernel/smp.c
765 @@ -51,7 +51,7 @@ int riscv_hartid_to_cpuid(int hartid)
766 return i;
767
768 pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
769 - return i;
770 + return -ENOENT;
771 }
772
773 void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
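
The riscv change above fixes a common lookup pitfall: on a miss the function fell out of the loop and returned the loop counter, which at that point equals the table size and looks like a plausible cpu id to callers. Returning a negative errno makes the failure unmistakable. A tiny stand-alone illustration (hypothetical hart table):

    #include <errno.h>
    #include <stdio.h>

    #define NR_CPUS 4
    static const int hartid_of[NR_CPUS] = { 3, 1, 0, 2 };

    static int hartid_to_cpuid(int hartid)
    {
        for (int i = 0; i < NR_CPUS; i++)
            if (hartid_of[i] == hartid)
                return i;
        return -ENOENT;  /* was "return i", i.e. NR_CPUS: a bogus cpu id */
    }

    int main(void)
    {
        printf("hart 2 -> cpu %d\n", hartid_to_cpuid(2));   /* 3 */
        printf("hart 9 -> cpu %d\n", hartid_to_cpuid(9));   /* -2 (= -ENOENT) */
        return 0;
    }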
774 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
775 index c52b7073a5ab5..4bc476d7fa6c4 100644
776 --- a/arch/x86/include/asm/kvm_host.h
777 +++ b/arch/x86/include/asm/kvm_host.h
778 @@ -391,8 +391,6 @@ struct kvm_mmu {
779 int (*sync_page)(struct kvm_vcpu *vcpu,
780 struct kvm_mmu_page *sp);
781 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
782 - void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
783 - u64 *spte, const void *pte);
784 hpa_t root_hpa;
785 gpa_t root_cr3;
786 union kvm_mmu_role mmu_role;
787 @@ -944,7 +942,6 @@ struct kvm_arch {
788 struct kvm_vm_stat {
789 ulong mmu_shadow_zapped;
790 ulong mmu_pte_write;
791 - ulong mmu_pte_updated;
792 ulong mmu_pde_zapped;
793 ulong mmu_flooded;
794 ulong mmu_recycled;
795 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
796 index 47c27c6e38426..b9400087141df 100644
797 --- a/arch/x86/kvm/mmu.c
798 +++ b/arch/x86/kvm/mmu.c
799 @@ -2243,13 +2243,6 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
800 {
801 }
802
803 -static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
804 - struct kvm_mmu_page *sp, u64 *spte,
805 - const void *pte)
806 -{
807 - WARN_ON(1);
808 -}
809 -
810 #define KVM_PAGE_ARRAY_NR 16
811
812 struct kvm_mmu_pages {
813 @@ -4356,7 +4349,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
814 context->gva_to_gpa = nonpaging_gva_to_gpa;
815 context->sync_page = nonpaging_sync_page;
816 context->invlpg = nonpaging_invlpg;
817 - context->update_pte = nonpaging_update_pte;
818 context->root_level = 0;
819 context->shadow_root_level = PT32E_ROOT_LEVEL;
820 context->direct_map = true;
821 @@ -4935,7 +4927,6 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
822 context->gva_to_gpa = paging64_gva_to_gpa;
823 context->sync_page = paging64_sync_page;
824 context->invlpg = paging64_invlpg;
825 - context->update_pte = paging64_update_pte;
826 context->shadow_root_level = level;
827 context->direct_map = false;
828 }
829 @@ -4964,7 +4955,6 @@ static void paging32_init_context(struct kvm_vcpu *vcpu,
830 context->gva_to_gpa = paging32_gva_to_gpa;
831 context->sync_page = paging32_sync_page;
832 context->invlpg = paging32_invlpg;
833 - context->update_pte = paging32_update_pte;
834 context->shadow_root_level = PT32E_ROOT_LEVEL;
835 context->direct_map = false;
836 }
837 @@ -5039,7 +5029,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
838 context->page_fault = tdp_page_fault;
839 context->sync_page = nonpaging_sync_page;
840 context->invlpg = nonpaging_invlpg;
841 - context->update_pte = nonpaging_update_pte;
842 context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
843 context->direct_map = true;
844 context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
845 @@ -5172,7 +5161,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
846 context->gva_to_gpa = ept_gva_to_gpa;
847 context->sync_page = ept_sync_page;
848 context->invlpg = ept_invlpg;
849 - context->update_pte = ept_update_pte;
850 context->root_level = PT64_ROOT_4LEVEL;
851 context->direct_map = false;
852 context->mmu_role.as_u64 = new_role.as_u64;
853 @@ -5312,19 +5300,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
854 }
855 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
856
857 -static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
858 - struct kvm_mmu_page *sp, u64 *spte,
859 - const void *new)
860 -{
861 - if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
862 - ++vcpu->kvm->stat.mmu_pde_zapped;
863 - return;
864 - }
865 -
866 - ++vcpu->kvm->stat.mmu_pte_updated;
867 - vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
868 -}
869 -
870 static bool need_remote_flush(u64 old, u64 new)
871 {
872 if (!is_shadow_present_pte(old))
873 @@ -5490,14 +5465,10 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
874
875 local_flush = true;
876 while (npte--) {
877 - u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
878 -
879 entry = *spte;
880 mmu_page_zap_pte(vcpu->kvm, sp, spte);
881 - if (gentry &&
882 - !((sp->role.word ^ base_role)
883 - & mmu_base_role_mask.word) && rmap_can_add(vcpu))
884 - mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
885 + if (gentry && sp->role.level != PG_LEVEL_4K)
886 + ++vcpu->kvm->stat.mmu_pde_zapped;
887 if (need_remote_flush(entry, *spte))
888 remote_flush = true;
889 ++spte;
890 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
891 index 153659e8f4039..79b5d0ca44724 100644
892 --- a/arch/x86/kvm/x86.c
893 +++ b/arch/x86/kvm/x86.c
894 @@ -208,7 +208,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
895 { "l1d_flush", VCPU_STAT(l1d_flush) },
896 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
897 { "mmu_pte_write", VM_STAT(mmu_pte_write) },
898 - { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
899 { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
900 { "mmu_flooded", VM_STAT(mmu_flooded) },
901 { "mmu_recycled", VM_STAT(mmu_recycled) },
902 @@ -7357,6 +7356,7 @@ void kvm_arch_exit(void)
903 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
904 #ifdef CONFIG_X86_64
905 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
906 + cancel_work_sync(&pvclock_gtod_work);
907 #endif
908 kvm_x86_ops = NULL;
909 kvm_mmu_module_exit();
910 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
911 index c19006d59b791..136232a01f715 100644
912 --- a/block/bfq-iosched.c
913 +++ b/block/bfq-iosched.c
914 @@ -2210,10 +2210,9 @@ static void bfq_remove_request(struct request_queue *q,
915
916 }
917
918 -static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
919 +static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
920 unsigned int nr_segs)
921 {
922 - struct request_queue *q = hctx->queue;
923 struct bfq_data *bfqd = q->elevator->elevator_data;
924 struct request *free = NULL;
925 /*
926 diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
927 index 7620734d55429..f422c7feea7e0 100644
928 --- a/block/blk-mq-sched.c
929 +++ b/block/blk-mq-sched.c
930 @@ -334,14 +334,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
931 unsigned int nr_segs)
932 {
933 struct elevator_queue *e = q->elevator;
934 - struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
935 - struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
936 + struct blk_mq_ctx *ctx;
937 + struct blk_mq_hw_ctx *hctx;
938 bool ret = false;
939 enum hctx_type type;
940
941 if (e && e->type->ops.bio_merge)
942 - return e->type->ops.bio_merge(hctx, bio, nr_segs);
943 + return e->type->ops.bio_merge(q, bio, nr_segs);
944
945 + ctx = blk_mq_get_ctx(q);
946 + hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
947 type = hctx->type;
948 if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
949 !list_empty_careful(&ctx->rq_lists[type])) {
950 diff --git a/block/blk-mq.c b/block/blk-mq.c
951 index 057a634396a90..0674f53c60528 100644
952 --- a/block/blk-mq.c
953 +++ b/block/blk-mq.c
954 @@ -2970,10 +2970,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
955 /* tags can _not_ be used after returning from blk_mq_exit_queue */
956 void blk_mq_exit_queue(struct request_queue *q)
957 {
958 - struct blk_mq_tag_set *set = q->tag_set;
959 + struct blk_mq_tag_set *set = q->tag_set;
960
961 - blk_mq_del_queue_tag_set(q);
962 + /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
963 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
964 + /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
965 + blk_mq_del_queue_tag_set(q);
966 }
967
968 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
969 diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
970 index 34dcea0ef6377..77a0fcebdc77e 100644
971 --- a/block/kyber-iosched.c
972 +++ b/block/kyber-iosched.c
973 @@ -562,11 +562,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
974 }
975 }
976
977 -static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
978 +static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
979 unsigned int nr_segs)
980 {
981 + struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
982 + struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
983 struct kyber_hctx_data *khd = hctx->sched_data;
984 - struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
985 struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
986 unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
987 struct list_head *rq_list = &kcq->rq_list[sched_domain];
988 diff --git a/block/mq-deadline.c b/block/mq-deadline.c
989 index b490f47fd553c..19c6922e85f1b 100644
990 --- a/block/mq-deadline.c
991 +++ b/block/mq-deadline.c
992 @@ -459,10 +459,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
993 return ELEVATOR_NO_MERGE;
994 }
995
996 -static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
997 +static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
998 unsigned int nr_segs)
999 {
1000 - struct request_queue *q = hctx->queue;
1001 struct deadline_data *dd = q->elevator->elevator_data;
1002 struct request *free = NULL;
1003 bool ret;
1004 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
1005 index dbb5919f23e2d..95d119ff76b65 100644
1006 --- a/drivers/acpi/scan.c
1007 +++ b/drivers/acpi/scan.c
1008 @@ -706,6 +706,7 @@ int acpi_device_add(struct acpi_device *device,
1009
1010 result = acpi_device_set_name(device, acpi_device_bus_id);
1011 if (result) {
1012 + kfree_const(acpi_device_bus_id->bus_id);
1013 kfree(acpi_device_bus_id);
1014 goto err_unlock;
1015 }
1016 diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
1017 index 94785083c018a..8fbd376471de0 100644
1018 --- a/drivers/base/power/runtime.c
1019 +++ b/drivers/base/power/runtime.c
1020 @@ -1610,6 +1610,7 @@ void pm_runtime_init(struct device *dev)
1021 dev->power.request_pending = false;
1022 dev->power.request = RPM_REQ_NONE;
1023 dev->power.deferred_resume = false;
1024 + dev->power.needs_force_resume = 0;
1025 INIT_WORK(&dev->power.work, pm_runtime_work);
1026
1027 dev->power.timer_expires = 0;
1028 @@ -1777,10 +1778,12 @@ int pm_runtime_force_suspend(struct device *dev)
1029 * its parent, but set its status to RPM_SUSPENDED anyway in case this
1030 * function will be called again for it in the meantime.
1031 */
1032 - if (pm_runtime_need_not_resume(dev))
1033 + if (pm_runtime_need_not_resume(dev)) {
1034 pm_runtime_set_suspended(dev);
1035 - else
1036 + } else {
1037 __update_runtime_status(dev, RPM_SUSPENDED);
1038 + dev->power.needs_force_resume = 1;
1039 + }
1040
1041 return 0;
1042
1043 @@ -1807,7 +1810,7 @@ int pm_runtime_force_resume(struct device *dev)
1044 int (*callback)(struct device *);
1045 int ret = 0;
1046
1047 - if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1048 + if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
1049 goto out;
1050
1051 /*
1052 @@ -1826,6 +1829,7 @@ int pm_runtime_force_resume(struct device *dev)
1053
1054 pm_runtime_mark_last_busy(dev);
1055 out:
1056 + dev->power.needs_force_resume = 0;
1057 pm_runtime_enable(dev);
1058 return ret;
1059 }
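
The needs_force_resume flag introduced above turns force-suspend/force-resume into a paired handshake: only a device that pm_runtime_force_suspend() actually left needing a resume gets one, instead of re-deriving that decision (possibly differently) at resume time. A compact sketch of the handshake with a made-up device struct, not the kernel's dev_pm_info:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev {
        bool suspended;
        bool needs_force_resume;
    };

    static void force_suspend(struct dev *d, bool need_not_resume)
    {
        d->suspended = true;
        /* Only mark devices that must be brought back by the matching
         * force-resume; the others may legitimately stay suspended. */
        if (!need_not_resume)
            d->needs_force_resume = true;
    }

    static void force_resume(struct dev *d)
    {
        if (!d->suspended || !d->needs_force_resume)
            goto out;
        d->suspended = false;
        printf("resumed\n");
    out:
        d->needs_force_resume = false;  /* one-shot, always cleared */
    }

    int main(void)
    {
        struct dev a = { 0 }, b = { 0 };

        force_suspend(&a, false);  /* a will be force-resumed */
        force_suspend(&b, true);   /* b can stay suspended */
        force_resume(&a);          /* prints "resumed" */
        force_resume(&b);          /* no-op */
        return 0;
    }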
1060 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1061 index e11fddcb73b98..839364371f9af 100644
1062 --- a/drivers/block/nbd.c
1063 +++ b/drivers/block/nbd.c
1064 @@ -2016,7 +2016,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
1065 * config ref and try to destroy the workqueue from inside the work
1066 * queue.
1067 */
1068 - flush_workqueue(nbd->recv_workq);
1069 + if (nbd->recv_workq)
1070 + flush_workqueue(nbd->recv_workq);
1071 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1072 &nbd->config->runtime_flags))
1073 nbd_config_put(nbd);
1074 diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
1075 index 2f8026b719339..1385c2c0acbe1 100644
1076 --- a/drivers/char/tpm/tpm2-cmd.c
1077 +++ b/drivers/char/tpm/tpm2-cmd.c
1078 @@ -962,6 +962,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
1079
1080 if (nr_commands !=
1081 be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
1082 + rc = -EFAULT;
1083 tpm_buf_destroy(&buf);
1084 goto out;
1085 }
1086 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
1087 index 7da35867b6ad3..2fe26ec03552b 100644
1088 --- a/drivers/char/tpm/tpm_tis_core.c
1089 +++ b/drivers/char/tpm/tpm_tis_core.c
1090 @@ -620,16 +620,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
1091 cap_t cap;
1092 int ret;
1093
1094 - /* TPM 2.0 */
1095 - if (chip->flags & TPM_CHIP_FLAG_TPM2)
1096 - return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
1097 -
1098 - /* TPM 1.2 */
1099 ret = request_locality(chip, 0);
1100 if (ret < 0)
1101 return ret;
1102
1103 - ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
1104 + if (chip->flags & TPM_CHIP_FLAG_TPM2)
1105 + ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
1106 + else
1107 + ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
1108
1109 release_locality(chip, 0);
1110
1111 @@ -1037,12 +1035,20 @@ int tpm_tis_resume(struct device *dev)
1112 if (ret)
1113 return ret;
1114
1115 - /* TPM 1.2 requires self-test on resume. This function actually returns
1116 + /*
1117 + * TPM 1.2 requires self-test on resume. This function actually returns
1118 * an error code but for unknown reason it isn't handled.
1119 */
1120 - if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
1121 + if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
1122 + ret = request_locality(chip, 0);
1123 + if (ret < 0)
1124 + return ret;
1125 +
1126 tpm1_do_selftest(chip);
1127
1128 + release_locality(chip, 0);
1129 + }
1130 +
1131 return 0;
1132 }
1133 EXPORT_SYMBOL_GPL(tpm_tis_resume);
1134 diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
1135 index 87ee1bad9a9a8..4a5d2a914bd66 100644
1136 --- a/drivers/clk/samsung/clk-exynos7.c
1137 +++ b/drivers/clk/samsung/clk-exynos7.c
1138 @@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
1139 GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
1140 ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
1141 CLK_IS_CRITICAL, 0),
1142 + /*
1143 + * This clock is required for the CMU_FSYS1 registers access, keep it
1144 + * enabled permanently until proper runtime PM support is added.
1145 + */
1146 GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
1147 - ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
1148 + ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
1149 + CLK_IS_CRITICAL, 0),
1150
1151 GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
1152 "dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,
1153 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
1154 index 092db590087c9..14dc1b8719a97 100644
1155 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
1156 +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
1157 @@ -2050,6 +2050,10 @@ static void commit_planes_for_stream(struct dc *dc,
1158 plane_state->triplebuffer_flips = true;
1159 }
1160 }
1161 + if (update_type == UPDATE_TYPE_FULL) {
1162 + /* force vsync flip when reconfiguring pipes to prevent underflow */
1163 + plane_state->flip_immediate = false;
1164 + }
1165 }
1166 }
1167 #endif
1168 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
1169 index 69e2aae423947..b250ef75c163e 100644
1170 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
1171 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
1172 @@ -1,5 +1,5 @@
1173 /*
1174 - * Copyright 2012-17 Advanced Micro Devices, Inc.
1175 + * Copyright 2012-2021 Advanced Micro Devices, Inc.
1176 *
1177 * Permission is hereby granted, free of charge, to any person obtaining a
1178 * copy of this software and associated documentation files (the "Software"),
1179 @@ -179,11 +179,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
1180 else
1181 Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
1182 */
1183 - if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
1184 - + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
1185 - value = 1;
1186 - } else
1187 - value = 0;
1188 + if (pipe_dest->htotal != 0) {
1189 + if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
1190 + + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
1191 + value = 1;
1192 + } else
1193 + value = 0;
1194 + }
1195 +
1196 REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
1197 }
1198
1199 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
1200 index 05289edbafe34..876f59098f7ef 100644
1201 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
1202 +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
1203 @@ -181,7 +181,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
1204 struct i915_ggtt_view view;
1205
1206 if (i915_gem_object_is_tiled(obj))
1207 - chunk = roundup(chunk, tile_row_pages(obj));
1208 + chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
1209
1210 view.type = I915_GGTT_VIEW_PARTIAL;
1211 view.partial.offset = rounddown(page_offset, chunk);
1212 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
1213 index d59b004f66958..147087a891aa8 100644
1214 --- a/drivers/gpu/drm/radeon/radeon.h
1215 +++ b/drivers/gpu/drm/radeon/radeon.h
1216 @@ -1554,6 +1554,7 @@ struct radeon_dpm {
1217 void *priv;
1218 u32 new_active_crtcs;
1219 int new_active_crtc_count;
1220 + int high_pixelclock_count;
1221 u32 current_active_crtcs;
1222 int current_active_crtc_count;
1223 bool single_display;
1224 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1225 index 226a7bf0eb7ad..9e0aa357585fd 100644
1226 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
1227 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1228 @@ -2136,11 +2136,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1229 return state_index;
1230 /* last mode is usually default, array is low to high */
1231 for (i = 0; i < num_modes; i++) {
1232 - rdev->pm.power_state[state_index].clock_info =
1233 - kcalloc(1, sizeof(struct radeon_pm_clock_info),
1234 - GFP_KERNEL);
1235 + /* avoid memory leaks from invalid modes or unknown frev. */
1236 + if (!rdev->pm.power_state[state_index].clock_info) {
1237 + rdev->pm.power_state[state_index].clock_info =
1238 + kzalloc(sizeof(struct radeon_pm_clock_info),
1239 + GFP_KERNEL);
1240 + }
1241 if (!rdev->pm.power_state[state_index].clock_info)
1242 - return state_index;
1243 + goto out;
1244 rdev->pm.power_state[state_index].num_clock_modes = 1;
1245 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
1246 switch (frev) {
1247 @@ -2259,17 +2262,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1248 break;
1249 }
1250 }
1251 +out:
1252 + /* free any unused clock_info allocation. */
1253 + if (state_index && state_index < num_modes) {
1254 + kfree(rdev->pm.power_state[state_index].clock_info);
1255 + rdev->pm.power_state[state_index].clock_info = NULL;
1256 + }
1257 +
1258 /* last mode is usually default */
1259 - if (rdev->pm.default_power_state_index == -1) {
1260 + if (state_index && rdev->pm.default_power_state_index == -1) {
1261 rdev->pm.power_state[state_index - 1].type =
1262 POWER_STATE_TYPE_DEFAULT;
1263 rdev->pm.default_power_state_index = state_index - 1;
1264 rdev->pm.power_state[state_index - 1].default_clock_mode =
1265 &rdev->pm.power_state[state_index - 1].clock_info[0];
1266 - rdev->pm.power_state[state_index].flags &=
1267 + rdev->pm.power_state[state_index - 1].flags &=
1268 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1269 - rdev->pm.power_state[state_index].misc = 0;
1270 - rdev->pm.power_state[state_index].misc2 = 0;
1271 + rdev->pm.power_state[state_index - 1].misc = 0;
1272 + rdev->pm.power_state[state_index - 1].misc2 = 0;
1273 }
1274 return state_index;
1275 }
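
The radeon_atombios change above swaps a per-iteration kcalloc() for allocate-once-and-reuse: when a mode turned out invalid and state_index was not advanced, the next iteration used to allocate again over the same slot and leak the previous buffer. A userspace sketch of the reuse pattern plus the trailing-slot cleanup (invented data, not the driver structures):

    #include <stdio.h>
    #include <stdlib.h>

    struct state { int *clock_info; };

    int main(void)
    {
        struct state states[4] = { { 0 } };
        int state_index = 0;

        for (int mode = 0; mode < 4; mode++) {
            /* Old code allocated unconditionally here, leaking whenever the
             * previous iteration left this slot filled but unclaimed. */
            if (!states[state_index].clock_info)
                states[state_index].clock_info = calloc(1, sizeof(int));
            if (!states[state_index].clock_info)
                break;
            if (mode & 1)            /* pretend odd modes are invalid */
                continue;            /* slot (and buffer) reused next time */
            state_index++;           /* valid mode: keep this slot */
        }

        /* Free a trailing allocation never claimed by a valid mode,
         * mirroring the patch's "out:" cleanup. */
        if (state_index < 4) {
            free(states[state_index].clock_info);
            states[state_index].clock_info = NULL;
        }

        printf("%d states kept\n", state_index);
        for (int i = 0; i < state_index; i++)
            free(states[i].clock_info);
        return 0;
    }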
1276 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
1277 index 5d10e11a92259..c9ae12be88645 100644
1278 --- a/drivers/gpu/drm/radeon/radeon_pm.c
1279 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
1280 @@ -1720,6 +1720,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1281 struct drm_device *ddev = rdev->ddev;
1282 struct drm_crtc *crtc;
1283 struct radeon_crtc *radeon_crtc;
1284 + struct radeon_connector *radeon_connector;
1285
1286 if (!rdev->pm.dpm_enabled)
1287 return;
1288 @@ -1729,6 +1730,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1289 /* update active crtc counts */
1290 rdev->pm.dpm.new_active_crtcs = 0;
1291 rdev->pm.dpm.new_active_crtc_count = 0;
1292 + rdev->pm.dpm.high_pixelclock_count = 0;
1293 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1294 list_for_each_entry(crtc,
1295 &ddev->mode_config.crtc_list, head) {
1296 @@ -1736,6 +1738,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1297 if (crtc->enabled) {
1298 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1299 rdev->pm.dpm.new_active_crtc_count++;
1300 + if (!radeon_crtc->connector)
1301 + continue;
1302 +
1303 + radeon_connector = to_radeon_connector(radeon_crtc->connector);
1304 + if (radeon_connector->pixelclock_for_modeset > 297000)
1305 + rdev->pm.dpm.high_pixelclock_count++;
1306 }
1307 }
1308 }
1309 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1310 index a0b382a637a64..97bab442dd547 100644
1311 --- a/drivers/gpu/drm/radeon/si_dpm.c
1312 +++ b/drivers/gpu/drm/radeon/si_dpm.c
1313 @@ -3002,6 +3002,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1314 (rdev->pdev->device == 0x6605)) {
1315 max_sclk = 75000;
1316 }
1317 +
1318 + if (rdev->pm.dpm.high_pixelclock_count > 1)
1319 + disable_sclk_switching = true;
1320 }
1321
1322 if (rps->vce_active) {
1323 diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
1324 index 30e18eb60da79..0b689ccbb7935 100644
1325 --- a/drivers/hwmon/occ/common.c
1326 +++ b/drivers/hwmon/occ/common.c
1327 @@ -209,9 +209,9 @@ int occ_update_response(struct occ *occ)
1328 return rc;
1329
1330 /* limit the maximum rate of polling the OCC */
1331 - if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
1332 + if (time_after(jiffies, occ->next_update)) {
1333 rc = occ_poll(occ);
1334 - occ->last_update = jiffies;
1335 + occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
1336 } else {
1337 rc = occ->last_error;
1338 }
1339 @@ -1089,6 +1089,7 @@ int occ_setup(struct occ *occ, const char *name)
1340 return rc;
1341 }
1342
1343 + occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
1344 occ_parse_poll_response(occ);
1345
1346 rc = occ_setup_sensor_attrs(occ);
1347 diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
1348 index 67e6968b8978e..e6df719770e81 100644
1349 --- a/drivers/hwmon/occ/common.h
1350 +++ b/drivers/hwmon/occ/common.h
1351 @@ -99,7 +99,7 @@ struct occ {
1352 u8 poll_cmd_data; /* to perform OCC poll command */
1353 int (*send_cmd)(struct occ *occ, u8 *cmd);
1354
1355 - unsigned long last_update;
1356 + unsigned long next_update;
1357 struct mutex lock; /* lock OCC access */
1358
1359 struct device *hwmon;
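
The occ change above switches the throttle from remembering the last poll to scheduling the next one, and primes that deadline in occ_setup(), so the first interval after setup is honoured rather than depending on an uninitialized timestamp. A userspace analogue of the throttle using CLOCK_MONOTONIC in place of jiffies (interval value invented):

    #include <stdio.h>
    #include <time.h>

    #define UPDATE_INTERVAL_NS 100000000LL  /* 100 ms, like OCC_UPDATE_FREQUENCY */

    static long long now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    static long long next_update;  /* primed once at setup, like occ_setup() */

    static int poll_hardware(void) { puts("polled"); return 0; }

    static int update(void)
    {
        if (now_ns() >= next_update) {      /* time_after(jiffies, next_update) */
            next_update = now_ns() + UPDATE_INTERVAL_NS;
            return poll_hardware();
        }
        return 0;                           /* cached result between polls */
    }

    int main(void)
    {
        next_update = now_ns() + UPDATE_INTERVAL_NS;  /* as occ_setup() now does */
        update();          /* throttled: still inside the first interval */
        next_update = 0;   /* force the deadline to pass */
        update();          /* due: polls */
        return 0;
    }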
1360 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
1361 index 94beacc41302f..a3fec3df11b68 100644
1362 --- a/drivers/i2c/i2c-dev.c
1363 +++ b/drivers/i2c/i2c-dev.c
1364 @@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1365 sizeof(rdwr_arg)))
1366 return -EFAULT;
1367
1368 - /* Put an arbitrary limit on the number of messages that can
1369 - * be sent at once */
1370 + if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
1371 + return -EINVAL;
1372 +
1373 + /*
1374 + * Put an arbitrary limit on the number of messages that can
1375 + * be sent at once
1376 + */
1377 if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
1378 return -EINVAL;
1379
1380 diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
1381 index 7046bca1d7eba..75db410b5054e 100644
1382 --- a/drivers/iio/gyro/mpu3050-core.c
1383 +++ b/drivers/iio/gyro/mpu3050-core.c
1384 @@ -271,7 +271,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
1385 case IIO_CHAN_INFO_OFFSET:
1386 switch (chan->type) {
1387 case IIO_TEMP:
1388 - /* The temperature scaling is (x+23000)/280 Celsius */
1389 + /*
1390 + * The temperature scaling is (x+23000)/280 Celsius
1391 + * for the "best fit straight line" temperature range
1392 + * of -30C..85C. The 23000 includes room temperature
1393 + * offset of +35C, 280 is the precision scale and x is
1394 + * the 16-bit signed integer reported by hardware.
1395 + *
1396 + * Temperature value itself represents temperature of
1397 + * the sensor die.
1398 + */
1399 *val = 23000;
1400 return IIO_VAL_INT;
1401 default:
1402 @@ -328,7 +337,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
1403 goto out_read_raw_unlock;
1404 }
1405
1406 - *val = be16_to_cpu(raw_val);
1407 + *val = (s16)be16_to_cpu(raw_val);
1408 ret = IIO_VAL_INT;
1409
1410 goto out_read_raw_unlock;
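
The cast added above is a sign-extension fix: be16_to_cpu() yields an unsigned 16-bit value, so storing it straight into the signed *val turned negative raw readings into large positive numbers. A minimal demonstration in plain C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t raw = 0xff38;          /* -200 as 16-bit two's complement */
        int wrong = raw;                /* zero-extends: 65336 */
        int right = (int16_t)raw;       /* sign-extends: -200 */

        printf("wrong=%d right=%d\n", wrong, right);
        return 0;
    }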
1411 diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
1412 index a760d14e146a8..fe6001afb7b4d 100644
1413 --- a/drivers/iio/light/tsl2583.c
1414 +++ b/drivers/iio/light/tsl2583.c
1415 @@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
1416 return lux_val;
1417 }
1418
1419 + /* Avoid division by zero of lux_value later on */
1420 + if (lux_val == 0) {
1421 + dev_err(&chip->client->dev,
1422 + "%s: lux_val of 0 will produce out of range trim_value\n",
1423 + __func__);
1424 + return -ENODATA;
1425 + }
1426 +
1427 gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
1428 * chip->als_settings.als_gain_trim) / lux_val);
1429 if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
1430 diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
1431 index 47af54f14756b..67f85268b63db 100644
1432 --- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
1433 +++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
1434 @@ -158,6 +158,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
1435 ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
1436 if (ret < 0) {
1437 dev_err(&client->dev, "cannot send start measurement command");
1438 + pm_runtime_put_noidle(&client->dev);
1439 return ret;
1440 }
1441
1442 diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
1443 index ad714ff375f85..692401e941a77 100644
1444 --- a/drivers/iommu/amd_iommu_init.c
1445 +++ b/drivers/iommu/amd_iommu_init.c
1446 @@ -12,7 +12,6 @@
1447 #include <linux/acpi.h>
1448 #include <linux/list.h>
1449 #include <linux/bitmap.h>
1450 -#include <linux/delay.h>
1451 #include <linux/slab.h>
1452 #include <linux/syscore_ops.h>
1453 #include <linux/interrupt.h>
1454 @@ -254,8 +253,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
1455 static int amd_iommu_enable_interrupts(void);
1456 static int __init iommu_go_to_state(enum iommu_init_state state);
1457 static void init_device_table_dma(void);
1458 -static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1459 - u8 fxn, u64 *value, bool is_write);
1460
1461 static bool amd_iommu_pre_enabled = true;
1462
1463 @@ -1675,53 +1672,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
1464 return 0;
1465 }
1466
1467 -static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
1468 +static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1469 {
1470 - int retry;
1471 + u64 val;
1472 struct pci_dev *pdev = iommu->dev;
1473 - u64 val = 0xabcd, val2 = 0, save_reg, save_src;
1474
1475 if (!iommu_feature(iommu, FEATURE_PC))
1476 return;
1477
1478 amd_iommu_pc_present = true;
1479
1480 - /* save the value to restore, if writable */
1481 - if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
1482 - iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
1483 - goto pc_false;
1484 -
1485 - /*
1486 - * Disable power gating by programing the performance counter
1487 - * source to 20 (i.e. counts the reads and writes from/to IOMMU
1488 - * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
1489 - * which never get incremented during this init phase.
1490 - * (Note: The event is also deprecated.)
1491 - */
1492 - val = 20;
1493 - if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
1494 - goto pc_false;
1495 -
1496 - /* Check if the performance counters can be written to */
1497 - val = 0xabcd;
1498 - for (retry = 5; retry; retry--) {
1499 - if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
1500 - iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
1501 - val2)
1502 - break;
1503 -
1504 - /* Wait about 20 msec for power gating to disable and retry. */
1505 - msleep(20);
1506 - }
1507 -
1508 - /* restore */
1509 - if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
1510 - iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
1511 - goto pc_false;
1512 -
1513 - if (val != val2)
1514 - goto pc_false;
1515 -
1516 pci_info(pdev, "IOMMU performance counters supported\n");
1517
1518 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1519 @@ -1729,11 +1689,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
1520 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1521
1522 return;
1523 -
1524 -pc_false:
1525 - pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1526 - amd_iommu_pc_present = false;
1527 - return;
1528 }
1529
1530 static ssize_t amd_iommu_show_cap(struct device *dev,
1531 diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
1532 index b2224113987c7..de275ccb4fd0b 100644
1533 --- a/drivers/net/can/m_can/m_can.c
1534 +++ b/drivers/net/can/m_can/m_can.c
1535 @@ -1418,6 +1418,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
1536 int i;
1537 int putidx;
1538
1539 + cdev->tx_skb = NULL;
1540 +
1541 /* Generate ID field for TX buffer Element */
1542 /* Common to all supported M_CAN versions */
1543 if (cf->can_id & CAN_EFF_FLAG) {
1544 @@ -1534,7 +1536,6 @@ static void m_can_tx_work_queue(struct work_struct *ws)
1545 tx_work);
1546
1547 m_can_tx_handler(cdev);
1548 - cdev->tx_skb = NULL;
1549 }
1550
1551 static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
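Moving cdev->tx_skb = NULL from the work queue into m_can_tx_handler() itself is the detach-before-use idiom: the handler captures a local reference, clears the shared slot, and only then hands the buffer to the hardware, which may free it through the tx echo path, so no other context can observe a stale pointer. The shape of the pattern, with illustrative names:

	struct sk_buff *skb = slot->tx_skb;	/* capture a local reference */

	slot->tx_skb = NULL;	/* free the slot before the hand-off, not after */
	do_transmit(skb);	/* illustrative; may free skb via tx completion */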
1552 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1553 index 588389697cf91..106f2b2ce17f0 100644
1554 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1555 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1556 @@ -125,7 +125,10 @@ enum board_idx {
1557 NETXTREME_E_VF,
1558 NETXTREME_C_VF,
1559 NETXTREME_S_VF,
1560 + NETXTREME_C_VF_HV,
1561 + NETXTREME_E_VF_HV,
1562 NETXTREME_E_P5_VF,
1563 + NETXTREME_E_P5_VF_HV,
1564 };
1565
1566 /* indexed by enum above */
1567 @@ -173,7 +176,10 @@ static const struct {
1568 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
1569 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
1570 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
1571 + [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
1572 + [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
1573 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
1574 + [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
1575 };
1576
1577 static const struct pci_device_id bnxt_pci_tbl[] = {
1578 @@ -225,15 +231,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
1579 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
1580 #ifdef CONFIG_BNXT_SRIOV
1581 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
1582 + { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
1583 + { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
1584 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
1585 + { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
1586 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
1587 + { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
1588 + { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
1589 + { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
1590 + { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
1591 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
1592 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
1593 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
1594 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
1595 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
1596 + { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
1597 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
1598 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
1599 + { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
1600 + { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
1601 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
1602 #endif
1603 { 0 }
1604 @@ -263,7 +279,8 @@ static struct workqueue_struct *bnxt_pf_wq;
1605 static bool bnxt_vf_pciid(enum board_idx idx)
1606 {
1607 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
1608 - idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
1609 + idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
1610 + idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
1611 }
1612
1613 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
1614 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
1615 index 8314102002b0f..03c8af58050c9 100644
1616 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
1617 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
1618 @@ -803,7 +803,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
1619 return err;
1620 }
1621
1622 -static inline void enic_queue_wq_skb(struct enic *enic,
1623 +static inline int enic_queue_wq_skb(struct enic *enic,
1624 struct vnic_wq *wq, struct sk_buff *skb)
1625 {
1626 unsigned int mss = skb_shinfo(skb)->gso_size;
1627 @@ -849,6 +849,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
1628 wq->to_use = buf->next;
1629 dev_kfree_skb(skb);
1630 }
1631 + return err;
1632 }
1633
1634 /* netif_tx_lock held, process context with BHs disabled, or BH */
1635 @@ -892,7 +893,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
1636 return NETDEV_TX_BUSY;
1637 }
1638
1639 - enic_queue_wq_skb(enic, wq, skb);
1640 + if (enic_queue_wq_skb(enic, wq, skb))
1641 + goto error;
1642
1643 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
1644 netif_tx_stop_queue(txq);
1645 @@ -900,6 +902,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
1646 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
1647 vnic_wq_doorbell(wq);
1648
1649 +error:
1650 spin_unlock(&enic->wq_lock[txq_map]);
1651
1652 return NETDEV_TX_OK;
1653 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1654 index 696f21543aa76..5f2948bafff21 100644
1655 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1656 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1657 @@ -539,8 +539,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
1658 if (h->ae_algo->ops->set_timer_task)
1659 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
1660
1661 - netif_tx_stop_all_queues(netdev);
1662 netif_carrier_off(netdev);
1663 + netif_tx_disable(netdev);
1664
1665 hns3_nic_net_down(netdev);
1666
1667 @@ -796,7 +796,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
1668 * and it is udp packet, which has a dest port as the IANA assigned.
1669 * the hardware is expected to do the checksum offload, but the
1670 * hardware will not do the checksum offload when udp dest port is
1671 - * 4789 or 6081.
1672 + * 4789, 4790 or 6081.
1673 */
1674 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
1675 {
1676 @@ -806,7 +806,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
1677
1678 if (!(!skb->encapsulation &&
1679 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
1680 - l4.udp->dest == htons(GENEVE_UDP_PORT))))
1681 + l4.udp->dest == htons(GENEVE_UDP_PORT) ||
1682 + l4.udp->dest == htons(4790))))
1683 return false;
1684
1685 skb_checksum_help(skb);
1686 @@ -4280,6 +4281,11 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
1687 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
1688 int ret = 0;
1689
1690 + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
1691 + netdev_err(kinfo->netdev, "device is not initialized yet\n");
1692 + return -EFAULT;
1693 + }
1694 +
1695 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
1696
1697 if (netif_running(kinfo->netdev)) {
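The switch from netif_tx_stop_all_queues() to netif_tx_disable() in hns3_nic_net_stop() is not cosmetic: netif_tx_disable() takes each queue's xmit lock while stopping it, so it also waits out transmits that are already inside ndo_start_xmit(), which a plain stop does not. This is essentially how netif_tx_disable() is implemented in include/linux/netdevice.h (abridged):

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);	/* serializes with in-flight xmit */
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}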
1698 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
1699 index 87dece0e745dd..53fd6e4d9e2d6 100644
1700 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
1701 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
1702 @@ -753,8 +753,9 @@ static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
1703
1704 /* configure IGU,EGU error interrupts */
1705 hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
1706 + desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
1707 if (en)
1708 - desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
1709 + desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
1710
1711 desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
1712
1713 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
1714 index 876fd81ad2f17..8eccdb651a3ca 100644
1715 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
1716 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
1717 @@ -33,7 +33,8 @@
1718 #define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
1719 #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000
1720 #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
1721 -#define HCLGE_IGU_ERR_INT_EN 0x0000066F
1722 +#define HCLGE_IGU_ERR_INT_EN 0x0000000F
1723 +#define HCLGE_IGU_ERR_INT_TYPE 0x00000660
1724 #define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
1725 #define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
1726 #define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F
1727 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
1728 index f5da28a60d002..23a706a1765a7 100644
1729 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
1730 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
1731 @@ -455,7 +455,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
1732 unsigned long advertising;
1733 unsigned long supported;
1734 unsigned long send_data;
1735 - u8 msg_data[10];
1736 + u8 msg_data[10] = {};
1737 u8 dest_vfid;
1738
1739 advertising = hdev->hw.mac.advertising[0];
1740 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1741 index dc4dfd4602aba..c8f979c55fec0 100644
1742 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1743 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1744 @@ -255,6 +255,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
1745 if (!phydev)
1746 return;
1747
1748 + phy_loopback(phydev, false);
1749 +
1750 phy_start(phydev);
1751 }
1752
1753 diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1754 index d7684ac2522ef..57a8328e9b4f2 100644
1755 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1756 +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1757 @@ -1893,8 +1893,10 @@ enum i40e_aq_phy_type {
1758 I40E_PHY_TYPE_25GBASE_LR = 0x22,
1759 I40E_PHY_TYPE_25GBASE_AOC = 0x23,
1760 I40E_PHY_TYPE_25GBASE_ACC = 0x24,
1761 - I40E_PHY_TYPE_2_5GBASE_T = 0x30,
1762 - I40E_PHY_TYPE_5GBASE_T = 0x31,
1763 + I40E_PHY_TYPE_2_5GBASE_T = 0x26,
1764 + I40E_PHY_TYPE_5GBASE_T = 0x27,
1765 + I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS = 0x30,
1766 + I40E_PHY_TYPE_5GBASE_T_LINK_STATUS = 0x31,
1767 I40E_PHY_TYPE_MAX,
1768 I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
1769 I40E_PHY_TYPE_EMPTY = 0xFE,
1770 diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
1771 index e81530ca08d03..5706abb3c0eaa 100644
1772 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c
1773 +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
1774 @@ -377,6 +377,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
1775 clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
1776 &cdev->state);
1777 i40e_client_del_instance(pf);
1778 + return;
1779 }
1780 }
1781 }
1782 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
1783 index 66f7deaf46ae2..6475f78e85f6c 100644
1784 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c
1785 +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
1786 @@ -1156,8 +1156,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1787 break;
1788 case I40E_PHY_TYPE_100BASE_TX:
1789 case I40E_PHY_TYPE_1000BASE_T:
1790 - case I40E_PHY_TYPE_2_5GBASE_T:
1791 - case I40E_PHY_TYPE_5GBASE_T:
1792 + case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1793 + case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1794 case I40E_PHY_TYPE_10GBASE_T:
1795 media = I40E_MEDIA_TYPE_BASET;
1796 break;
1797 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
1798 index b519e5af5ed94..e4d0b7747e84d 100644
1799 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
1800 +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
1801 @@ -839,8 +839,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
1802 10000baseT_Full);
1803 break;
1804 case I40E_PHY_TYPE_10GBASE_T:
1805 - case I40E_PHY_TYPE_5GBASE_T:
1806 - case I40E_PHY_TYPE_2_5GBASE_T:
1807 + case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1808 + case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1809 case I40E_PHY_TYPE_1000BASE_T:
1810 case I40E_PHY_TYPE_100BASE_TX:
1811 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
1812 @@ -1406,7 +1406,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
1813
1814 memset(&config, 0, sizeof(config));
1815 config.phy_type = abilities.phy_type;
1816 - config.abilities = abilities.abilities;
1817 + config.abilities = abilities.abilities |
1818 + I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1819 config.phy_type_ext = abilities.phy_type_ext;
1820 config.link_speed = abilities.link_speed;
1821 config.eee_capability = abilities.eee_capability;
1822 diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
1823 index b43ec94a0f293..666a251e8c723 100644
1824 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h
1825 +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
1826 @@ -253,11 +253,8 @@ struct i40e_phy_info {
1827 #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
1828 I40E_PHY_TYPE_OFFSET)
1829 /* Offset for 2.5G/5G PHY Types value to bit number conversion */
1830 -#define I40E_PHY_TYPE_OFFSET2 (-10)
1831 -#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
1832 - I40E_PHY_TYPE_OFFSET2)
1833 -#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
1834 - I40E_PHY_TYPE_OFFSET2)
1835 +#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
1836 +#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
1837 #define I40E_HW_CAP_MAX_GPIO 30
1838 /* Capabilities of a PF or a VF or the whole device */
1839 struct i40e_hw_capabilities {
1840 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
1841 index cffc8c1044f20..a97e1f9ca1ede 100644
1842 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c
1843 +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
1844 @@ -3906,8 +3906,6 @@ static void iavf_remove(struct pci_dev *pdev)
1845
1846 iounmap(hw->hw_addr);
1847 pci_release_regions(pdev);
1848 - iavf_free_all_tx_resources(adapter);
1849 - iavf_free_all_rx_resources(adapter);
1850 iavf_free_queues(adapter);
1851 kfree(adapter->vf_res);
1852 spin_lock_bh(&adapter->mac_vlan_list_lock);
1853 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
1854 index d01b3a1b40f4a..7e3806fd70b21 100644
1855 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
1856 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
1857 @@ -1315,7 +1315,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
1858 skb->protocol = eth_type_trans(skb, netdev);
1859
1860 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1861 - RX_DMA_VID(trxd.rxd3))
1862 + (trxd.rxd2 & RX_DMA_VTAG))
1863 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1864 RX_DMA_VID(trxd.rxd3));
1865 skb_record_rx_queue(skb, 0);
1866 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1867 index 1e787f3577aa5..1e9202b34d352 100644
1868 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1869 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
1870 @@ -293,6 +293,7 @@
1871 #define RX_DMA_LSO BIT(30)
1872 #define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
1873 #define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
1874 +#define RX_DMA_VTAG BIT(15)
1875
1876 /* QDMA descriptor rxd3 */
1877 #define RX_DMA_VID(_x) ((_x) & 0xfff)
1878 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
1879 index 826626e870d5c..0f56f8e336917 100644
1880 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
1881 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
1882 @@ -351,6 +351,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
1883 plat_dat->bsp_priv = gmac;
1884 plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
1885 plat_dat->multicast_filter_bins = 0;
1886 + plat_dat->tx_fifo_size = 8192;
1887 + plat_dat->rx_fifo_size = 8192;
1888
1889 err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
1890 if (err)
1891 diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
1892 index 3b412a56f2cbe..b807134ce90c7 100644
1893 --- a/drivers/net/fddi/Kconfig
1894 +++ b/drivers/net/fddi/Kconfig
1895 @@ -40,17 +40,20 @@ config DEFXX
1896
1897 config DEFXX_MMIO
1898 bool
1899 - prompt "Use MMIO instead of PIO" if PCI || EISA
1900 + prompt "Use MMIO instead of IOP" if PCI || EISA
1901 depends on DEFXX
1902 - default n if PCI || EISA
1903 + default n if EISA
1904 default y
1905 ---help---
1906 This instructs the driver to use EISA or PCI memory-mapped I/O
1907 - (MMIO) as appropriate instead of programmed I/O ports (PIO).
1908 + (MMIO) as appropriate instead of programmed I/O ports (IOP).
1909 Enabling this gives an improvement in processing time in parts
1910 - of the driver, but it may cause problems with EISA (DEFEA)
1911 - adapters. TURBOchannel does not have the concept of I/O ports,
1912 - so MMIO is always used for these (DEFTA) adapters.
1913 + of the driver, but it requires a memory window to be configured
1914 + for EISA (DEFEA) adapters that may not always be available.
1915 + Conversely some PCIe host bridges do not support IOP, so MMIO
1916 + may be required to access PCI (DEFPA) adapters on downstream PCI
1917 + buses with some systems. TURBOchannel does not have the concept
1918 + of I/O ports, so MMIO is always used for these (DEFTA) adapters.
1919
1920 If unsure, say N.
1921
1922 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
1923 index de0d6f21c621c..075871f52bad6 100644
1924 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
1925 +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
1926 @@ -450,6 +450,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1927 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1928 return -EOPNOTSUPP;
1929
1930 + /* MT76x0 GTK offloading does not work with more than one VIF */
1931 + if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1932 + return -EOPNOTSUPP;
1933 +
1934 msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
1935 wcid = msta ? &msta->wcid : &mvif->group_wcid;
1936
1937 diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
1938 index 7846383c88283..3f24dbdae8d0e 100644
1939 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c
1940 +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
1941 @@ -599,8 +599,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
1942 return 0;
1943
1944 if (ev->ssid_len) {
1945 - memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
1946 - auth.ssid.ssid_len = ev->ssid_len;
1947 + int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);
1948 +
1949 + memcpy(auth.ssid.ssid, ev->ssid, len);
1950 + auth.ssid.ssid_len = len;
1951 }
1952
1953 auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
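The qtnfmac change is the standard defence for a length field under external control: clamp it to the destination buffer before the memcpy() instead of trusting the value reported by firmware. In isolation, with an illustrative helper (IEEE80211_MAX_SSID_LEN is 32):

static void copy_reported_ssid(u8 dst[IEEE80211_MAX_SSID_LEN],
			       const u8 *src, size_t reported_len)
{
	/* reported_len arrives in an event from firmware; never trust it */
	int len = clamp_val(reported_len, 0, IEEE80211_MAX_SSID_LEN);

	memcpy(dst, src, len);
}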
1954 diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
1955 index efdce9ae36ea7..a10ee5a680129 100644
1956 --- a/drivers/net/wireless/wl3501.h
1957 +++ b/drivers/net/wireless/wl3501.h
1958 @@ -379,16 +379,7 @@ struct wl3501_get_confirm {
1959 u8 mib_value[100];
1960 };
1961
1962 -struct wl3501_join_req {
1963 - u16 next_blk;
1964 - u8 sig_id;
1965 - u8 reserved;
1966 - struct iw_mgmt_data_rset operational_rset;
1967 - u16 reserved2;
1968 - u16 timeout;
1969 - u16 probe_delay;
1970 - u8 timestamp[8];
1971 - u8 local_time[8];
1972 +struct wl3501_req {
1973 u16 beacon_period;
1974 u16 dtim_period;
1975 u16 cap_info;
1976 @@ -401,6 +392,19 @@ struct wl3501_join_req {
1977 struct iw_mgmt_data_rset bss_basic_rset;
1978 };
1979
1980 +struct wl3501_join_req {
1981 + u16 next_blk;
1982 + u8 sig_id;
1983 + u8 reserved;
1984 + struct iw_mgmt_data_rset operational_rset;
1985 + u16 reserved2;
1986 + u16 timeout;
1987 + u16 probe_delay;
1988 + u8 timestamp[8];
1989 + u8 local_time[8];
1990 + struct wl3501_req req;
1991 +};
1992 +
1993 struct wl3501_join_confirm {
1994 u16 next_blk;
1995 u8 sig_id;
1996 @@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
1997 u16 status;
1998 char timestamp[8];
1999 char localtime[8];
2000 - u16 beacon_period;
2001 - u16 dtim_period;
2002 - u16 cap_info;
2003 - u8 bss_type;
2004 - u8 bssid[ETH_ALEN];
2005 - struct iw_mgmt_essid_pset ssid;
2006 - struct iw_mgmt_ds_pset ds_pset;
2007 - struct iw_mgmt_cf_pset cf_pset;
2008 - struct iw_mgmt_ibss_pset ibss_pset;
2009 - struct iw_mgmt_data_rset bss_basic_rset;
2010 + struct wl3501_req req;
2011 u8 rssi;
2012 };
2013
2014 @@ -471,8 +466,10 @@ struct wl3501_md_req {
2015 u16 size;
2016 u8 pri;
2017 u8 service_class;
2018 - u8 daddr[ETH_ALEN];
2019 - u8 saddr[ETH_ALEN];
2020 + struct {
2021 + u8 daddr[ETH_ALEN];
2022 + u8 saddr[ETH_ALEN];
2023 + } addr;
2024 };
2025
2026 struct wl3501_md_ind {
2027 @@ -484,8 +481,10 @@ struct wl3501_md_ind {
2028 u8 reception;
2029 u8 pri;
2030 u8 service_class;
2031 - u8 daddr[ETH_ALEN];
2032 - u8 saddr[ETH_ALEN];
2033 + struct {
2034 + u8 daddr[ETH_ALEN];
2035 + u8 saddr[ETH_ALEN];
2036 + } addr;
2037 };
2038
2039 struct wl3501_md_confirm {
2040 diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
2041 index 007bf68032939..122d36439319c 100644
2042 --- a/drivers/net/wireless/wl3501_cs.c
2043 +++ b/drivers/net/wireless/wl3501_cs.c
2044 @@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
2045 struct wl3501_md_req sig = {
2046 .sig_id = WL3501_SIG_MD_REQ,
2047 };
2048 + size_t sig_addr_len = sizeof(sig.addr);
2049 u8 *pdata = (char *)data;
2050 int rc = -EIO;
2051
2052 @@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
2053 goto out;
2054 }
2055 rc = 0;
2056 - memcpy(&sig.daddr[0], pdata, 12);
2057 - pktlen = len - 12;
2058 - pdata += 12;
2059 + memcpy(&sig.addr, pdata, sig_addr_len);
2060 + pktlen = len - sig_addr_len;
2061 + pdata += sig_addr_len;
2062 sig.data = bf;
2063 if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
2064 u8 addr4[ETH_ALEN] = {
2065 @@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
2066 struct wl3501_join_req sig = {
2067 .sig_id = WL3501_SIG_JOIN_REQ,
2068 .timeout = 10,
2069 - .ds_pset = {
2070 + .req.ds_pset = {
2071 .el = {
2072 .id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
2073 .len = 1,
2074 @@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
2075 },
2076 };
2077
2078 - memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
2079 + memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
2080 return wl3501_esbq_exec(this, &sig, sizeof(sig));
2081 }
2082
2083 @@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
2084 if (sig.status == WL3501_STATUS_SUCCESS) {
2085 pr_debug("success");
2086 if ((this->net_type == IW_MODE_INFRA &&
2087 - (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
2088 + (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
2089 (this->net_type == IW_MODE_ADHOC &&
2090 - (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
2091 + (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
2092 this->net_type == IW_MODE_AUTO) {
2093 if (!this->essid.el.len)
2094 matchflag = 1;
2095 else if (this->essid.el.len == 3 &&
2096 !memcmp(this->essid.essid, "ANY", 3))
2097 matchflag = 1;
2098 - else if (this->essid.el.len != sig.ssid.el.len)
2099 + else if (this->essid.el.len != sig.req.ssid.el.len)
2100 matchflag = 0;
2101 - else if (memcmp(this->essid.essid, sig.ssid.essid,
2102 + else if (memcmp(this->essid.essid, sig.req.ssid.essid,
2103 this->essid.el.len))
2104 matchflag = 0;
2105 else
2106 matchflag = 1;
2107 if (matchflag) {
2108 for (i = 0; i < this->bss_cnt; i++) {
2109 - if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
2110 + if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
2111 + sig.req.bssid)) {
2112 matchflag = 0;
2113 break;
2114 }
2115 }
2116 }
2117 if (matchflag && (i < 20)) {
2118 - memcpy(&this->bss_set[i].beacon_period,
2119 - &sig.beacon_period, 73);
2120 + memcpy(&this->bss_set[i].req,
2121 + &sig.req, sizeof(sig.req));
2122 this->bss_cnt++;
2123 this->rssi = sig.rssi;
2124 + this->bss_set[i].rssi = sig.rssi;
2125 }
2126 }
2127 } else if (sig.status == WL3501_STATUS_TIMEOUT) {
2128 @@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
2129 if (this->join_sta_bss < this->bss_cnt) {
2130 const int i = this->join_sta_bss;
2131 memcpy(this->bssid,
2132 - this->bss_set[i].bssid, ETH_ALEN);
2133 - this->chan = this->bss_set[i].ds_pset.chan;
2134 + this->bss_set[i].req.bssid, ETH_ALEN);
2135 + this->chan = this->bss_set[i].req.ds_pset.chan;
2136 iw_copy_mgmt_info_element(&this->keep_essid.el,
2137 - &this->bss_set[i].ssid.el);
2138 + &this->bss_set[i].req.ssid.el);
2139 wl3501_mgmt_auth(this);
2140 }
2141 } else {
2142 const int i = this->join_sta_bss;
2143
2144 - memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
2145 - this->chan = this->bss_set[i].ds_pset.chan;
2146 + memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
2147 + this->chan = this->bss_set[i].req.ds_pset.chan;
2148 iw_copy_mgmt_info_element(&this->keep_essid.el,
2149 - &this->bss_set[i].ssid.el);
2150 + &this->bss_set[i].req.ssid.el);
2151 wl3501_online(dev);
2152 }
2153 } else {
2154 @@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
2155 } else {
2156 skb->dev = dev;
2157 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
2158 - skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
2159 + skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
2160 + sizeof(sig.addr));
2161 wl3501_receive(this, skb->data, pkt_len);
2162 skb_put(skb, pkt_len);
2163 skb->protocol = eth_type_trans(skb, dev);
2164 @@ -1573,30 +1577,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
2165 for (i = 0; i < this->bss_cnt; ++i) {
2166 iwe.cmd = SIOCGIWAP;
2167 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
2168 - memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
2169 + memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
2170 current_ev = iwe_stream_add_event(info, current_ev,
2171 extra + IW_SCAN_MAX_DATA,
2172 &iwe, IW_EV_ADDR_LEN);
2173 iwe.cmd = SIOCGIWESSID;
2174 iwe.u.data.flags = 1;
2175 - iwe.u.data.length = this->bss_set[i].ssid.el.len;
2176 + iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
2177 current_ev = iwe_stream_add_point(info, current_ev,
2178 extra + IW_SCAN_MAX_DATA,
2179 &iwe,
2180 - this->bss_set[i].ssid.essid);
2181 + this->bss_set[i].req.ssid.essid);
2182 iwe.cmd = SIOCGIWMODE;
2183 - iwe.u.mode = this->bss_set[i].bss_type;
2184 + iwe.u.mode = this->bss_set[i].req.bss_type;
2185 current_ev = iwe_stream_add_event(info, current_ev,
2186 extra + IW_SCAN_MAX_DATA,
2187 &iwe, IW_EV_UINT_LEN);
2188 iwe.cmd = SIOCGIWFREQ;
2189 - iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
2190 + iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
2191 iwe.u.freq.e = 0;
2192 current_ev = iwe_stream_add_event(info, current_ev,
2193 extra + IW_SCAN_MAX_DATA,
2194 &iwe, IW_EV_FREQ_LEN);
2195 iwe.cmd = SIOCGIWENCODE;
2196 - if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
2197 + if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
2198 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
2199 else
2200 iwe.u.data.flags = IW_ENCODE_DISABLED;
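The wl3501 rework replaces memcpy() calls with hard-coded lengths (12, 72 and 73 bytes) that spanned several adjacent struct members, a layout-fragile pattern the compiler cannot check, with a named struct embedded in each message and copied via sizeof(). A compact sketch of the idiom (struct names illustrative):

struct shared_fields {			/* the members both messages carry */
	u16 beacon_period;
	u16 cap_info;
	u8 bssid[6];			/* ETH_ALEN */
};

struct scan_confirm {
	u16 status;
	struct shared_fields req;	/* embedded once, not duplicated */
	u8 rssi;
};

/* the copy is now bounded by the type, not by a magic constant:
 *	memcpy(&dst->req, &src->req, sizeof(dst->req));
 */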
2201 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2202 index 67ea531e8b34b..6041511b8b76d 100644
2203 --- a/drivers/nvme/host/core.c
2204 +++ b/drivers/nvme/host/core.c
2205 @@ -2414,7 +2414,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2206
2207 if (ctrl->ps_max_latency_us != latency) {
2208 ctrl->ps_max_latency_us = latency;
2209 - nvme_configure_apst(ctrl);
2210 + if (ctrl->state == NVME_CTRL_LIVE)
2211 + nvme_configure_apst(ctrl);
2212 }
2213 }
2214
2215 diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
2216 index a1298f6784ac9..f40d17b285c5e 100644
2217 --- a/drivers/pci/controller/pcie-iproc-msi.c
2218 +++ b/drivers/pci/controller/pcie-iproc-msi.c
2219 @@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
2220 NULL, NULL);
2221 }
2222
2223 - return hwirq;
2224 + return 0;
2225 }
2226
2227 static void iproc_msi_irq_domain_free(struct irq_domain *domain,
2228 diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
2229 index 1cfe3687a2119..6dcee39b364a3 100644
2230 --- a/drivers/pci/endpoint/functions/pci-epf-test.c
2231 +++ b/drivers/pci/endpoint/functions/pci-epf-test.c
2232 @@ -604,6 +604,7 @@ static int __init pci_epf_test_init(void)
2233
2234 ret = pci_epf_register_driver(&test_driver);
2235 if (ret) {
2236 + destroy_workqueue(kpcitest_workqueue);
2237 pr_err("Failed to register pci epf test driver --> %d\n", ret);
2238 return ret;
2239 }
2240 @@ -614,6 +615,8 @@ module_init(pci_epf_test_init);
2241
2242 static void __exit pci_epf_test_exit(void)
2243 {
2244 + if (kpcitest_workqueue)
2245 + destroy_workqueue(kpcitest_workqueue);
2246 pci_epf_unregister_driver(&test_driver);
2247 }
2248 module_exit(pci_epf_test_exit);
2249 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
2250 index 8fa13486f2f15..f28213b625279 100644
2251 --- a/drivers/pci/probe.c
2252 +++ b/drivers/pci/probe.c
2253 @@ -2299,6 +2299,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2254 pci_set_of_node(dev);
2255
2256 if (pci_setup_device(dev)) {
2257 + pci_release_of_node(dev);
2258 pci_bus_put(dev->bus);
2259 kfree(dev);
2260 return NULL;
2261 diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
2262 index 84501c7854734..1cf31fe2674da 100644
2263 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c
2264 +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
2265 @@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
2266 struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
2267 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
2268 unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
2269 - unsigned long mask;
2270 + unsigned int mask;
2271 unsigned long flags;
2272
2273 spin_lock_irqsave(&bank->slock, flags);
2274 @@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
2275 struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
2276 struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
2277 unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
2278 - unsigned long mask;
2279 + unsigned int mask;
2280 unsigned long flags;
2281
2282 /*
2283 @@ -474,7 +474,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
2284 chained_irq_exit(chip, desc);
2285 }
2286
2287 -static inline void exynos_irq_demux_eint(unsigned long pend,
2288 +static inline void exynos_irq_demux_eint(unsigned int pend,
2289 struct irq_domain *domain)
2290 {
2291 unsigned int irq;
2292 @@ -491,8 +491,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
2293 {
2294 struct irq_chip *chip = irq_desc_get_chip(desc);
2295 struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
2296 - unsigned long pend;
2297 - unsigned long mask;
2298 + unsigned int pend;
2299 + unsigned int mask;
2300 int i;
2301
2302 chained_irq_enter(chip, desc);
2303 diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
2304 index d5114abcde197..0f10b3f847051 100644
2305 --- a/drivers/rpmsg/qcom_glink_native.c
2306 +++ b/drivers/rpmsg/qcom_glink_native.c
2307 @@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
2308 dev_err(glink->dev,
2309 "no intent found for channel %s intent %d",
2310 channel->name, liid);
2311 + ret = -ENOENT;
2312 goto advance_rx;
2313 }
2314 }
2315 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
2316 index 1f7e8aefc1eb6..99b93f56a2d50 100644
2317 --- a/drivers/rtc/rtc-ds1307.c
2318 +++ b/drivers/rtc/rtc-ds1307.c
2319 @@ -265,7 +265,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
2320 t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
2321 tmp = regs[DS1307_REG_HOUR] & 0x3f;
2322 t->tm_hour = bcd2bin(tmp);
2323 - t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
2324 + /* rx8130 is bit position, not BCD */
2325 + if (ds1307->type == rx_8130)
2326 + t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
2327 + else
2328 + t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
2329 t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
2330 tmp = regs[DS1307_REG_MONTH] & 0x1f;
2331 t->tm_mon = bcd2bin(tmp) - 1;
2332 @@ -312,7 +316,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
2333 regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
2334 regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
2335 regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
2336 - regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
2337 + /* rx8130 is bit position, not BCD */
2338 + if (ds1307->type == rx_8130)
2339 + regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
2340 + else
2341 + regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
2342 regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
2343 regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
2344
2345 diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
2346 index 8df2075af9a27..835695bedaac1 100644
2347 --- a/drivers/rtc/rtc-fsl-ftm-alarm.c
2348 +++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
2349 @@ -316,6 +316,7 @@ static const struct of_device_id ftm_rtc_match[] = {
2350 { .compatible = "fsl,lx2160a-ftm-alarm", },
2351 { },
2352 };
2353 +MODULE_DEVICE_TABLE(of, ftm_rtc_match);
2354
2355 static struct platform_driver ftm_rtc_driver = {
2356 .probe = ftm_rtc_probe,
2357 diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
2358 index afd99f668c65d..031df45ed67b8 100644
2359 --- a/drivers/thermal/fair_share.c
2360 +++ b/drivers/thermal/fair_share.c
2361 @@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
2362 int total_instance = 0;
2363 int cur_trip_level = get_trip_level(tz);
2364
2365 + mutex_lock(&tz->lock);
2366 +
2367 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
2368 if (instance->trip != trip)
2369 continue;
2370 @@ -110,6 +112,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
2371 mutex_unlock(&instance->cdev->lock);
2372 thermal_cdev_update(cdev);
2373 }
2374 +
2375 + mutex_unlock(&tz->lock);
2376 return 0;
2377 }
2378
2379 diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
2380 index dc5093be553ec..68d0c181ec7bb 100644
2381 --- a/drivers/thermal/of-thermal.c
2382 +++ b/drivers/thermal/of-thermal.c
2383 @@ -712,14 +712,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
2384
2385 count = of_count_phandle_with_args(np, "cooling-device",
2386 "#cooling-cells");
2387 - if (!count) {
2388 + if (count <= 0) {
2389 pr_err("Add a cooling_device property with at least one device\n");
2390 + ret = -ENOENT;
2391 goto end;
2392 }
2393
2394 __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
2395 - if (!__tcbp)
2396 + if (!__tcbp) {
2397 + ret = -ENOMEM;
2398 goto end;
2399 + }
2400
2401 for (i = 0; i < count; i++) {
2402 ret = of_parse_phandle_with_args(np, "cooling-device",
2403 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
2404 index fc1a219ad0a76..de7bb8e6a1efc 100644
2405 --- a/drivers/usb/class/cdc-wdm.c
2406 +++ b/drivers/usb/class/cdc-wdm.c
2407 @@ -321,12 +321,23 @@ exit:
2408
2409 }
2410
2411 -static void kill_urbs(struct wdm_device *desc)
2412 +static void poison_urbs(struct wdm_device *desc)
2413 {
2414 /* the order here is essential */
2415 - usb_kill_urb(desc->command);
2416 - usb_kill_urb(desc->validity);
2417 - usb_kill_urb(desc->response);
2418 + usb_poison_urb(desc->command);
2419 + usb_poison_urb(desc->validity);
2420 + usb_poison_urb(desc->response);
2421 +}
2422 +
2423 +static void unpoison_urbs(struct wdm_device *desc)
2424 +{
2425 + /*
2426 + * the order here is not essential
2427 + * it is symmetrical just to be nice
2428 + */
2429 + usb_unpoison_urb(desc->response);
2430 + usb_unpoison_urb(desc->validity);
2431 + usb_unpoison_urb(desc->command);
2432 }
2433
2434 static void free_urbs(struct wdm_device *desc)
2435 @@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
2436 if (!desc->count) {
2437 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
2438 dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
2439 - kill_urbs(desc);
2440 + poison_urbs(desc);
2441 spin_lock_irq(&desc->iuspin);
2442 desc->resp_count = 0;
2443 spin_unlock_irq(&desc->iuspin);
2444 desc->manage_power(desc->intf, 0);
2445 + unpoison_urbs(desc);
2446 } else {
2447 /* must avoid dev_printk here as desc->intf is invalid */
2448 pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
2449 @@ -1036,9 +1048,9 @@ static void wdm_disconnect(struct usb_interface *intf)
2450 wake_up_all(&desc->wait);
2451 mutex_lock(&desc->rlock);
2452 mutex_lock(&desc->wlock);
2453 + poison_urbs(desc);
2454 cancel_work_sync(&desc->rxwork);
2455 cancel_work_sync(&desc->service_outs_intr);
2456 - kill_urbs(desc);
2457 mutex_unlock(&desc->wlock);
2458 mutex_unlock(&desc->rlock);
2459
2460 @@ -1079,9 +1091,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
2461 set_bit(WDM_SUSPENDING, &desc->flags);
2462 spin_unlock_irq(&desc->iuspin);
2463 /* callback submits work - order is essential */
2464 - kill_urbs(desc);
2465 + poison_urbs(desc);
2466 cancel_work_sync(&desc->rxwork);
2467 cancel_work_sync(&desc->service_outs_intr);
2468 + unpoison_urbs(desc);
2469 }
2470 if (!PMSG_IS_AUTO(message)) {
2471 mutex_unlock(&desc->wlock);
2472 @@ -1139,7 +1152,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
2473 wake_up_all(&desc->wait);
2474 mutex_lock(&desc->rlock);
2475 mutex_lock(&desc->wlock);
2476 - kill_urbs(desc);
2477 + poison_urbs(desc);
2478 cancel_work_sync(&desc->rxwork);
2479 cancel_work_sync(&desc->service_outs_intr);
2480 return 0;
2481 @@ -1150,6 +1163,7 @@ static int wdm_post_reset(struct usb_interface *intf)
2482 struct wdm_device *desc = wdm_find_device(intf);
2483 int rv;
2484
2485 + unpoison_urbs(desc);
2486 clear_bit(WDM_OVERFLOW, &desc->flags);
2487 clear_bit(WDM_RESETTING, &desc->flags);
2488 rv = recover_from_urb_loss(desc);
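The move from usb_kill_urb() to usb_poison_urb()/usb_unpoison_urb() above closes a resubmission race: killing only cancels what is currently in flight, while poisoning additionally makes any later usb_submit_urb() on that URB fail with -EPERM until it is unpoisoned. The resulting quiesce/resume shape, with an illustrative device struct:

struct example_dev {
	struct urb *intr_urb;
	struct work_struct rx_work;
};

static void example_quiesce(struct example_dev *dev)
{
	usb_poison_urb(dev->intr_urb);		/* cancel and block resubmission */
	cancel_work_sync(&dev->rx_work);	/* the work can no longer rearm it */
}

static void example_resume(struct example_dev *dev)
{
	usb_unpoison_urb(dev->intr_urb);	/* submissions allowed again */
}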
2489 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2490 index cd61860cada5e..6c89d714adb62 100644
2491 --- a/drivers/usb/core/hub.c
2492 +++ b/drivers/usb/core/hub.c
2493 @@ -3574,9 +3574,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
2494 * sequence.
2495 */
2496 status = hub_port_status(hub, port1, &portstatus, &portchange);
2497 -
2498 - /* TRSMRCY = 10 msec */
2499 - msleep(10);
2500 }
2501
2502 SuspendCleared:
2503 @@ -3591,6 +3588,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
2504 usb_clear_port_feature(hub->hdev, port1,
2505 USB_PORT_FEAT_C_SUSPEND);
2506 }
2507 +
2508 + /* TRSMRCY = 10 msec */
2509 + msleep(10);
2510 }
2511
2512 if (udev->persist_enabled)
2513 diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
2514 index d08d070a0fb6f..9bb2efc55e9d5 100644
2515 --- a/drivers/usb/dwc2/core.h
2516 +++ b/drivers/usb/dwc2/core.h
2517 @@ -112,6 +112,7 @@ struct dwc2_hsotg_req;
2518 * @debugfs: File entry for debugfs file for this endpoint.
2519 * @dir_in: Set to true if this endpoint is of the IN direction, which
2520 * means that it is sending data to the Host.
2521 + * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
2522 * @index: The index for the endpoint registers.
2523 * @mc: Multi Count - number of transactions per microframe
2524 * @interval: Interval for periodic endpoints, in frames or microframes.
2525 @@ -161,6 +162,7 @@ struct dwc2_hsotg_ep {
2526 unsigned short fifo_index;
2527
2528 unsigned char dir_in;
2529 + unsigned char map_dir;
2530 unsigned char index;
2531 unsigned char mc;
2532 u16 interval;
2533 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
2534 index e3f1f20c49221..566bc1e604af4 100644
2535 --- a/drivers/usb/dwc2/gadget.c
2536 +++ b/drivers/usb/dwc2/gadget.c
2537 @@ -421,7 +421,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
2538 {
2539 struct usb_request *req = &hs_req->req;
2540
2541 - usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
2542 + usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
2543 }
2544
2545 /*
2546 @@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
2547 {
2548 int ret;
2549
2550 + hs_ep->map_dir = hs_ep->dir_in;
2551 ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
2552 if (ret)
2553 goto dma_error;
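The new map_dir field applies a snapshot-at-map rule to the DMA mapping: record the direction actually used when the request was mapped and pass that same value to the unmap, because dir_in can flip in between on a control endpoint and unmapping with the wrong direction corrupts the mapping bookkeeping. In outline:

	/* at map time */
	hs_ep->map_dir = hs_ep->dir_in;
	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->map_dir);

	/* at unmap time: dir_in may have changed since, map_dir has not */
	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);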
2554 diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
2555 index 8c3de2d258bf7..e8acad49a53a8 100644
2556 --- a/drivers/usb/dwc3/dwc3-omap.c
2557 +++ b/drivers/usb/dwc3/dwc3-omap.c
2558 @@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
2559
2560 if (extcon_get_state(edev, EXTCON_USB) == true)
2561 dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
2562 + else
2563 + dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
2564 +
2565 if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
2566 dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
2567 + else
2568 + dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
2569
2570 omap->edev = edev;
2571 }
2572 diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
2573 index 58b8801ce8816..5a7c152c9ee39 100644
2574 --- a/drivers/usb/dwc3/dwc3-pci.c
2575 +++ b/drivers/usb/dwc3/dwc3-pci.c
2576 @@ -138,6 +138,7 @@ static const struct property_entry dwc3_pci_amd_properties[] = {
2577 PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
2578 PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
2579 PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
2580 + PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
2581 PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
2582 {}
2583 };
2584 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2585 index af8efebfaf110..40fe856184efa 100644
2586 --- a/drivers/usb/dwc3/gadget.c
2587 +++ b/drivers/usb/dwc3/gadget.c
2588 @@ -1566,7 +1566,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
2589 }
2590 }
2591
2592 - return __dwc3_gadget_kick_transfer(dep);
2593 + __dwc3_gadget_kick_transfer(dep);
2594 +
2595 + return 0;
2596 }
2597
2598 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
2599 diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
2600 index 9e0c98d6bdb09..c3f74d6674e1d 100644
2601 --- a/drivers/usb/host/fotg210-hcd.c
2602 +++ b/drivers/usb/host/fotg210-hcd.c
2603 @@ -5571,7 +5571,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
2604 struct usb_hcd *hcd;
2605 struct resource *res;
2606 int irq;
2607 - int retval = -ENODEV;
2608 + int retval;
2609 struct fotg210_hcd *fotg210;
2610
2611 if (usb_disabled())
2612 @@ -5591,7 +5591,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
2613 hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
2614 dev_name(dev));
2615 if (!hcd) {
2616 - dev_err(dev, "failed to create hcd with err %d\n", retval);
2617 + dev_err(dev, "failed to create hcd\n");
2618 retval = -ENOMEM;
2619 goto fail_create_hcd;
2620 }
2621 diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
2622 index 268328c206816..2208fa6c74109 100644
2623 --- a/drivers/usb/host/xhci-ext-caps.h
2624 +++ b/drivers/usb/host/xhci-ext-caps.h
2625 @@ -7,8 +7,9 @@
2626 * Author: Sarah Sharp
2627 * Some code borrowed from the Linux EHCI driver.
2628 */
2629 -/* Up to 16 ms to halt an HC */
2630 -#define XHCI_MAX_HALT_USEC (16*1000)
2631 +
2632 +/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
2633 +#define XHCI_MAX_HALT_USEC (32 * 1000)
2634 /* HC not running - set to 1 when run/stop bit is cleared. */
2635 #define XHCI_STS_HALT (1<<0)
2636
2637 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2638 index 71ef473df585f..d242779297ba7 100644
2639 --- a/drivers/usb/host/xhci-pci.c
2640 +++ b/drivers/usb/host/xhci-pci.c
2641 @@ -153,8 +153,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2642 (pdev->device == 0x15e0 || pdev->device == 0x15e1))
2643 xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
2644
2645 - if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
2646 + if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
2647 xhci->quirks |= XHCI_DISABLE_SPARSE;
2648 + xhci->quirks |= XHCI_RESET_ON_RESUME;
2649 + }
2650
2651 if (pdev->vendor == PCI_VENDOR_ID_AMD)
2652 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
2653 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2654 index de05ac9d3ae15..a3813c75a3de8 100644
2655 --- a/drivers/usb/host/xhci.c
2656 +++ b/drivers/usb/host/xhci.c
2657 @@ -1397,7 +1397,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2658 * we need to issue an evaluate context command and wait on it.
2659 */
2660 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
2661 - unsigned int ep_index, struct urb *urb)
2662 + unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
2663 {
2664 struct xhci_container_ctx *out_ctx;
2665 struct xhci_input_control_ctx *ctrl_ctx;
2666 @@ -1428,7 +1428,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
2667 * changes max packet sizes.
2668 */
2669
2670 - command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2671 + command = xhci_alloc_command(xhci, true, mem_flags);
2672 if (!command)
2673 return -ENOMEM;
2674
2675 @@ -1524,7 +1524,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
2676 */
2677 if (urb->dev->speed == USB_SPEED_FULL) {
2678 ret = xhci_check_maxpacket(xhci, slot_id,
2679 - ep_index, urb);
2680 + ep_index, urb, mem_flags);
2681 if (ret < 0) {
2682 xhci_urb_free_priv(urb_priv);
2683 urb->hcpriv = NULL;
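Threading mem_flags through xhci_check_maxpacket() follows the general rule that a helper reachable from atomic context must not hard-code GFP_KERNEL, which may sleep, but must use the gfp_t its caller was given. A minimal sketch with illustrative names:

struct foo_cmd {
	u32 status;
};

static struct foo_cmd *prepare_cmd(gfp_t mem_flags)
{
	/* the URB enqueue path can run with spinlocks held, so honour the
	 * caller's flags (GFP_ATOMIC there) rather than forcing GFP_KERNEL */
	return kzalloc(sizeof(struct foo_cmd), mem_flags);
}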
2684 diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
2685 index b7852a54efb3b..b40db48f8874d 100644
2686 --- a/drivers/usb/typec/tcpm/tcpm.c
2687 +++ b/drivers/usb/typec/tcpm/tcpm.c
2688 @@ -2339,10 +2339,10 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
2689 port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
2690 pdo_pps_apdo_max_voltage(snk));
2691 port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
2692 - port->pps_data.req_out_volt = min(port->pps_data.max_volt,
2693 - max(port->pps_data.min_volt,
2694 + port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
2695 + max(port->pps_data.req_min_volt,
2696 port->pps_data.req_out_volt));
2697 - port->pps_data.req_op_curr = min(port->pps_data.max_curr,
2698 + port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
2699 port->pps_data.req_op_curr);
2700 }
2701
2702 diff --git a/fs/ceph/export.c b/fs/ceph/export.c
2703 index e088843a7734c..baa6368bece59 100644
2704 --- a/fs/ceph/export.c
2705 +++ b/fs/ceph/export.c
2706 @@ -178,8 +178,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
2707 return ERR_CAST(inode);
2708 /* We need LINK caps to reliably check i_nlink */
2709 err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
2710 - if (err)
2711 + if (err) {
2712 + iput(inode);
2713 return ERR_PTR(err);
2714 + }
2715 /* -ESTALE if inode as been unlinked and no file is open */
2716 if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
2717 iput(inode);
2718 diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
2719 index d6bbccb0ed152..d5bd990bcab8b 100644
2720 --- a/fs/dlm/debug_fs.c
2721 +++ b/fs/dlm/debug_fs.c
2722 @@ -542,6 +542,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
2723
2724 if (bucket >= ls->ls_rsbtbl_size) {
2725 kfree(ri);
2726 + ++*pos;
2727 return NULL;
2728 }
2729 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
2730 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
2731 index cbd17e4ff920c..c6bd669f4b4e6 100644
2732 --- a/fs/f2fs/inline.c
2733 +++ b/fs/f2fs/inline.c
2734 @@ -216,7 +216,8 @@ out:
2735
2736 f2fs_put_page(page, 1);
2737
2738 - f2fs_balance_fs(sbi, dn.node_changed);
2739 + if (!err)
2740 + f2fs_balance_fs(sbi, dn.node_changed);
2741
2742 return err;
2743 }
2744 diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
2745 index a401ef72bc821..7944a08a3977e 100644
2746 --- a/fs/f2fs/verity.c
2747 +++ b/fs/f2fs/verity.c
2748 @@ -150,40 +150,73 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
2749 size_t desc_size, u64 merkle_tree_size)
2750 {
2751 struct inode *inode = file_inode(filp);
2752 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2753 u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
2754 struct fsverity_descriptor_location dloc = {
2755 .version = cpu_to_le32(1),
2756 .size = cpu_to_le32(desc_size),
2757 .pos = cpu_to_le64(desc_pos),
2758 };
2759 - int err = 0;
2760 + int err = 0, err2 = 0;
2761
2762 - if (desc != NULL) {
2763 - /* Succeeded; write the verity descriptor. */
2764 - err = pagecache_write(inode, desc, desc_size, desc_pos);
2765 + /*
2766 + * If an error already occurred (which fs/verity/ signals by passing
2767 + * desc == NULL), then only clean-up is needed.
2768 + */
2769 + if (desc == NULL)
2770 + goto cleanup;
2771
2772 - /* Write all pages before clearing FI_VERITY_IN_PROGRESS. */
2773 - if (!err)
2774 - err = filemap_write_and_wait(inode->i_mapping);
2775 - }
2776 + /* Append the verity descriptor. */
2777 + err = pagecache_write(inode, desc, desc_size, desc_pos);
2778 + if (err)
2779 + goto cleanup;
2780 +
2781 + /*
2782 + * Write all pages (both data and verity metadata). Note that this must
2783 + * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond
2784 + * i_size won't be written properly. For crash consistency, this also
2785 + * must happen before the verity inode flag gets persisted.
2786 + */
2787 + err = filemap_write_and_wait(inode->i_mapping);
2788 + if (err)
2789 + goto cleanup;
2790 +
2791 + /* Set the verity xattr. */
2792 + err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
2793 + F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
2794 + NULL, XATTR_CREATE);
2795 + if (err)
2796 + goto cleanup;
2797
2798 - /* If we failed, truncate anything we wrote past i_size. */
2799 - if (desc == NULL || err)
2800 - f2fs_truncate(inode);
2801 + /* Finally, set the verity inode flag. */
2802 + file_set_verity(inode);
2803 + f2fs_set_inode_flags(inode);
2804 + f2fs_mark_inode_dirty_sync(inode, true);
2805
2806 clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
2807 + return 0;
2808
2809 - if (desc != NULL && !err) {
2810 - err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
2811 - F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
2812 - NULL, XATTR_CREATE);
2813 - if (!err) {
2814 - file_set_verity(inode);
2815 - f2fs_set_inode_flags(inode);
2816 - f2fs_mark_inode_dirty_sync(inode, true);
2817 - }
2818 +cleanup:
2819 + /*
2820 + * Verity failed to be enabled, so clean up by truncating any verity
2821 + * metadata that was written beyond i_size (both from cache and from
2822 + * disk) and clearing FI_VERITY_IN_PROGRESS.
2823 + *
2824 + * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection
2825 + * from re-instantiating cached pages we are truncating (since unlike
2826 + * normal file accesses, garbage collection isn't limited by i_size).
2827 + */
2828 + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2829 + truncate_inode_pages(inode->i_mapping, inode->i_size);
2830 + err2 = f2fs_truncate(inode);
2831 + if (err2) {
2832 + f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)",
2833 + err2);
2834 + set_sbi_flag(sbi, SBI_NEED_FSCK);
2835 }
2836 - return err;
2837 + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2838 + clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
2839 + return err ?: err2;
2840 }
2841
2842 static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
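A small reading note on the rewritten error path above: return err ?: err2; uses the GCC/Clang conditional-with-omitted-operand extension, equivalent to

	return err ? err : err2;

so the original failure that aborted enabling verity is reported in preference to any secondary cleanup failure.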
2843 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
2844 index 00015d8513829..e51b7019e8871 100644
2845 --- a/fs/fuse/cuse.c
2846 +++ b/fs/fuse/cuse.c
2847 @@ -624,6 +624,8 @@ static int __init cuse_init(void)
2848 cuse_channel_fops.owner = THIS_MODULE;
2849 cuse_channel_fops.open = cuse_channel_open;
2850 cuse_channel_fops.release = cuse_channel_release;
2851 + /* CUSE is not prepared for FUSE_DEV_IOC_CLONE */
2852 + cuse_channel_fops.unlocked_ioctl = NULL;
2853
2854 cuse_class = class_create(THIS_MODULE, "cuse");
2855 if (IS_ERR(cuse_class))
2856 diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
2857 index a930ddd156819..7054a542689f9 100644
2858 --- a/fs/hfsplus/extents.c
2859 +++ b/fs/hfsplus/extents.c
2860 @@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
2861 res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
2862 if (res)
2863 break;
2864 - hfs_brec_remove(&fd);
2865
2866 - mutex_unlock(&fd.tree->tree_lock);
2867 start = hip->cached_start;
2868 + if (blk_cnt <= start)
2869 + hfs_brec_remove(&fd);
2870 + mutex_unlock(&fd.tree->tree_lock);
2871 hfsplus_free_extents(sb, hip->cached_extents,
2872 alloc_cnt - start, alloc_cnt - blk_cnt);
2873 hfsplus_dump_extent(hip->cached_extents);
2874 + mutex_lock(&fd.tree->tree_lock);
2875 if (blk_cnt > start) {
2876 hip->extent_state |= HFSPLUS_EXT_DIRTY;
2877 break;
2878 @@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
2879 alloc_cnt = start;
2880 hip->cached_start = hip->cached_blocks = 0;
2881 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
2882 - mutex_lock(&fd.tree->tree_lock);
2883 }
2884 hfs_find_exit(&fd);
2885
2886 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
2887 index a2e9354b9d534..50ad3522ce365 100644
2888 --- a/fs/hugetlbfs/inode.c
2889 +++ b/fs/hugetlbfs/inode.c
2890 @@ -135,6 +135,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
2891 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
2892 {
2893 struct inode *inode = file_inode(file);
2894 + struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
2895 loff_t len, vma_len;
2896 int ret;
2897 struct hstate *h = hstate_file(file);
2898 @@ -150,6 +151,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
2899 vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
2900 vma->vm_ops = &hugetlb_vm_ops;
2901
2902 + ret = seal_check_future_write(info->seals, vma);
2903 + if (ret)
2904 + return ret;
2905 +
2906 /*
2907 * page based offset in vm_pgoff could be sufficiently large to
2908 * overflow a loff_t when converted to byte offset. This can
2909 diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
2910 index 80867a1a94f26..5c73751adb2d3 100644
2911 --- a/fs/iomap/buffered-io.c
2912 +++ b/fs/iomap/buffered-io.c
2913 @@ -30,6 +30,7 @@ iomap_page_create(struct inode *inode, struct page *page)
2914 iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
2915 atomic_set(&iop->read_count, 0);
2916 atomic_set(&iop->write_count, 0);
2917 + spin_lock_init(&iop->uptodate_lock);
2918 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
2919
2920 /*
2921 @@ -118,25 +119,38 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
2922 }
2923
2924 static void
2925 -iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
2926 +iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
2927 {
2928 struct iomap_page *iop = to_iomap_page(page);
2929 struct inode *inode = page->mapping->host;
2930 unsigned first = off >> inode->i_blkbits;
2931 unsigned last = (off + len - 1) >> inode->i_blkbits;
2932 - unsigned int i;
2933 bool uptodate = true;
2934 + unsigned long flags;
2935 + unsigned int i;
2936
2937 - if (iop) {
2938 - for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
2939 - if (i >= first && i <= last)
2940 - set_bit(i, iop->uptodate);
2941 - else if (!test_bit(i, iop->uptodate))
2942 - uptodate = false;
2943 - }
2944 + spin_lock_irqsave(&iop->uptodate_lock, flags);
2945 + for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
2946 + if (i >= first && i <= last)
2947 + set_bit(i, iop->uptodate);
2948 + else if (!test_bit(i, iop->uptodate))
2949 + uptodate = false;
2950 }
2951
2952 - if (uptodate && !PageError(page))
2953 + if (uptodate)
2954 + SetPageUptodate(page);
2955 + spin_unlock_irqrestore(&iop->uptodate_lock, flags);
2956 +}
2957 +
2958 +static void
2959 +iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
2960 +{
2961 + if (PageError(page))
2962 + return;
2963 +
2964 + if (page_has_private(page))
2965 + iomap_iop_set_range_uptodate(page, off, len);
2966 + else
2967 SetPageUptodate(page);
2968 }
2969
2970 diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
2971 index 1741d902b0d8f..fa1c920afb494 100644
2972 --- a/fs/nfs/flexfilelayout/flexfilelayout.c
2973 +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
2974 @@ -103,7 +103,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
2975 if (unlikely(!p))
2976 return -ENOBUFS;
2977 fh->size = be32_to_cpup(p++);
2978 - if (fh->size > sizeof(struct nfs_fh)) {
2979 + if (fh->size > NFS_MAXFHSIZE) {
2980 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
2981 fh->size);
2982 return -EOVERFLOW;
2983 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
2984 index 53604cc090ca5..8c0f916380c4e 100644
2985 --- a/fs/nfs/inode.c
2986 +++ b/fs/nfs/inode.c
2987 @@ -1618,10 +1618,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
2988 */
2989 static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
2990 {
2991 - const struct nfs_inode *nfsi = NFS_I(inode);
2992 + unsigned long attr_gencount = NFS_I(inode)->attr_gencount;
2993
2994 - return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
2995 - ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
2996 + return (long)(fattr->gencount - attr_gencount) > 0 ||
2997 + (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
2998 }
2999
3000 static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
3001 @@ -2049,7 +2049,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
3002 nfsi->attrtimeo_timestamp = now;
3003 }
3004 /* Set the barrier to be more recent than this fattr */
3005 - if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
3006 + if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
3007 nfsi->attr_gencount = fattr->gencount;
3008 }
3009
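
Both NFS hunks rewrite "(long)a - (long)b > 0" as "(long)(a - b) > 0". For
wrapping generation counters the subtraction must happen in the unsigned
type first; widening each operand separately gives the wrong answer once
the counters straddle a wrap (and risks signed-overflow UB when the types
are the same width). A standalone demonstration, shrunk to 32-bit counters
so the wrap is easy to hit:

#include <stdint.h>
#include <stdio.h>

/* Serial-number comparison: is a newer than b, modulo wrap? */
static int newer(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	uint32_t before = 0xffffffffu;	/* just before the wrap */
	uint32_t after = 2;		/* three increments later */

	printf("wrap-safe: %d\n", newer(after, before));	/* 1 */
	printf("naive:     %d\n",
	       (int64_t)after - (int64_t)before > 0);		/* 0: wrong */
	return 0;
}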
3010 diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
3011 index 9b61c80a93e9e..6b7c926824ae0 100644
3012 --- a/fs/nfs/nfs42proc.c
3013 +++ b/fs/nfs/nfs42proc.c
3014 @@ -59,7 +59,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
3015 static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
3016 loff_t offset, loff_t len)
3017 {
3018 - struct nfs_server *server = NFS_SERVER(file_inode(filep));
3019 + struct inode *inode = file_inode(filep);
3020 + struct nfs_server *server = NFS_SERVER(inode);
3021 struct nfs4_exception exception = { };
3022 struct nfs_lock_context *lock;
3023 int err;
3024 @@ -68,9 +69,13 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
3025 if (IS_ERR(lock))
3026 return PTR_ERR(lock);
3027
3028 - exception.inode = file_inode(filep);
3029 + exception.inode = inode;
3030 exception.state = lock->open_context->state;
3031
3032 + err = nfs_sync_inode(inode);
3033 + if (err)
3034 + goto out;
3035 +
3036 do {
3037 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
3038 if (err == -ENOTSUPP) {
3039 @@ -79,7 +84,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
3040 }
3041 err = nfs4_handle_exception(server, err, &exception);
3042 } while (exception.retry);
3043 -
3044 +out:
3045 nfs_put_lock_context(lock);
3046 return err;
3047 }
3048 @@ -117,16 +122,13 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
3049 return -EOPNOTSUPP;
3050
3051 inode_lock(inode);
3052 - err = nfs_sync_inode(inode);
3053 - if (err)
3054 - goto out_unlock;
3055
3056 err = nfs42_proc_fallocate(&msg, filep, offset, len);
3057 if (err == 0)
3058 truncate_pagecache_range(inode, offset, (offset + len) -1);
3059 if (err == -EOPNOTSUPP)
3060 NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
3061 -out_unlock:
3062 +
3063 inode_unlock(inode);
3064 return err;
3065 }
3066 @@ -498,7 +500,10 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
3067 if (status)
3068 return status;
3069
3070 - return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
3071 + if (whence == SEEK_DATA && res.sr_eof)
3072 + return -NFS4ERR_NXIO;
3073 + else
3074 + return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
3075 }
3076
3077 loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
3078 diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
3079 index 7b1128398976e..89d492916deaf 100644
3080 --- a/fs/squashfs/file.c
3081 +++ b/fs/squashfs/file.c
3082 @@ -211,11 +211,11 @@ failure:
3083 * If the skip factor is limited in this way then the file will use multiple
3084 * slots.
3085 */
3086 -static inline int calculate_skip(int blocks)
3087 +static inline int calculate_skip(u64 blocks)
3088 {
3089 - int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
3090 + u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
3091 * SQUASHFS_META_INDEXES);
3092 - return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
3093 + return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
3094 }
3095
3096
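
calculate_skip() did its arithmetic in int, which a block count for a
multi-terabyte file no longer fits; once skip is u64, min() also needs the
cast so both arguments share a type. The width problem in isolation, using
stand-in constants rather than the real squashfs values:

#include <stdint.h>
#include <stdio.h>

#define META_ENTRIES 127	/* illustrative, not squashfs's values */
#define META_INDEXES 8
#define CACHED_BLKS  8

static int skip_u64(uint64_t blocks)
{
	uint64_t skip = blocks / ((META_ENTRIES + 1) * META_INDEXES);

	return skip + 1 < CACHED_BLKS - 1 ? (int)(skip + 1) : CACHED_BLKS - 1;
}

int main(void)
{
	uint64_t blocks = 1ULL << 33;	/* ~8G blocks: far beyond INT_MAX */

	printf("u64 skip (capped): %d\n", skip_u64(blocks));
	printf("same count forced through int: %d\n", (int)blocks);
	return 0;
}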
3097 diff --git a/include/linux/elevator.h b/include/linux/elevator.h
3098 index 901bda352dcb7..7b4d5face2043 100644
3099 --- a/include/linux/elevator.h
3100 +++ b/include/linux/elevator.h
3101 @@ -34,7 +34,7 @@ struct elevator_mq_ops {
3102 void (*depth_updated)(struct blk_mq_hw_ctx *);
3103
3104 bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
3105 - bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
3106 + bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
3107 int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
3108 void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
3109 void (*requests_merged)(struct request_queue *, struct request *, struct request *);
3110 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
3111 index 1361637c369dd..af2b799d7a665 100644
3112 --- a/include/linux/i2c.h
3113 +++ b/include/linux/i2c.h
3114 @@ -677,6 +677,8 @@ struct i2c_adapter_quirks {
3115 #define I2C_AQ_NO_ZERO_LEN_READ BIT(5)
3116 #define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6)
3117 #define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
3118 +/* adapter cannot do repeated START */
3119 +#define I2C_AQ_NO_REP_START BIT(7)
3120
3121 /*
3122 * i2c_adapter is the structure used to identify a physical i2c bus along
3123 diff --git a/include/linux/iomap.h b/include/linux/iomap.h
3124 index 7aa5d61179361..53b16f104081b 100644
3125 --- a/include/linux/iomap.h
3126 +++ b/include/linux/iomap.h
3127 @@ -139,6 +139,7 @@ loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
3128 struct iomap_page {
3129 atomic_t read_count;
3130 atomic_t write_count;
3131 + spinlock_t uptodate_lock;
3132 DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
3133 };
3134
3135 diff --git a/include/linux/mm.h b/include/linux/mm.h
3136 index 703e0d72a05c7..5565d11f95429 100644
3137 --- a/include/linux/mm.h
3138 +++ b/include/linux/mm.h
3139 @@ -2925,5 +2925,37 @@ static inline int pages_identical(struct page *page1, struct page *page2)
3140 return !memcmp_pages(page1, page2);
3141 }
3142
3143 +/**
3144 + * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
3145 + * @seals: the seals to check
3146 + * @vma: the vma to operate on
3147 + *
3148 + * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
3149 + * the vma flags. Return 0 if the check passes, or <0 on error.
3150 + */
3151 +static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
3152 +{
3153 + if (seals & F_SEAL_FUTURE_WRITE) {
3154 + /*
3155 + * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
3156 + * "future write" seal active.
3157 + */
3158 + if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
3159 + return -EPERM;
3160 +
3161 + /*
3162 + * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
3163 + * MAP_SHARED and read-only, take care to not allow mprotect to
3164 + * mappings. For private mappings, we don't need to mask
3165 + * mappings. For private mappings, don't need to mask
3166 + * VM_MAYWRITE as we still want them to be COW-writable.
3167 + */
3168 + if (vma->vm_flags & VM_SHARED)
3169 + vma->vm_flags &= ~(VM_MAYWRITE);
3170 + }
3171 +
3172 + return 0;
3173 +}
3174 +
3175 #endif /* __KERNEL__ */
3176 #endif /* _LINUX_MM_H */
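
seal_check_future_write() centralizes the F_SEAL_FUTURE_WRITE policy so
that hugetlbfs (above) enforces the same rules shmem already did (its
open-coded copy is removed later in this patch). The user-visible contract,
as a runnable Linux sketch; it assumes a libc new enough to expose
memfd_create() and the seal constants, and uses plain shmem pages (the
hugetlbfs case is the same program with MFD_HUGETLB, which is what was
unprotected before this fix):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
	void *p;

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
		return 1;

	/* New shared+writable mappings must now fail with EPERM... */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		perror("shared rw mmap (EPERM expected)");

	/* ...read-only shared mappings still work, and VM_MAYWRITE is
	 * cleared so mprotect() cannot later upgrade them to writable. */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	printf("read-only shared map: %s\n",
	       p == MAP_FAILED ? "failed" : "ok");
	return 0;
}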
3177 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
3178 index 270aa8fd2800b..2b3b2fc1cb33f 100644
3179 --- a/include/linux/mm_types.h
3180 +++ b/include/linux/mm_types.h
3181 @@ -95,10 +95,10 @@ struct page {
3182 };
3183 struct { /* page_pool used by netstack */
3184 /**
3185 - * @dma_addr: might require a 64-bit value even on
3186 + * @dma_addr: might require a 64-bit value on
3187 * 32-bit architectures.
3188 */
3189 - dma_addr_t dma_addr;
3190 + unsigned long dma_addr[2];
3191 };
3192 struct { /* slab, slob and slub */
3193 union {
3194 diff --git a/include/linux/pm.h b/include/linux/pm.h
3195 index c1d21e9a864f3..eb28c802570dc 100644
3196 --- a/include/linux/pm.h
3197 +++ b/include/linux/pm.h
3198 @@ -608,6 +608,7 @@ struct dev_pm_info {
3199 unsigned int idle_notification:1;
3200 unsigned int request_pending:1;
3201 unsigned int deferred_resume:1;
3202 + unsigned int needs_force_resume:1;
3203 unsigned int runtime_auto:1;
3204 bool ignore_children:1;
3205 unsigned int no_callbacks:1;
3206 diff --git a/include/net/page_pool.h b/include/net/page_pool.h
3207 index 1121faa99c122..cf086e13bd253 100644
3208 --- a/include/net/page_pool.h
3209 +++ b/include/net/page_pool.h
3210 @@ -185,7 +185,17 @@ static inline void page_pool_release_page(struct page_pool *pool,
3211
3212 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
3213 {
3214 - return page->dma_addr;
3215 + dma_addr_t ret = page->dma_addr[0];
3216 + if (sizeof(dma_addr_t) > sizeof(unsigned long))
3217 + ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
3218 + return ret;
3219 +}
3220 +
3221 +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
3222 +{
3223 + page->dma_addr[0] = addr;
3224 + if (sizeof(dma_addr_t) > sizeof(unsigned long))
3225 + page->dma_addr[1] = upper_32_bits(addr);
3226 }
3227
3228 static inline bool is_page_pool_compiled_in(void)
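
With 64-bit DMA addressing on a 32-bit kernel, dma_addr_t no longer fits
the single unsigned long that page->dma_addr used to be, hence the
two-word accessors. The "<< 16 << 16" is a shift by 32 spelled so the
expression stays well-defined even when dma_addr_t is itself 32 bits wide
and the branch is dead. A self-contained model, with a 32-bit stand-in
type playing the role of unsigned long:

#include <inttypes.h>
#include <stdio.h>

typedef uint32_t ulong32;	/* 32-bit "unsigned long" stand-in */
typedef uint64_t dma_addr64;

struct fake_page { ulong32 dma_addr[2]; };

static void set_dma_addr(struct fake_page *page, dma_addr64 addr)
{
	page->dma_addr[0] = (ulong32)addr;
	if (sizeof(dma_addr64) > sizeof(ulong32))
		page->dma_addr[1] = (ulong32)((addr >> 16) >> 16);
}

static dma_addr64 get_dma_addr(const struct fake_page *page)
{
	dma_addr64 ret = page->dma_addr[0];

	if (sizeof(dma_addr64) > sizeof(ulong32))
		ret |= (dma_addr64)page->dma_addr[1] << 16 << 16;
	return ret;
}

int main(void)
{
	struct fake_page page;

	set_dma_addr(&page, 0x123456789abcdef0ULL);
	printf("0x%" PRIx64 "\n", get_dma_addr(&page));
	return 0;
}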
3229 diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
3230 index 1f2a708413f5d..beb2cadba8a9c 100644
3231 --- a/include/uapi/linux/netfilter/xt_SECMARK.h
3232 +++ b/include/uapi/linux/netfilter/xt_SECMARK.h
3233 @@ -20,4 +20,10 @@ struct xt_secmark_target_info {
3234 char secctx[SECMARK_SECCTX_MAX];
3235 };
3236
3237 +struct xt_secmark_target_info_v1 {
3238 + __u8 mode;
3239 + char secctx[SECMARK_SECCTX_MAX];
3240 + __u32 secid;
3241 +};
3242 +
3243 #endif /*_XT_SECMARK_H_target */
3244 diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
3245 index 4e74db89bd23f..b17998fa03f12 100644
3246 --- a/kernel/kexec_file.c
3247 +++ b/kernel/kexec_file.c
3248 @@ -740,8 +740,10 @@ static int kexec_calculate_store_digests(struct kimage *image)
3249
3250 sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
3251 sha_regions = vzalloc(sha_region_sz);
3252 - if (!sha_regions)
3253 + if (!sha_regions) {
3254 + ret = -ENOMEM;
3255 goto out_free_desc;
3256 + }
3257
3258 desc->tfm = tfm;
3259
3260 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3261 index 2ce61018e33b6..a3e95d7779e15 100644
3262 --- a/kernel/sched/core.c
3263 +++ b/kernel/sched/core.c
3264 @@ -820,7 +820,7 @@ DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
3265
3266 static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
3267 {
3268 - return clamp_value / UCLAMP_BUCKET_DELTA;
3269 + return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
3270 }
3271
3272 static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value)
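
uclamp_bucket_id() divides by UCLAMP_BUCKET_DELTA, which mainline derives
as DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS); whenever that
rounding goes down, the maximum clamp value lands one past the last
bucket, which is what the min_t() caps. With the assumed config below
(20 buckets over the usual 1024 scale) the raw index is 20 against valid
indices 0..19:

#include <stdio.h>

#define CAP_SCALE 1024
#define BUCKETS   20
#define DELTA     ((CAP_SCALE + BUCKETS / 2) / BUCKETS) /* DIV_ROUND_CLOSEST */

static unsigned int bucket_id(unsigned int clamp_value)
{
	unsigned int id = clamp_value / DELTA;

	return id < BUCKETS - 1 ? id : BUCKETS - 1;	/* the min_t() */
}

int main(void)
{
	printf("raw index for max clamp: %d (valid: 0..%d)\n",
	       CAP_SCALE / DELTA, BUCKETS - 1);
	printf("clamped: %u\n", bucket_id(CAP_SCALE));
	return 0;
}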
3273 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3274 index 93ab546b6e16c..092aa5e47251a 100644
3275 --- a/kernel/sched/fair.c
3276 +++ b/kernel/sched/fair.c
3277 @@ -10146,16 +10146,22 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
3278 {
3279 struct cfs_rq *cfs_rq;
3280
3281 + list_add_leaf_cfs_rq(cfs_rq_of(se));
3282 +
3283 /* Start to propagate at parent */
3284 se = se->parent;
3285
3286 for_each_sched_entity(se) {
3287 cfs_rq = cfs_rq_of(se);
3288
3289 - if (cfs_rq_throttled(cfs_rq))
3290 - break;
3291 + if (!cfs_rq_throttled(cfs_rq)) {
3292 + update_load_avg(cfs_rq, se, UPDATE_TG);
3293 + list_add_leaf_cfs_rq(cfs_rq);
3294 + continue;
3295 + }
3296
3297 - update_load_avg(cfs_rq, se, UPDATE_TG);
3298 + if (list_add_leaf_cfs_rq(cfs_rq))
3299 + break;
3300 }
3301 }
3302 #else
3303 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
3304 index 7998affa45d49..c87d5b6a8a55a 100644
3305 --- a/lib/kobject_uevent.c
3306 +++ b/lib/kobject_uevent.c
3307 @@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
3308
3309 static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
3310 {
3311 + int buffer_size = sizeof(env->buf) - env->buflen;
3312 int len;
3313
3314 - len = strlcpy(&env->buf[env->buflen], subsystem,
3315 - sizeof(env->buf) - env->buflen);
3316 - if (len >= (sizeof(env->buf) - env->buflen)) {
3317 - WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
3318 + len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
3319 + if (len >= buffer_size) {
3320 + pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
3321 + buffer_size, len);
3322 return -ENOMEM;
3323 }
3324
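
Besides reporting the sizes, the change above demotes a WARN() that a
user could trigger (an overlong subsystem string) to a plain pr_warn().
The truncation test itself leans on strlcpy()'s contract: the return
value is the length the source wanted, so ret >= size means it did not
fit. glibc has no strlcpy(), so this sketch carries a minimal local one:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;	/* length src needed, excluding the NUL */
}

int main(void)
{
	char buf[8];
	size_t want = my_strlcpy(buf, "subsystem-name-too-long", sizeof(buf));

	if (want >= sizeof(buf))
		printf("buffer size of %zu too small, needed %zu\n",
		       sizeof(buf), want);
	return 0;
}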
3325 diff --git a/lib/nlattr.c b/lib/nlattr.c
3326 index cace9b3077810..0d84f79cb4b54 100644
3327 --- a/lib/nlattr.c
3328 +++ b/lib/nlattr.c
3329 @@ -609,7 +609,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
3330 int attrlen = nla_len(nla);
3331 int d;
3332
3333 - if (attrlen > 0 && buf[attrlen - 1] == '\0')
3334 + while (attrlen > 0 && buf[attrlen - 1] == '\0')
3335 attrlen--;
3336
3337 d = attrlen - len;
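
Turning the if into a while makes nla_strcmp() strip every trailing NUL,
not just the last one, so a string attribute whose payload arrives padded
with several '\0' bytes still compares equal. The fixed comparison as a
standalone function:

#include <stdio.h>
#include <string.h>

static int attr_strcmp(const char *buf, int attrlen, const char *str)
{
	int len = strlen(str);
	int d;

	while (attrlen > 0 && buf[attrlen - 1] == '\0')
		attrlen--;

	d = attrlen - len;
	if (d == 0)
		d = memcmp(buf, str, len);
	return d;
}

int main(void)
{
	char payload[8] = "eth0";	/* arrives as "eth0\0\0\0\0" */

	printf("match: %s\n",
	       attr_strcmp(payload, sizeof(payload), "eth0") == 0 ? "yes" : "no");
	return 0;
}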
3338 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3339 index 5253c67acb1df..3b08e34a775df 100644
3340 --- a/mm/hugetlb.c
3341 +++ b/mm/hugetlb.c
3342 @@ -591,13 +591,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode)
3343 {
3344 struct hugepage_subpool *spool = subpool_inode(inode);
3345 long rsv_adjust;
3346 + bool reserved = false;
3347
3348 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
3349 - if (rsv_adjust) {
3350 + if (rsv_adjust > 0) {
3351 struct hstate *h = hstate_inode(inode);
3352
3353 - hugetlb_acct_memory(h, 1);
3354 + if (!hugetlb_acct_memory(h, 1))
3355 + reserved = true;
3356 + } else if (!rsv_adjust) {
3357 + reserved = true;
3358 }
3359 +
3360 + if (!reserved)
3361 + pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
3362 }
3363
3364 /*
3365 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
3366 index f0d7e6483ba32..3c2326568193c 100644
3367 --- a/mm/khugepaged.c
3368 +++ b/mm/khugepaged.c
3369 @@ -628,17 +628,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
3370 mmu_notifier_test_young(vma->vm_mm, address))
3371 referenced++;
3372 }
3373 - if (likely(writable)) {
3374 - if (likely(referenced)) {
3375 - result = SCAN_SUCCEED;
3376 - trace_mm_collapse_huge_page_isolate(page, none_or_zero,
3377 - referenced, writable, result);
3378 - return 1;
3379 - }
3380 - } else {
3381 +
3382 + if (unlikely(!writable)) {
3383 result = SCAN_PAGE_RO;
3384 + } else if (unlikely(!referenced)) {
3385 + result = SCAN_LACK_REFERENCED_PAGE;
3386 + } else {
3387 + result = SCAN_SUCCEED;
3388 + trace_mm_collapse_huge_page_isolate(page, none_or_zero,
3389 + referenced, writable, result);
3390 + return 1;
3391 }
3392 -
3393 out:
3394 release_pte_pages(pte, _pte);
3395 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
3396 diff --git a/mm/ksm.c b/mm/ksm.c
3397 index e486c54d921b9..0bbae78aaaa0a 100644
3398 --- a/mm/ksm.c
3399 +++ b/mm/ksm.c
3400 @@ -793,6 +793,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
3401 stable_node->rmap_hlist_len--;
3402
3403 put_anon_vma(rmap_item->anon_vma);
3404 + rmap_item->head = NULL;
3405 rmap_item->address &= PAGE_MASK;
3406
3407 } else if (rmap_item->address & UNSTABLE_FLAG) {
3408 diff --git a/mm/migrate.c b/mm/migrate.c
3409 index c4c313e47f123..00bbe57c1ce22 100644
3410 --- a/mm/migrate.c
3411 +++ b/mm/migrate.c
3412 @@ -2771,6 +2771,13 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
3413
3414 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
3415 entry = swp_entry_to_pte(swp_entry);
3416 + } else {
3417 + /*
3418 + * For now we only support migrating to un-addressable
3419 + * device memory.
3420 + */
3421 + pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
3422 + goto abort;
3423 }
3424 } else {
3425 entry = mk_pte(page, vma->vm_page_prot);
3426 diff --git a/mm/shmem.c b/mm/shmem.c
3427 index 98802ca76a5c3..b119c44435bff 100644
3428 --- a/mm/shmem.c
3429 +++ b/mm/shmem.c
3430 @@ -2208,25 +2208,11 @@ out_nomem:
3431 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
3432 {
3433 struct shmem_inode_info *info = SHMEM_I(file_inode(file));
3434 + int ret;
3435
3436 - if (info->seals & F_SEAL_FUTURE_WRITE) {
3437 - /*
3438 - * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
3439 - * "future write" seal active.
3440 - */
3441 - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
3442 - return -EPERM;
3443 -
3444 - /*
3445 - * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
3446 - * MAP_SHARED and read-only, take care to not allow mprotect to
3447 - * revert protections on such mappings. Do this only for shared
3448 - * mappings. For private mappings, don't need to mask
3449 - * VM_MAYWRITE as we still want them to be COW-writable.
3450 - */
3451 - if (vma->vm_flags & VM_SHARED)
3452 - vma->vm_flags &= ~(VM_MAYWRITE);
3453 - }
3454 + ret = seal_check_future_write(info->seals, vma);
3455 + if (ret)
3456 + return ret;
3457
3458 file_accessed(file);
3459 vma->vm_ops = &shmem_vm_ops;
3460 @@ -2327,8 +2313,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
3461 pgoff_t offset, max_off;
3462
3463 ret = -ENOMEM;
3464 - if (!shmem_inode_acct_block(inode, 1))
3465 + if (!shmem_inode_acct_block(inode, 1)) {
3466 + /*
3467 + * We may have got a page, returned -ENOENT triggering a retry,
3468 + * and now we find ourselves with -ENOMEM. Release the page, to
3469 + * avoid a BUG_ON in our caller.
3470 + */
3471 + if (unlikely(*pagep)) {
3472 + put_page(*pagep);
3473 + *pagep = NULL;
3474 + }
3475 goto out;
3476 + }
3477
3478 if (!*pagep) {
3479 page = shmem_alloc_page(gfp, info, pgoff);
3480 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
3481 index 3499bace25eca..959a16b133033 100644
3482 --- a/net/bluetooth/l2cap_core.c
3483 +++ b/net/bluetooth/l2cap_core.c
3484 @@ -450,6 +450,8 @@ struct l2cap_chan *l2cap_chan_create(void)
3485 if (!chan)
3486 return NULL;
3487
3488 + skb_queue_head_init(&chan->tx_q);
3489 + skb_queue_head_init(&chan->srej_q);
3490 mutex_init(&chan->lock);
3491
3492 /* Set default lock nesting level */
3493 @@ -515,7 +517,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
3494 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
3495 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
3496 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
3497 +
3498 chan->conf_state = 0;
3499 + set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
3500
3501 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
3502 }
3503 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
3504 index 8648c5211ebe6..e693fee08623c 100644
3505 --- a/net/bluetooth/l2cap_sock.c
3506 +++ b/net/bluetooth/l2cap_sock.c
3507 @@ -179,9 +179,17 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
3508 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3509 struct sockaddr_l2 la;
3510 int len, err = 0;
3511 + bool zapped;
3512
3513 BT_DBG("sk %p", sk);
3514
3515 + lock_sock(sk);
3516 + zapped = sock_flag(sk, SOCK_ZAPPED);
3517 + release_sock(sk);
3518 +
3519 + if (zapped)
3520 + return -EINVAL;
3521 +
3522 if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
3523 addr->sa_family != AF_BLUETOOTH)
3524 return -EINVAL;
3525 diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
3526 index b18cdf03edb35..c4e0f4777df59 100644
3527 --- a/net/bridge/br_arp_nd_proxy.c
3528 +++ b/net/bridge/br_arp_nd_proxy.c
3529 @@ -155,7 +155,9 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
3530 if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
3531 if (p && (p->flags & BR_NEIGH_SUPPRESS))
3532 return;
3533 - if (ipv4_is_zeronet(sip) || sip == tip) {
3534 + if (parp->ar_op != htons(ARPOP_RREQUEST) &&
3535 + parp->ar_op != htons(ARPOP_RREPLY) &&
3536 + (ipv4_is_zeronet(sip) || sip == tip)) {
3537 /* prevent flooding to neigh suppress ports */
3538 BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
3539 return;
3540 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
3541 index cd9bc67381b22..76506975d59a5 100644
3542 --- a/net/core/ethtool.c
3543 +++ b/net/core/ethtool.c
3544 @@ -589,7 +589,7 @@ store_link_ksettings_for_user(void __user *to,
3545 {
3546 struct ethtool_link_usettings link_usettings;
3547
3548 - memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
3549 + memcpy(&link_usettings, from, sizeof(link_usettings));
3550 bitmap_to_arr32(link_usettings.link_modes.supported,
3551 from->link_modes.supported,
3552 __ETHTOOL_LINK_MODE_MASK_NBITS);
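
The old memcpy() sized the copy by the whole ethtool_link_usettings
structure while pointing both arguments at the embedded base member: a
field-spanning copy that overreads the source member (the stray bytes
were overwritten by the bitmap conversions just below, but the overread
is what FORTIFY_SOURCE/KASAN object-size checks reject). Copying object
to object with the object's size is the cure. In miniature, with stand-in
types rather than the real ethtool structures:

#include <stdio.h>
#include <string.h>

struct base { int cmd; int speed; };
struct usettings { struct base base; unsigned long modes[4]; };

int main(void)
{
	struct usettings src = { { 1, 1000 }, { 0xff, 0, 0, 0 } };
	struct usettings dst;

	/* Buggy shape: spans out of src.base on layout assumptions. */
	memcpy(&dst.base, &src.base, sizeof(dst));

	/* Fixed shape: the size named matches the objects named. */
	memcpy(&dst, &src, sizeof(dst));
	printf("speed %d mode0 %lx\n", dst.base.speed, dst.modes[0]);
	return 0;
}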
3553 diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
3554 index da86c0e1b677d..96957a7c732fa 100644
3555 --- a/net/core/flow_dissector.c
3556 +++ b/net/core/flow_dissector.c
3557 @@ -811,8 +811,10 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
3558 key_addrs = skb_flow_dissector_target(flow_dissector,
3559 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
3560 target_container);
3561 - memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
3562 - sizeof(key_addrs->v6addrs));
3563 + memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
3564 + sizeof(key_addrs->v6addrs.src));
3565 + memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
3566 + sizeof(key_addrs->v6addrs.dst));
3567 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
3568 }
3569
3570 diff --git a/net/core/page_pool.c b/net/core/page_pool.c
3571 index dfc2501c35d9c..335f68eaaa05c 100644
3572 --- a/net/core/page_pool.c
3573 +++ b/net/core/page_pool.c
3574 @@ -157,7 +157,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
3575 put_page(page);
3576 return NULL;
3577 }
3578 - page->dma_addr = dma;
3579 + page_pool_set_dma_addr(page, dma);
3580
3581 skip_dma_map:
3582 /* Track how many pages are held 'in-flight' */
3583 @@ -216,12 +216,12 @@ static void __page_pool_clean_page(struct page_pool *pool,
3584 if (!(pool->p.flags & PP_FLAG_DMA_MAP))
3585 goto skip_dma_unmap;
3586
3587 - dma = page->dma_addr;
3588 + dma = page_pool_get_dma_addr(page);
3589 /* DMA unmap */
3590 dma_unmap_page_attrs(pool->p.dev, dma,
3591 PAGE_SIZE << pool->p.order, pool->p.dma_dir,
3592 DMA_ATTR_SKIP_CPU_SYNC);
3593 - page->dma_addr = 0;
3594 + page_pool_set_dma_addr(page, 0);
3595 skip_dma_unmap:
3596 /* This may be the last page returned, releasing the pool, so
3597 * it is not safe to reference pool afterwards.
3598 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
3599 index cc6180e08a4fc..01ddb0f70c578 100644
3600 --- a/net/ipv6/ip6_vti.c
3601 +++ b/net/ipv6/ip6_vti.c
3602 @@ -192,7 +192,6 @@ static int vti6_tnl_create2(struct net_device *dev)
3603
3604 strcpy(t->parms.name, dev->name);
3605
3606 - dev_hold(dev);
3607 vti6_tnl_link(ip6n, t);
3608
3609 return 0;
3610 @@ -921,6 +920,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
3611 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3612 if (!dev->tstats)
3613 return -ENOMEM;
3614 + dev_hold(dev);
3615 return 0;
3616 }
3617
3618 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
3619 index 17a3a1c938beb..44fd922cc32af 100644
3620 --- a/net/mac80211/mlme.c
3621 +++ b/net/mac80211/mlme.c
3622 @@ -1215,6 +1215,11 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
3623
3624 sdata->vif.csa_active = false;
3625 ifmgd->csa_waiting_bcn = false;
3626 + /*
3627 + * If the CSA IE is still present on the beacon after the switch,
3628 + * we need to consider it as a new CSA (possibly to self).
3629 + */
3630 + ifmgd->beacon_crc_valid = false;
3631
3632 ret = drv_post_channel_switch(sdata);
3633 if (ret) {
3634 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
3635 index 1a69825401263..a3faeacaa1cbb 100644
3636 --- a/net/netfilter/nf_conntrack_standalone.c
3637 +++ b/net/netfilter/nf_conntrack_standalone.c
3638 @@ -1071,8 +1071,11 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
3639 #endif
3640 }
3641
3642 - if (!net_eq(&init_net, net))
3643 + if (!net_eq(&init_net, net)) {
3644 + table[NF_SYSCTL_CT_MAX].mode = 0444;
3645 + table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
3646 table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
3647 + }
3648
3649 net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
3650 if (!net->ct.sysctl_header)
3651 diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
3652 index 916a3c7f9eafe..79fbf37291f38 100644
3653 --- a/net/netfilter/nfnetlink_osf.c
3654 +++ b/net/netfilter/nfnetlink_osf.c
3655 @@ -186,6 +186,8 @@ static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
3656
3657 ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
3658 sizeof(struct tcphdr), ctx->optsize, opts);
3659 + if (!ctx->optp)
3660 + return NULL;
3661 }
3662
3663 return tcp;
3664 diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
3665 index b331a3c9a3a84..9de0eb20e9544 100644
3666 --- a/net/netfilter/nft_set_hash.c
3667 +++ b/net/netfilter/nft_set_hash.c
3668 @@ -393,9 +393,17 @@ static void nft_rhash_destroy(const struct nft_set *set)
3669 (void *)set);
3670 }
3671
3672 +/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
3673 +#define NFT_MAX_BUCKETS (1U << 31)
3674 +
3675 static u32 nft_hash_buckets(u32 size)
3676 {
3677 - return roundup_pow_of_two(size * 4 / 3);
3678 + u64 val = div_u64((u64)size * 4, 3);
3679 +
3680 + if (val >= NFT_MAX_BUCKETS)
3681 + return NFT_MAX_BUCKETS;
3682 +
3683 + return roundup_pow_of_two(val);
3684 }
3685
3686 static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
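
In the old "size * 4 / 3" the multiply happened in u32, so requested set
sizes above about 1<<30 wrapped and roundup_pow_of_two() then ran on a
tiny (or zero) value; 1U<<31 is the largest power of two a u32 bucket
count can represent, hence the cap. Standalone, with roundup_pow_of_two()
open-coded for the demo:

#include <inttypes.h>
#include <stdio.h>

#define MAX_BUCKETS (1U << 31)

static uint32_t roundup_p2(uint64_t v)
{
	uint64_t p = 1;

	while (p < v)
		p <<= 1;
	return (uint32_t)p;
}

static uint32_t buckets(uint32_t size)
{
	uint64_t val = (uint64_t)size * 4 / 3;

	return val >= MAX_BUCKETS ? MAX_BUCKETS : roundup_p2(val);
}

int main(void)
{
	uint32_t size = 0xc0000000u;	/* huge set: ~3.2e9 elements */

	printf("u32 math wraps to: %u\n", (uint32_t)(size * 4 / 3));
	printf("fixed: %u\n", buckets(size));
	return 0;
}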
3687 diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
3688 index 2317721f3ecb1..ea7aeea19b3bb 100644
3689 --- a/net/netfilter/xt_SECMARK.c
3690 +++ b/net/netfilter/xt_SECMARK.c
3691 @@ -26,10 +26,9 @@ MODULE_ALIAS("ip6t_SECMARK");
3692 static u8 mode;
3693
3694 static unsigned int
3695 -secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
3696 +secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info)
3697 {
3698 u32 secmark = 0;
3699 - const struct xt_secmark_target_info *info = par->targinfo;
3700
3701 switch (mode) {
3702 case SECMARK_MODE_SEL:
3703 @@ -43,7 +42,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
3704 return XT_CONTINUE;
3705 }
3706
3707 -static int checkentry_lsm(struct xt_secmark_target_info *info)
3708 +static int checkentry_lsm(struct xt_secmark_target_info_v1 *info)
3709 {
3710 int err;
3711
3712 @@ -75,15 +74,15 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
3713 return 0;
3714 }
3715
3716 -static int secmark_tg_check(const struct xt_tgchk_param *par)
3717 +static int
3718 +secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
3719 {
3720 - struct xt_secmark_target_info *info = par->targinfo;
3721 int err;
3722
3723 - if (strcmp(par->table, "mangle") != 0 &&
3724 - strcmp(par->table, "security") != 0) {
3725 + if (strcmp(table, "mangle") != 0 &&
3726 + strcmp(table, "security") != 0) {
3727 pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
3728 - par->table);
3729 + table);
3730 return -EINVAL;
3731 }
3732
3733 @@ -118,25 +117,76 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
3734 }
3735 }
3736
3737 -static struct xt_target secmark_tg_reg __read_mostly = {
3738 - .name = "SECMARK",
3739 - .revision = 0,
3740 - .family = NFPROTO_UNSPEC,
3741 - .checkentry = secmark_tg_check,
3742 - .destroy = secmark_tg_destroy,
3743 - .target = secmark_tg,
3744 - .targetsize = sizeof(struct xt_secmark_target_info),
3745 - .me = THIS_MODULE,
3746 +static int secmark_tg_check_v0(const struct xt_tgchk_param *par)
3747 +{
3748 + struct xt_secmark_target_info *info = par->targinfo;
3749 + struct xt_secmark_target_info_v1 newinfo = {
3750 + .mode = info->mode,
3751 + };
3752 + int ret;
3753 +
3754 + memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX);
3755 +
3756 + ret = secmark_tg_check(par->table, &newinfo);
3757 + info->secid = newinfo.secid;
3758 +
3759 + return ret;
3760 +}
3761 +
3762 +static unsigned int
3763 +secmark_tg_v0(struct sk_buff *skb, const struct xt_action_param *par)
3764 +{
3765 + const struct xt_secmark_target_info *info = par->targinfo;
3766 + struct xt_secmark_target_info_v1 newinfo = {
3767 + .secid = info->secid,
3768 + };
3769 +
3770 + return secmark_tg(skb, &newinfo);
3771 +}
3772 +
3773 +static int secmark_tg_check_v1(const struct xt_tgchk_param *par)
3774 +{
3775 + return secmark_tg_check(par->table, par->targinfo);
3776 +}
3777 +
3778 +static unsigned int
3779 +secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
3780 +{
3781 + return secmark_tg(skb, par->targinfo);
3782 +}
3783 +
3784 +static struct xt_target secmark_tg_reg[] __read_mostly = {
3785 + {
3786 + .name = "SECMARK",
3787 + .revision = 0,
3788 + .family = NFPROTO_UNSPEC,
3789 + .checkentry = secmark_tg_check_v0,
3790 + .destroy = secmark_tg_destroy,
3791 + .target = secmark_tg_v0,
3792 + .targetsize = sizeof(struct xt_secmark_target_info),
3793 + .me = THIS_MODULE,
3794 + },
3795 + {
3796 + .name = "SECMARK",
3797 + .revision = 1,
3798 + .family = NFPROTO_UNSPEC,
3799 + .checkentry = secmark_tg_check_v1,
3800 + .destroy = secmark_tg_destroy,
3801 + .target = secmark_tg_v1,
3802 + .targetsize = sizeof(struct xt_secmark_target_info_v1),
3803 + .usersize = offsetof(struct xt_secmark_target_info_v1, secid),
3804 + .me = THIS_MODULE,
3805 + },
3806 };
3807
3808 static int __init secmark_tg_init(void)
3809 {
3810 - return xt_register_target(&secmark_tg_reg);
3811 + return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
3812 }
3813
3814 static void __exit secmark_tg_exit(void)
3815 {
3816 - xt_unregister_target(&secmark_tg_reg);
3817 + xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
3818 }
3819
3820 module_init(secmark_tg_init);
3821 diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
3822 index 09116be995113..a4de4853c79de 100644
3823 --- a/net/sched/sch_taprio.c
3824 +++ b/net/sched/sch_taprio.c
3825 @@ -900,6 +900,12 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
3826
3827 list_for_each_entry(entry, &new->entries, list)
3828 cycle = ktime_add_ns(cycle, entry->interval);
3829 +
3830 + if (!cycle) {
3831 + NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
3832 + return -EINVAL;
3833 + }
3834 +
3835 new->cycle_time = cycle;
3836 }
3837
3838 diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
3839 index d5eda966a706a..4ffb9116b6f27 100644
3840 --- a/net/sctp/sm_make_chunk.c
3841 +++ b/net/sctp/sm_make_chunk.c
3842 @@ -3134,7 +3134,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
3843 * primary.
3844 */
3845 if (af->is_any(&addr))
3846 - memcpy(&addr.v4, sctp_source(asconf), sizeof(addr));
3847 + memcpy(&addr, sctp_source(asconf), sizeof(addr));
3848
3849 if (security_sctp_bind_connect(asoc->ep->base.sk,
3850 SCTP_PARAM_SET_PRIMARY,
3851 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
3852 index 84138a07e936d..82a202d71a31e 100644
3853 --- a/net/sctp/sm_statefuns.c
3854 +++ b/net/sctp/sm_statefuns.c
3855 @@ -1841,20 +1841,35 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
3856 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
3857 sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
3858
3859 - repl = sctp_make_cookie_ack(new_asoc, chunk);
3860 + /* Update the content of current association. */
3861 + if (sctp_assoc_update((struct sctp_association *)asoc, new_asoc)) {
3862 + struct sctp_chunk *abort;
3863 +
3864 + abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
3865 + if (abort) {
3866 + sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
3867 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
3868 + }
3869 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
3870 + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3871 + SCTP_PERR(SCTP_ERROR_RSRC_LOW));
3872 + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
3873 + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
3874 + goto nomem;
3875 + }
3876 +
3877 + repl = sctp_make_cookie_ack(asoc, chunk);
3878 if (!repl)
3879 goto nomem;
3880
3881 /* Report association restart to upper layer. */
3882 ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
3883 - new_asoc->c.sinit_num_ostreams,
3884 - new_asoc->c.sinit_max_instreams,
3885 + asoc->c.sinit_num_ostreams,
3886 + asoc->c.sinit_max_instreams,
3887 NULL, GFP_ATOMIC);
3888 if (!ev)
3889 goto nomem_ev;
3890
3891 - /* Update the content of current association. */
3892 - sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
3893 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
3894 if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
3895 sctp_state(asoc, SHUTDOWN_SENT)) &&
3896 @@ -1918,7 +1933,8 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
3897 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
3898 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
3899 SCTP_STATE(SCTP_STATE_ESTABLISHED));
3900 - SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
3901 + if (asoc->state < SCTP_STATE_ESTABLISHED)
3902 + SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
3903 sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
3904
3905 repl = sctp_make_cookie_ack(new_asoc, chunk);
3906 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
3907 index dc09a72f81101..51986f7ead819 100644
3908 --- a/net/smc/af_smc.c
3909 +++ b/net/smc/af_smc.c
3910 @@ -1709,6 +1709,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
3911 struct smc_sock *smc;
3912 int val, rc;
3913
3914 + if (level == SOL_TCP && optname == TCP_ULP)
3915 + return -EOPNOTSUPP;
3916 +
3917 smc = smc_sk(sk);
3918
3919 /* generic setsockopts reaching us here always apply to the
3920 @@ -1730,7 +1733,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
3921 if (rc || smc->use_fallback)
3922 goto out;
3923 switch (optname) {
3924 - case TCP_ULP:
3925 case TCP_FASTOPEN:
3926 case TCP_FASTOPEN_CONNECT:
3927 case TCP_FASTOPEN_KEY:
3928 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
3929 index f1088ca39d44c..b6039642df67e 100644
3930 --- a/net/sunrpc/clnt.c
3931 +++ b/net/sunrpc/clnt.c
3932 @@ -2505,12 +2505,6 @@ call_decode(struct rpc_task *task)
3933 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
3934 }
3935
3936 - /*
3937 - * Ensure that we see all writes made by xprt_complete_rqst()
3938 - * before it changed req->rq_reply_bytes_recvd.
3939 - */
3940 - smp_rmb();
3941 -
3942 /*
3943 * Did we ever call xprt_complete_rqst()? If not, we should assume
3944 * the message is incomplete.
3945 @@ -2519,6 +2513,11 @@ call_decode(struct rpc_task *task)
3946 if (!req->rq_reply_bytes_recvd)
3947 goto out;
3948
3949 + /* Ensure that we see all writes made by xprt_complete_rqst()
3950 + * before it changed req->rq_reply_bytes_recvd.
3951 + */
3952 + smp_rmb();
3953 +
3954 req->rq_rcv_buf.len = req->rq_private_buf.len;
3955
3956 /* Check that the softirq receive buffer is valid */
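
A read barrier only orders the loads issued after it against those before
it, so an smp_rmb() ahead of the rq_reply_bytes_recvd test ordered
nothing useful; it belongs between that test and the reads of the reply
data it guards. The same publish/consume pairing in portable C11 atomics,
with acquire/release standing in for the kernel's flag-check-then-smp_rmb()
and write-then-publish sequences (names invented for the sketch):

#include <stdatomic.h>
#include <stdio.h>

static int reply_len;			/* payload */
static atomic_int bytes_recvd;		/* publication flag */

static void complete_rqst(int len)	/* the xprt_complete_rqst() role */
{
	reply_len = len;		/* fill the payload first... */
	atomic_store_explicit(&bytes_recvd, len, memory_order_release);
}

static void decode(void)		/* the call_decode() role */
{
	if (!atomic_load_explicit(&bytes_recvd, memory_order_acquire))
		return;			/* reply not complete yet */
	/* Only now is it safe to look at what the writer published. */
	printf("len %d\n", reply_len);
}

int main(void)
{
	complete_rqst(42);
	decode();
	return 0;
}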
3957 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
3958 index 11be9a84f8de9..561ea834f7327 100644
3959 --- a/net/tipc/netlink_compat.c
3960 +++ b/net/tipc/netlink_compat.c
3961 @@ -673,7 +673,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
3962 if (err)
3963 return err;
3964
3965 - link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
3966 + link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
3967 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
3968 nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
3969 TIPC_MAX_LINK_NAME);
3970 diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
3971 index 107da148820fc..9c74b45c5720f 100644
3972 --- a/samples/bpf/tracex1_kern.c
3973 +++ b/samples/bpf/tracex1_kern.c
3974 @@ -20,7 +20,7 @@
3975 SEC("kprobe/__netif_receive_skb_core")
3976 int bpf_prog1(struct pt_regs *ctx)
3977 {
3978 - /* attaches to kprobe netif_receive_skb,
3979 + /* attaches to kprobe __netif_receive_skb_core,
3980 * looks for packets on the loopback device and prints them
3981 */
3982 char devname[IFNAMSIZ];
3983 @@ -29,7 +29,7 @@ int bpf_prog1(struct pt_regs *ctx)
3984 int len;
3985
3986 /* non-portable! works for the given kernel only */
3987 - skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
3988 + bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
3989 dev = _(skb->dev);
3990 len = _(skb->len);
3991
3992 diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
3993 index b7c1ef757178a..331b2cc917ec2 100644
3994 --- a/scripts/kconfig/nconf.c
3995 +++ b/scripts/kconfig/nconf.c
3996 @@ -503,8 +503,8 @@ static int get_mext_match(const char *match_str, match_f flag)
3997 else if (flag == FIND_NEXT_MATCH_UP)
3998 --match_start;
3999
4000 + match_start = (match_start + items_num) % items_num;
4001 index = match_start;
4002 - index = (index + items_num) % items_num;
4003 while (true) {
4004 char *str = k_menu_items[index].str;
4005 if (strcasestr(str, match_str) != NULL)
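
get_mext_match() walks the menu circularly, so --match_start can reach -1
and ++match_start can reach items_num; in C, % keeps the dividend's sign,
and the old code only normalized the working index while the loop's exit
test still compared against the out-of-range match_start and never fired.
Normalizing match_start itself fixes both users. The operative detail:

#include <stdio.h>

int main(void)
{
	int items_num = 5;
	int match_start = -1;	/* after --match_start at the first item */

	printf("match_start %% n       = %d\n", match_start % items_num);
	printf("(match_start + n) %% n = %d\n",
	       (match_start + items_num) % items_num);
	return 0;
}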
4006 diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
4007 index ce07ea0d4e71d..3935e90c8e8ff 100644
4008 --- a/sound/firewire/bebob/bebob_stream.c
4009 +++ b/sound/firewire/bebob/bebob_stream.c
4010 @@ -534,20 +534,22 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
4011 static int keep_resources(struct snd_bebob *bebob, struct amdtp_stream *stream,
4012 unsigned int rate, unsigned int index)
4013 {
4014 - struct snd_bebob_stream_formation *formation;
4015 + unsigned int pcm_channels;
4016 + unsigned int midi_ports;
4017 struct cmp_connection *conn;
4018 int err;
4019
4020 if (stream == &bebob->tx_stream) {
4021 - formation = bebob->tx_stream_formations + index;
4022 + pcm_channels = bebob->tx_stream_formations[index].pcm;
4023 + midi_ports = bebob->midi_input_ports;
4024 conn = &bebob->out_conn;
4025 } else {
4026 - formation = bebob->rx_stream_formations + index;
4027 + pcm_channels = bebob->rx_stream_formations[index].pcm;
4028 + midi_ports = bebob->midi_output_ports;
4029 conn = &bebob->in_conn;
4030 }
4031
4032 - err = amdtp_am824_set_parameters(stream, rate, formation->pcm,
4033 - formation->midi, false);
4034 + err = amdtp_am824_set_parameters(stream, rate, pcm_channels, midi_ports, false);
4035 if (err < 0)
4036 return err;
4037
4038 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4039 index ce38b5d4670da..f620b402b309f 100644
4040 --- a/sound/pci/hda/patch_hdmi.c
4041 +++ b/sound/pci/hda/patch_hdmi.c
4042 @@ -2567,7 +2567,7 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id)
4043 /* skip notification during system suspend (but not in runtime PM);
4044 * the state will be updated at resume
4045 */
4046 - if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
4047 + if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
4048 return;
4049 /* ditto during suspend/resume process itself */
4050 if (snd_hdac_is_in_pm(&codec->core))
4051 @@ -2772,7 +2772,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
4052 /* skip notification during system suspend (but not in runtime PM);
4053 * the state will be updated at resume
4054 */
4055 - if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
4056 + if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
4057 return;
4058 /* ditto during suspend/resume process itself */
4059 if (snd_hdac_is_in_pm(&codec->core))
4060 diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
4061 index 5cbdc9be9c7e7..c7b3e76ea2d26 100644
4062 --- a/sound/pci/rme9652/hdsp.c
4063 +++ b/sound/pci/rme9652/hdsp.c
4064 @@ -5326,7 +5326,8 @@ static int snd_hdsp_free(struct hdsp *hdsp)
4065 if (hdsp->port)
4066 pci_release_regions(hdsp->pci);
4067
4068 - pci_disable_device(hdsp->pci);
4069 + if (pci_is_enabled(hdsp->pci))
4070 + pci_disable_device(hdsp->pci);
4071 return 0;
4072 }
4073
4074 diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
4075 index 81a6f4b2bd3c5..e34f07c9ff470 100644
4076 --- a/sound/pci/rme9652/hdspm.c
4077 +++ b/sound/pci/rme9652/hdspm.c
4078 @@ -6889,7 +6889,8 @@ static int snd_hdspm_free(struct hdspm * hdspm)
4079 if (hdspm->port)
4080 pci_release_regions(hdspm->pci);
4081
4082 - pci_disable_device(hdspm->pci);
4083 + if (pci_is_enabled(hdspm->pci))
4084 + pci_disable_device(hdspm->pci);
4085 return 0;
4086 }
4087
4088 diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
4089 index 4c851f8dcaf88..73ad6e74aac9f 100644
4090 --- a/sound/pci/rme9652/rme9652.c
4091 +++ b/sound/pci/rme9652/rme9652.c
4092 @@ -1745,7 +1745,8 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
4093 if (rme9652->port)
4094 pci_release_regions(rme9652->pci);
4095
4096 - pci_disable_device(rme9652->pci);
4097 + if (pci_is_enabled(rme9652->pci))
4098 + pci_disable_device(rme9652->pci);
4099 return 0;
4100 }
4101
4102 diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
4103 index 9593a9a27bf85..d8ab8af2c7869 100644
4104 --- a/sound/soc/codecs/rt286.c
4105 +++ b/sound/soc/codecs/rt286.c
4106 @@ -171,6 +171,9 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg)
4107 case RT286_PROC_COEF:
4108 case RT286_SET_AMP_GAIN_ADC_IN1:
4109 case RT286_SET_AMP_GAIN_ADC_IN2:
4110 + case RT286_SET_GPIO_MASK:
4111 + case RT286_SET_GPIO_DIRECTION:
4112 + case RT286_SET_GPIO_DATA:
4113 case RT286_SET_POWER(RT286_DAC_OUT1):
4114 case RT286_SET_POWER(RT286_DAC_OUT2):
4115 case RT286_SET_POWER(RT286_ADC_IN1):
4116 @@ -1115,12 +1118,11 @@ static const struct dmi_system_id force_combo_jack_table[] = {
4117 { }
4118 };
4119
4120 -static const struct dmi_system_id dmi_dell_dino[] = {
4121 +static const struct dmi_system_id dmi_dell[] = {
4122 {
4123 - .ident = "Dell Dino",
4124 + .ident = "Dell",
4125 .matches = {
4126 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
4127 - DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
4128 }
4129 },
4130 { }
4131 @@ -1131,7 +1133,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
4132 {
4133 struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev);
4134 struct rt286_priv *rt286;
4135 - int i, ret, val;
4136 + int i, ret, vendor_id;
4137
4138 rt286 = devm_kzalloc(&i2c->dev, sizeof(*rt286),
4139 GFP_KERNEL);
4140 @@ -1147,14 +1149,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
4141 }
4142
4143 ret = regmap_read(rt286->regmap,
4144 - RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
4145 + RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id);
4146 if (ret != 0) {
4147 dev_err(&i2c->dev, "I2C error %d\n", ret);
4148 return ret;
4149 }
4150 - if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
4151 + if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) {
4152 dev_err(&i2c->dev,
4153 - "Device with ID register %#x is not rt286\n", val);
4154 + "Device with ID register %#x is not rt286\n",
4155 + vendor_id);
4156 return -ENODEV;
4157 }
4158
4159 @@ -1178,8 +1181,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
4160 if (pdata)
4161 rt286->pdata = *pdata;
4162
4163 - if (dmi_check_system(force_combo_jack_table) ||
4164 - dmi_check_system(dmi_dell_dino))
4165 + if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) ||
4166 + dmi_check_system(force_combo_jack_table))
4167 rt286->pdata.cbj_en = true;
4168
4169 regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
4170 @@ -1218,7 +1221,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
4171 regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
4172 regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
4173
4174 - if (dmi_check_system(dmi_dell_dino)) {
4175 + if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) {
4176 regmap_update_bits(rt286->regmap,
4177 RT286_SET_GPIO_MASK, 0x40, 0x40);
4178 regmap_update_bits(rt286->regmap,
4179 diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
4180 index cfd3077174731..46a81d4f0b2de 100644
4181 --- a/sound/soc/intel/boards/bytcr_rt5640.c
4182 +++ b/sound/soc/intel/boards/bytcr_rt5640.c
4183 @@ -476,6 +476,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
4184 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
4185 },
4186 .driver_data = (void *)(BYT_RT5640_IN1_MAP |
4187 + BYT_RT5640_JD_SRC_JD2_IN4N |
4188 + BYT_RT5640_OVCD_TH_2000UA |
4189 + BYT_RT5640_OVCD_SF_0P75 |
4190 BYT_RT5640_MONO_SPEAKER |
4191 BYT_RT5640_DIFF_MIC |
4192 BYT_RT5640_SSP0_AIF2 |
4193 @@ -509,6 +512,23 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
4194 BYT_RT5640_SSP0_AIF1 |
4195 BYT_RT5640_MCLK_EN),
4196 },
4197 + {
4198 + /* Chuwi Hi8 (CWI509) */
4199 + .matches = {
4200 + DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
4201 + DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
4202 + DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
4203 + DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
4204 + },
4205 + .driver_data = (void *)(BYT_RT5640_IN1_MAP |
4206 + BYT_RT5640_JD_SRC_JD2_IN4N |
4207 + BYT_RT5640_OVCD_TH_2000UA |
4208 + BYT_RT5640_OVCD_SF_0P75 |
4209 + BYT_RT5640_MONO_SPEAKER |
4210 + BYT_RT5640_DIFF_MIC |
4211 + BYT_RT5640_SSP0_AIF1 |
4212 + BYT_RT5640_MCLK_EN),
4213 + },
4214 {
4215 .matches = {
4216 DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
4217 diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
4218 index a6c1cf987e6e6..df8d7b53b7600 100644
4219 --- a/sound/soc/sh/rcar/core.c
4220 +++ b/sound/soc/sh/rcar/core.c
4221 @@ -1426,8 +1426,75 @@ static int rsnd_hw_params(struct snd_pcm_substream *substream,
4222 }
4223 if (io->converted_chan)
4224 dev_dbg(dev, "convert channels = %d\n", io->converted_chan);
4225 - if (io->converted_rate)
4226 + if (io->converted_rate) {
4227 + /*
4228 + * SRC supports convert rates from params_rate(hw_params)/k_down
4229 + * to params_rate(hw_params)*k_up, where k_up is always 6, and
4230 + * k_down depends on the number of channels and the SRC unit.
4231 + * So all SRC units can upsample audio up to 6 times regardless
4232 + * of the number of channels. And all SRC units can downsample
4233 + * 2 channel audio up to 6 times too.
4234 + */
4235 + int k_up = 6;
4236 + int k_down = 6;
4237 + int channel;
4238 + struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
4239 +
4240 dev_dbg(dev, "convert rate = %d\n", io->converted_rate);
4241 +
4242 + channel = io->converted_chan ? io->converted_chan :
4243 + params_channels(hw_params);
4244 +
4245 + switch (rsnd_mod_id(src_mod)) {
4246 + /*
4247 + * SRC0 can downsample 4, 6 and 8 channel audio up to 4 times.
4248 + * SRC1, SRC3 and SRC4 can downsample 4 channel audio
4249 + * up to 4 times.
4250 + * SRC1, SRC3 and SRC4 can downsample 6 and 8 channel audio
4251 + * no more than twice.
4252 + */
4253 + case 1:
4254 + case 3:
4255 + case 4:
4256 + if (channel > 4) {
4257 + k_down = 2;
4258 + break;
4259 + }
4260 + fallthrough;
4261 + case 0:
4262 + if (channel > 2)
4263 + k_down = 4;
4264 + break;
4265 +
4266 + /* Other SRC units do not support more than 2 channels */
4267 + default:
4268 + if (channel > 2)
4269 + return -EINVAL;
4270 + }
4271 +
4272 + if (params_rate(hw_params) > io->converted_rate * k_down) {
4273 + hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
4274 + io->converted_rate * k_down;
4275 + hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
4276 + io->converted_rate * k_down;
4277 + hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
4278 + } else if (params_rate(hw_params) * k_up < io->converted_rate) {
4279 + hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
4280 + (io->converted_rate + k_up - 1) / k_up;
4281 + hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
4282 + (io->converted_rate + k_up - 1) / k_up;
4283 + hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
4284 + }
4285 +
4286 + /*
4287 + * TBD: Max SRC input and output rates also depend on number
4288 + * of channels and SRC unit:
4289 + * SRC1, SRC3 and SRC4 do not support more than 128kHz
4290 + * for 6 channel and 96kHz for 8 channel audio.
4291 + * Perhaps this function should return EINVAL if the input or
4292 + * the output rate exceeds the limitation.
4293 + */
4294 + }
4295 }
4296
4297 ret = rsnd_dai_call(hw_params, io, substream, hw_params);
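
Net effect of the new SRC clamping: with up-factor k_up and down-factor
k_down as derived in the comments above, a stream converted to
converted_rate only accepts requested rates inside
[DIV_ROUND_UP(converted_rate, k_up), converted_rate * k_down], and the
hw_params rate interval is pinned to the nearest bound. The same
arithmetic standalone:

#include <stdio.h>

static unsigned int clamp_rate(unsigned int rate, unsigned int converted,
			       unsigned int k_up, unsigned int k_down)
{
	unsigned int lo = (converted + k_up - 1) / k_up;
	unsigned int hi = converted * k_down;

	if (rate > hi)
		rate = hi;
	else if (rate < lo)
		rate = lo;
	printf("converted %u Hz: window %u..%u, request pinned to %u\n",
	       converted, lo, hi, rate);
	return rate;
}

int main(void)
{
	clamp_rate(192000, 8000, 6, 2);	/* >2x down (many channels): 16k */
	clamp_rate(8000, 96000, 6, 6);	/* 6x up at most: raise to 16k */
	return 0;
}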
4298 diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
4299 index 47d5ddb526f21..09af402ca31f3 100644
4300 --- a/sound/soc/sh/rcar/ssi.c
4301 +++ b/sound/soc/sh/rcar/ssi.c
4302 @@ -507,10 +507,15 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
4303 struct rsnd_priv *priv)
4304 {
4305 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
4306 + int ret;
4307
4308 if (!rsnd_ssi_is_run_mods(mod, io))
4309 return 0;
4310
4311 + ret = rsnd_ssi_master_clk_start(mod, io);
4312 + if (ret < 0)
4313 + return ret;
4314 +
4315 ssi->usrcnt++;
4316
4317 rsnd_mod_power_on(mod);
4318 @@ -792,7 +797,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
4319 SSI_SYS_STATUS(i * 2),
4320 0xf << (id * 4));
4321 stop = true;
4322 - break;
4323 }
4324 }
4325 break;
4326 @@ -810,7 +814,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
4327 SSI_SYS_STATUS((i * 2) + 1),
4328 0xf << 4);
4329 stop = true;
4330 - break;
4331 }
4332 }
4333 break;
4334 @@ -1060,13 +1063,6 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
4335 return 0;
4336 }
4337
4338 -static int rsnd_ssi_prepare(struct rsnd_mod *mod,
4339 - struct rsnd_dai_stream *io,
4340 - struct rsnd_priv *priv)
4341 -{
4342 - return rsnd_ssi_master_clk_start(mod, io);
4343 -}
4344 -
4345 static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
4346 .name = SSI_NAME,
4347 .probe = rsnd_ssi_common_probe,
4348 @@ -1079,7 +1075,6 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
4349 .pointer = rsnd_ssi_pio_pointer,
4350 .pcm_new = rsnd_ssi_pcm_new,
4351 .hw_params = rsnd_ssi_hw_params,
4352 - .prepare = rsnd_ssi_prepare,
4353 .get_status = rsnd_ssi_get_status,
4354 };
4355
4356 @@ -1166,7 +1161,6 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
4357 .pcm_new = rsnd_ssi_pcm_new,
4358 .fallback = rsnd_ssi_fallback,
4359 .hw_params = rsnd_ssi_hw_params,
4360 - .prepare = rsnd_ssi_prepare,
4361 .get_status = rsnd_ssi_get_status,
4362 };
4363
4364 diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
4365 index 3ed0134a764d4..67386aa3f31d1 100644
4366 --- a/tools/testing/selftests/lib.mk
4367 +++ b/tools/testing/selftests/lib.mk
4368 @@ -1,6 +1,10 @@
4369 # This mimics the top-level Makefile. We do it explicitly here so that this
4370 # Makefile can operate with or without the kbuild infrastructure.
4371 +ifneq ($(LLVM),)
4372 +CC := clang
4373 +else
4374 CC := $(CROSS_COMPILE)gcc
4375 +endif
4376
4377 ifeq (0,$(MAKELEVEL))
4378 ifeq ($(OUTPUT),)