Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0205-5.4.106-all-fixes.patch

Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 175325 bytes
Log message: -add missing
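
The patch below takes a 5.4.105 source tree to 5.4.106 (see the SUBLEVEL bump in the Makefile hunk). As a minimal sketch — assuming a clean vanilla 5.4.105 tree, GNU patch, and the file saved under the name shown above — it would typically be applied like this:

    cd linux-5.4.105
    # dry run first to confirm every hunk applies cleanly
    patch -p1 --dry-run < 0205-5.4.106-all-fixes.patch
    # apply for real
    patch -p1 < 0205-5.4.106-all-fixes.patch

On a git checkout of v5.4.105, "git apply --check" followed by "git apply" serves the same purpose.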
1 diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt
2 index 7064efd3b5ea3..fd22224853e58 100644
3 --- a/Documentation/virt/kvm/api.txt
4 +++ b/Documentation/virt/kvm/api.txt
5 @@ -172,6 +172,9 @@ is dependent on the CPU capability and the kernel configuration. The limit can
6 be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION
7 ioctl() at run-time.
8
9 +Creation of the VM will fail if the requested IPA size (whether it is
10 +implicit or explicit) is unsupported on the host.
11 +
12 Please note that configuring the IPA size does not affect the capability
13 exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects
14 size of the address translated by the stage2 level (guest physical to
15 diff --git a/Makefile b/Makefile
16 index e27d031f3241e..a333b378f1f71 100644
17 --- a/Makefile
18 +++ b/Makefile
19 @@ -1,7 +1,7 @@
20 # SPDX-License-Identifier: GPL-2.0
21 VERSION = 5
22 PATCHLEVEL = 4
23 -SUBLEVEL = 105
24 +SUBLEVEL = 106
25 EXTRAVERSION =
26 NAME = Kleptomaniac Octopus
27
28 @@ -1175,9 +1175,15 @@ define filechk_utsrelease.h
29 endef
30
31 define filechk_version.h
32 - echo \#define LINUX_VERSION_CODE $(shell \
33 - expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
34 - echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'
35 + if [ $(SUBLEVEL) -gt 255 ]; then \
36 + echo \#define LINUX_VERSION_CODE $(shell \
37 + expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \
38 + else \
39 + echo \#define LINUX_VERSION_CODE $(shell \
40 + expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
41 + fi; \
42 + echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + \
43 + ((c) > 255 ? 255 : (c)))'
44 endef
45
46 $(version_h): FORCE
47 diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
48 index f615830f9f57b..9d0b7e677faac 100644
49 --- a/arch/arm/include/asm/kvm_asm.h
50 +++ b/arch/arm/include/asm/kvm_asm.h
51 @@ -56,7 +56,7 @@ extern char __kvm_hyp_init_end[];
52 extern void __kvm_flush_vm_context(void);
53 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
54 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
55 -extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
56 +extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu);
57
58 extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
59
60 diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
61 index 848f27bbad9db..80e67108d39d1 100644
62 --- a/arch/arm/kvm/hyp/tlb.c
63 +++ b/arch/arm/kvm/hyp/tlb.c
64 @@ -45,7 +45,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
65 __kvm_tlb_flush_vmid(kvm);
66 }
67
68 -void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
69 +void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
70 {
71 struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
72
73 @@ -54,6 +54,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
74 isb();
75
76 write_sysreg(0, TLBIALL);
77 + write_sysreg(0, ICIALLU);
78 dsb(nsh);
79 isb();
80
81 diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
82 index 64d79b2884344..c54e759896c1a 100644
83 --- a/arch/arm64/include/asm/kvm_asm.h
84 +++ b/arch/arm64/include/asm/kvm_asm.h
85 @@ -60,7 +60,7 @@ extern char __kvm_hyp_vector[];
86 extern void __kvm_flush_vm_context(void);
87 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
88 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
89 -extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
90 +extern void __kvm_flush_cpu_context(struct kvm_vcpu *vcpu);
91
92 extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
93
94 diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
95 index 6c295a231882a..67b6b90f37eed 100644
96 --- a/arch/arm64/include/asm/memory.h
97 +++ b/arch/arm64/include/asm/memory.h
98 @@ -315,6 +315,11 @@ static inline void *phys_to_virt(phys_addr_t x)
99 #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
100
101 #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
102 +#define page_to_virt(x) ({ \
103 + __typeof__(x) __page = x; \
104 + void *__addr = __va(page_to_phys(__page)); \
105 + (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
106 +})
107 #define virt_to_page(x) pfn_to_page(virt_to_pfn(x))
108 #else
109 #define page_to_virt(x) ({ \
110 diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
111 index 3827ff4040a3f..3a5d9f1c91b6d 100644
112 --- a/arch/arm64/include/asm/mmu_context.h
113 +++ b/arch/arm64/include/asm/mmu_context.h
114 @@ -63,10 +63,7 @@ extern u64 idmap_ptrs_per_pgd;
115
116 static inline bool __cpu_uses_extended_idmap(void)
117 {
118 - if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
119 - return false;
120 -
121 - return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
122 + return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual));
123 }
124
125 /*
126 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
127 index 438de2301cfe3..a2e0b37549433 100644
128 --- a/arch/arm64/kernel/head.S
129 +++ b/arch/arm64/kernel/head.S
130 @@ -337,7 +337,7 @@ __create_page_tables:
131 */
132 adrp x5, __idmap_text_end
133 clz x5, x5
134 - cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
135 + cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
136 b.ge 1f // .. then skip VA range extension
137
138 adr_l x6, idmap_t0sz
139 diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
140 index eb0efc5557f30..7b7213fc17d95 100644
141 --- a/arch/arm64/kvm/hyp/tlb.c
142 +++ b/arch/arm64/kvm/hyp/tlb.c
143 @@ -182,7 +182,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
144 __tlb_switch_to_host(kvm, &cxt);
145 }
146
147 -void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
148 +void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
149 {
150 struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
151 struct tlb_inv_context cxt;
152 @@ -191,6 +191,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
153 __tlb_switch_to_guest(kvm, &cxt);
154
155 __tlbi(vmalle1);
156 + asm volatile("ic iallu");
157 dsb(nsh);
158 isb();
159
160 diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
161 index 784d485218ca1..a3105ae464be1 100644
162 --- a/arch/arm64/kvm/reset.c
163 +++ b/arch/arm64/kvm/reset.c
164 @@ -378,10 +378,10 @@ void kvm_set_ipa_limit(void)
165 pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n",
166 (va_max < pa_max) ? "Virtual" : "Physical");
167
168 - WARN(ipa_max < KVM_PHYS_SHIFT,
169 - "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
170 kvm_ipa_limit = ipa_max;
171 - kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit);
172 + kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
173 + ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
174 + " (Reduced IPA size, limited VM/VMM compatibility)" : ""));
175 }
176
177 /*
178 @@ -408,6 +408,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
179 return -EINVAL;
180 } else {
181 phys_shift = KVM_PHYS_SHIFT;
182 + if (phys_shift > kvm_ipa_limit) {
183 + pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
184 + current->comm);
185 + return -EINVAL;
186 + }
187 }
188
189 parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
190 diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
191 index 602bd19630ff8..cbcac03c0e0da 100644
192 --- a/arch/arm64/mm/init.c
193 +++ b/arch/arm64/mm/init.c
194 @@ -245,6 +245,18 @@ int pfn_valid(unsigned long pfn)
195
196 if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
197 return 0;
198 +
199 + /*
200 + * ZONE_DEVICE memory does not have the memblock entries.
201 + * memblock_is_map_memory() check for ZONE_DEVICE based
202 + * addresses will always fail. Even the normal hotplugged
203 + * memory will never have MEMBLOCK_NOMAP flag set in their
204 + * memblock entries. Skip memblock search for all non early
205 + * memory sections covering all of hotplug memory including
206 + * both normal and ZONE_DEVICE based.
207 + */
208 + if (!early_section(__pfn_to_section(pfn)))
209 + return pfn_section_valid(__pfn_to_section(pfn), pfn);
210 #endif
211 return memblock_is_map_memory(addr);
212 }
213 diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
214 index d10247fab0fdf..99bc0289ab2b6 100644
215 --- a/arch/arm64/mm/mmu.c
216 +++ b/arch/arm64/mm/mmu.c
217 @@ -38,7 +38,7 @@
218 #define NO_BLOCK_MAPPINGS BIT(0)
219 #define NO_CONT_MAPPINGS BIT(1)
220
221 -u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
222 +u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
223 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
224
225 u64 __section(".mmuoff.data.write") vabits_actual;
226 diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
227 index 898b542628815..be0f7257b13c8 100644
228 --- a/arch/powerpc/include/asm/code-patching.h
229 +++ b/arch/powerpc/include/asm/code-patching.h
230 @@ -72,7 +72,7 @@ void __patch_exception(int exc, unsigned long addr);
231 #endif
232
233 #define OP_RT_RA_MASK 0xffff0000UL
234 -#define LIS_R2 0x3c020000UL
235 +#define LIS_R2 0x3c400000UL
236 #define ADDIS_R2_R12 0x3c4c0000UL
237 #define ADDI_R2_R2 0x38420000UL
238
239 diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
240 index 7bcb64444a394..f71c361dc356f 100644
241 --- a/arch/powerpc/include/asm/machdep.h
242 +++ b/arch/powerpc/include/asm/machdep.h
243 @@ -59,6 +59,9 @@ struct machdep_calls {
244 int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
245 *bridge);
246
247 + /* finds all the pci_controllers present at boot */
248 + void (*discover_phbs)(void);
249 +
250 /* To setup PHBs when using automatic OF platform driver for PCI */
251 int (*pci_setup_phb)(struct pci_controller *host);
252
253 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
254 index c41220f4aad9e..5a424f867c828 100644
255 --- a/arch/powerpc/include/asm/ptrace.h
256 +++ b/arch/powerpc/include/asm/ptrace.h
257 @@ -62,6 +62,9 @@ struct pt_regs
258 };
259 #endif
260
261 +
262 +#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))
263 +
264 #ifdef __powerpc64__
265
266 /*
267 diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
268 index 5c0a1e17219b7..af399675248ed 100644
269 --- a/arch/powerpc/kernel/asm-offsets.c
270 +++ b/arch/powerpc/kernel/asm-offsets.c
271 @@ -285,7 +285,7 @@ int main(void)
272
273 /* Interrupt register frame */
274 DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
275 - DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
276 + DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS);
277 STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
278 STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
279 STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
280 diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
281 index 126ba54384300..edaab1142498c 100644
282 --- a/arch/powerpc/kernel/head_32.S
283 +++ b/arch/powerpc/kernel/head_32.S
284 @@ -418,10 +418,11 @@ InstructionTLBMiss:
285 cmplw 0,r1,r3
286 #endif
287 mfspr r2, SPRN_SPRG_PGDIR
288 - li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
289 + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
290 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
291 bge- 112f
292 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
293 + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
294 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
295 #endif
296 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
297 @@ -480,9 +481,10 @@ DataLoadTLBMiss:
298 lis r1,PAGE_OFFSET@h /* check if kernel address */
299 cmplw 0,r1,r3
300 mfspr r2, SPRN_SPRG_PGDIR
301 - li r1, _PAGE_PRESENT | _PAGE_ACCESSED
302 + li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
303 bge- 112f
304 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
305 + li r1, _PAGE_PRESENT | _PAGE_ACCESSED
306 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
307 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
308 lwz r2,0(r2) /* get pmd entry */
309 @@ -556,9 +558,10 @@ DataStoreTLBMiss:
310 lis r1,PAGE_OFFSET@h /* check if kernel address */
311 cmplw 0,r1,r3
312 mfspr r2, SPRN_SPRG_PGDIR
313 - li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
314 + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
315 bge- 112f
316 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
317 + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
318 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
319 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
320 lwz r2,0(r2) /* get pmd entry */
321 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
322 index 1c448cf255061..a2c258a8d7367 100644
323 --- a/arch/powerpc/kernel/pci-common.c
324 +++ b/arch/powerpc/kernel/pci-common.c
325 @@ -1669,3 +1669,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
326 }
327 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
328 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
329 +
330 +
331 +static int __init discover_phbs(void)
332 +{
333 + if (ppc_md.discover_phbs)
334 + ppc_md.discover_phbs();
335 +
336 + return 0;
337 +}
338 +core_initcall(discover_phbs);
339 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
340 index bd0c258a1d5dd..c94bba9142e7e 100644
341 --- a/arch/powerpc/kernel/process.c
342 +++ b/arch/powerpc/kernel/process.c
343 @@ -2081,7 +2081,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
344 * See if this is an exception frame.
345 * We look for the "regshere" marker in the current frame.
346 */
347 - if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
348 + if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
349 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
350 struct pt_regs *regs = (struct pt_regs *)
351 (sp + STACK_FRAME_OVERHEAD);
352 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
353 index 206032c9b5458..ecfa460f66d17 100644
354 --- a/arch/powerpc/kernel/traps.c
355 +++ b/arch/powerpc/kernel/traps.c
356 @@ -513,8 +513,11 @@ out:
357 die("Unrecoverable nested System Reset", regs, SIGABRT);
358 #endif
359 /* Must die if the interrupt is not recoverable */
360 - if (!(regs->msr & MSR_RI))
361 + if (!(regs->msr & MSR_RI)) {
362 + /* For the reason explained in die_mce, nmi_exit before die */
363 + nmi_exit();
364 die("Unrecoverable System Reset", regs, SIGABRT);
365 + }
366
367 if (saved_hsrrs) {
368 mtspr(SPRN_HSRR0, hsrr0);
369 diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
370 index 02fc75ddcbb36..6f013e4188349 100644
371 --- a/arch/powerpc/perf/core-book3s.c
372 +++ b/arch/powerpc/perf/core-book3s.c
373 @@ -2077,7 +2077,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
374 left += period;
375 if (left <= 0)
376 left = period;
377 - record = siar_valid(regs);
378 +
379 + /*
380 + * If address is not requested in the sample via
381 + * PERF_SAMPLE_IP, just record that sample irrespective
382 + * of SIAR valid check.
383 + */
384 + if (event->attr.sample_type & PERF_SAMPLE_IP)
385 + record = siar_valid(regs);
386 + else
387 + record = 1;
388 +
389 event->hw.last_period = event->hw.sample_period;
390 }
391 if (left < 0x80000000LL)
392 @@ -2095,9 +2105,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
393 * MMCR2. Check attr.exclude_kernel and address to drop the sample in
394 * these cases.
395 */
396 - if (event->attr.exclude_kernel && record)
397 - if (is_kernel_addr(mfspr(SPRN_SIAR)))
398 - record = 0;
399 + if (event->attr.exclude_kernel &&
400 + (event->attr.sample_type & PERF_SAMPLE_IP) &&
401 + is_kernel_addr(mfspr(SPRN_SIAR)))
402 + record = 0;
403
404 /*
405 * Finally record data if requested.
406 diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
407 index b3ac2455faadc..637300330507f 100644
408 --- a/arch/powerpc/platforms/pseries/msi.c
409 +++ b/arch/powerpc/platforms/pseries/msi.c
410 @@ -4,6 +4,7 @@
411 * Copyright 2006-2007 Michael Ellerman, IBM Corp.
412 */
413
414 +#include <linux/crash_dump.h>
415 #include <linux/device.h>
416 #include <linux/irq.h>
417 #include <linux/msi.h>
418 @@ -458,8 +459,28 @@ again:
419 return hwirq;
420 }
421
422 - virq = irq_create_mapping_affinity(NULL, hwirq,
423 - entry->affinity);
424 + /*
425 + * Depending on the number of online CPUs in the original
426 + * kernel, it is likely for CPU #0 to be offline in a kdump
427 + * kernel. The associated IRQs in the affinity mappings
428 + * provided by irq_create_affinity_masks() are thus not
429 + * started by irq_startup(), as per-design for managed IRQs.
430 + * This can be a problem with multi-queue block devices driven
431 + * by blk-mq : such a non-started IRQ is very likely paired
432 + * with the single queue enforced by blk-mq during kdump (see
433 + * blk_mq_alloc_tag_set()). This causes the device to remain
434 + * silent and likely hangs the guest at some point.
435 + *
436 + * We don't really care for fine-grained affinity when doing
437 + * kdump actually : simply ignore the pre-computed affinity
438 + * masks in this case and let the default mask with all CPUs
439 + * be used when creating the IRQ mappings.
440 + */
441 + if (is_kdump_kernel())
442 + virq = irq_create_mapping(NULL, hwirq);
443 + else
444 + virq = irq_create_mapping_affinity(NULL, hwirq,
445 + entry->affinity);
446
447 if (!virq) {
448 pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
449 diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
450 index 659d99af91566..8c51462f13fd1 100644
451 --- a/arch/s390/kernel/smp.c
452 +++ b/arch/s390/kernel/smp.c
453 @@ -765,7 +765,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
454 static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
455 {
456 struct sclp_core_entry *core;
457 - cpumask_t avail;
458 + static cpumask_t avail;
459 bool configured;
460 u16 core_id;
461 int nr, i;
462 diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
463 index f94532f25db14..274217e7ed702 100644
464 --- a/arch/sparc/include/asm/mman.h
465 +++ b/arch/sparc/include/asm/mman.h
466 @@ -57,35 +57,39 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
467 {
468 if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
469 return 0;
470 - if (prot & PROT_ADI) {
471 - if (!adi_capable())
472 - return 0;
473 + return 1;
474 +}
475
476 - if (addr) {
477 - struct vm_area_struct *vma;
478 +#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
479 +/* arch_validate_flags() - Ensure combination of flags is valid for a
480 + * VMA.
481 + */
482 +static inline bool arch_validate_flags(unsigned long vm_flags)
483 +{
484 + /* If ADI is being enabled on this VMA, check for ADI
485 + * capability on the platform and ensure VMA is suitable
486 + * for ADI
487 + */
488 + if (vm_flags & VM_SPARC_ADI) {
489 + if (!adi_capable())
490 + return false;
491
492 - vma = find_vma(current->mm, addr);
493 - if (vma) {
494 - /* ADI can not be enabled on PFN
495 - * mapped pages
496 - */
497 - if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
498 - return 0;
499 + /* ADI can not be enabled on PFN mapped pages */
500 + if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
501 + return false;
502
503 - /* Mergeable pages can become unmergeable
504 - * if ADI is enabled on them even if they
505 - * have identical data on them. This can be
506 - * because ADI enabled pages with identical
507 - * data may still not have identical ADI
508 - * tags on them. Disallow ADI on mergeable
509 - * pages.
510 - */
511 - if (vma->vm_flags & VM_MERGEABLE)
512 - return 0;
513 - }
514 - }
515 + /* Mergeable pages can become unmergeable
516 + * if ADI is enabled on them even if they
517 + * have identical data on them. This can be
518 + * because ADI enabled pages with identical
519 + * data may still not have identical ADI
520 + * tags on them. Disallow ADI on mergeable
521 + * pages.
522 + */
523 + if (vm_flags & VM_MERGEABLE)
524 + return false;
525 }
526 - return 1;
527 + return true;
528 }
529 #endif /* CONFIG_SPARC64 */
530
531 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
532 index 906eda1158b4d..40dd6cb4a4133 100644
533 --- a/arch/sparc/mm/init_32.c
534 +++ b/arch/sparc/mm/init_32.c
535 @@ -197,6 +197,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
536 size = memblock_phys_mem_size() - memblock_reserved_size();
537 *pages_avail = (size >> PAGE_SHIFT) - high_pages;
538
539 + /* Only allow low memory to be allocated via memblock allocation */
540 + memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);
541 +
542 return max_pfn;
543 }
544
545 diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
546 index f29f015a5e7f3..b934f9f68a168 100644
547 --- a/arch/x86/kernel/unwind_orc.c
548 +++ b/arch/x86/kernel/unwind_orc.c
549 @@ -357,8 +357,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
550 if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
551 return false;
552
553 - *ip = regs->ip;
554 - *sp = regs->sp;
555 + *ip = READ_ONCE_NOCHECK(regs->ip);
556 + *sp = READ_ONCE_NOCHECK(regs->sp);
557 return true;
558 }
559
560 @@ -370,8 +370,8 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
561 if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
562 return false;
563
564 - *ip = regs->ip;
565 - *sp = regs->sp;
566 + *ip = READ_ONCE_NOCHECK(regs->ip);
567 + *sp = READ_ONCE_NOCHECK(regs->sp);
568 return true;
569 }
570
571 @@ -392,12 +392,12 @@ static bool get_reg(struct unwind_state *state, unsigned int reg_off,
572 return false;
573
574 if (state->full_regs) {
575 - *val = ((unsigned long *)state->regs)[reg];
576 + *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
577 return true;
578 }
579
580 if (state->prev_regs) {
581 - *val = ((unsigned long *)state->prev_regs)[reg];
582 + *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
583 return true;
584 }
585
586 diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
587 index 7d5236eafe845..4c3b9813b2843 100644
588 --- a/drivers/base/swnode.c
589 +++ b/drivers/base/swnode.c
590 @@ -812,6 +812,9 @@ int software_node_register(const struct software_node *node)
591 if (software_node_to_swnode(node))
592 return -EEXIST;
593
594 + if (node->parent && !parent)
595 + return -EINVAL;
596 +
597 return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
598 }
599 EXPORT_SYMBOL_GPL(software_node_register);
600 diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
601 index 804d28faa97b0..a1824bb080446 100644
602 --- a/drivers/block/rsxx/core.c
603 +++ b/drivers/block/rsxx/core.c
604 @@ -869,6 +869,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
605 card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
606 if (!card->event_wq) {
607 dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
608 + st = -ENOMEM;
609 goto failed_event_handler;
610 }
611
612 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
613 index 22aa432a68bf9..719c6b7741afa 100644
614 --- a/drivers/block/zram/zram_drv.c
615 +++ b/drivers/block/zram/zram_drv.c
616 @@ -627,7 +627,7 @@ static ssize_t writeback_store(struct device *dev,
617 struct bio_vec bio_vec;
618 struct page *page;
619 ssize_t ret = len;
620 - int mode;
621 + int mode, err;
622 unsigned long blk_idx = 0;
623
624 if (sysfs_streq(buf, "idle"))
625 @@ -719,12 +719,17 @@ static ssize_t writeback_store(struct device *dev,
626 * XXX: A single page IO would be inefficient for write
627 * but it would be not bad as starter.
628 */
629 - ret = submit_bio_wait(&bio);
630 - if (ret) {
631 + err = submit_bio_wait(&bio);
632 + if (err) {
633 zram_slot_lock(zram, index);
634 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
635 zram_clear_flag(zram, index, ZRAM_IDLE);
636 zram_slot_unlock(zram, index);
637 + /*
638 + * Return last IO error unless every IO were
639 + * not suceeded.
640 + */
641 + ret = err;
642 continue;
643 }
644
645 diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
646 index f5918707672f1..d88f4230c2219 100644
647 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
648 +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
649 @@ -474,14 +474,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
650 struct drm_gem_object *obj = vma->vm_private_data;
651 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
652 loff_t num_pages = obj->size >> PAGE_SHIFT;
653 + vm_fault_t ret;
654 struct page *page;
655 + pgoff_t page_offset;
656
657 - if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
658 - return VM_FAULT_SIGBUS;
659 + /* We don't use vmf->pgoff since that has the fake offset */
660 + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
661
662 - page = shmem->pages[vmf->pgoff];
663 + mutex_lock(&shmem->pages_lock);
664 +
665 + if (page_offset >= num_pages ||
666 + WARN_ON_ONCE(!shmem->pages) ||
667 + shmem->madv < 0) {
668 + ret = VM_FAULT_SIGBUS;
669 + } else {
670 + page = shmem->pages[page_offset];
671 +
672 + ret = vmf_insert_page(vma, vmf->address, page);
673 + }
674
675 - return vmf_insert_page(vma, vmf->address, page);
676 + mutex_unlock(&shmem->pages_lock);
677 +
678 + return ret;
679 }
680
681 static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
682 @@ -549,9 +563,6 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
683 vma->vm_flags &= ~VM_PFNMAP;
684 vma->vm_flags |= VM_MIXEDMAP;
685
686 - /* Remove the fake offset */
687 - vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
688 -
689 return 0;
690 }
691 EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
692 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
693 index 22c7fd7196c82..2cf053fb8d54b 100644
694 --- a/drivers/gpu/drm/drm_ioc32.c
695 +++ b/drivers/gpu/drm/drm_ioc32.c
696 @@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
697 if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
698 return -EFAULT;
699
700 + memset(&v, 0, sizeof(v));
701 +
702 v = (struct drm_version) {
703 .name_len = v32.name_len,
704 .name = compat_ptr(v32.name),
705 @@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
706
707 if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
708 return -EFAULT;
709 +
710 + memset(&uq, 0, sizeof(uq));
711 +
712 uq = (struct drm_unique){
713 .unique_len = uq32.unique_len,
714 .unique = compat_ptr(uq32.unique),
715 @@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
716 if (copy_from_user(&c32, argp, sizeof(c32)))
717 return -EFAULT;
718
719 + memset(&client, 0, sizeof(client));
720 +
721 client.idx = c32.idx;
722
723 err = drm_ioctl_kernel(file, drm_getclient, &client, 0);
724 @@ -850,6 +857,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
725 if (copy_from_user(&req32, argp, sizeof(req32)))
726 return -EFAULT;
727
728 + memset(&req, 0, sizeof(req));
729 +
730 req.request.type = req32.request.type;
731 req.request.sequence = req32.request.sequence;
732 req.request.signal = req32.request.signal;
733 @@ -887,6 +896,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
734 struct drm_mode_fb_cmd2 req64;
735 int err;
736
737 + memset(&req64, 0, sizeof(req64));
738 +
739 if (copy_from_user(&req64, argp,
740 offsetof(drm_mode_fb_cmd232_t, modifier)))
741 return -EFAULT;
742 diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
743 index a24f8dec5adc9..86d0961112773 100644
744 --- a/drivers/gpu/drm/meson/meson_drv.c
745 +++ b/drivers/gpu/drm/meson/meson_drv.c
746 @@ -420,6 +420,16 @@ static int meson_probe_remote(struct platform_device *pdev,
747 return count;
748 }
749
750 +static void meson_drv_shutdown(struct platform_device *pdev)
751 +{
752 + struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
753 + struct drm_device *drm = priv->drm;
754 +
755 + DRM_DEBUG_DRIVER("\n");
756 + drm_kms_helper_poll_fini(drm);
757 + drm_atomic_helper_shutdown(drm);
758 +}
759 +
760 static int meson_drv_probe(struct platform_device *pdev)
761 {
762 struct component_match *match = NULL;
763 @@ -469,6 +479,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
764
765 static struct platform_driver meson_drm_platform_driver = {
766 .probe = meson_drv_probe,
767 + .shutdown = meson_drv_shutdown,
768 .driver = {
769 .name = "meson-drm",
770 .of_match_table = dt_match,
771 diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
772 index 92d84280096e0..9abf3dc5ef990 100644
773 --- a/drivers/gpu/drm/qxl/qxl_display.c
774 +++ b/drivers/gpu/drm/qxl/qxl_display.c
775 @@ -325,6 +325,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
776
777 head.id = i;
778 head.flags = 0;
779 + head.surface_id = 0;
780 oldcount = qdev->monitors_config->count;
781 if (crtc->state->active) {
782 struct drm_display_mode *mode = &crtc->mode;
783 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
784 index 86001cfbdb6f1..b499ac37dc7b0 100644
785 --- a/drivers/hid/hid-logitech-dj.c
786 +++ b/drivers/hid/hid-logitech-dj.c
787 @@ -995,7 +995,12 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
788 workitem.reports_supported |= STD_KEYBOARD;
789 break;
790 case 0x0d:
791 - device_type = "eQUAD Lightspeed 1_1";
792 + device_type = "eQUAD Lightspeed 1.1";
793 + logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
794 + workitem.reports_supported |= STD_KEYBOARD;
795 + break;
796 + case 0x0f:
797 + device_type = "eQUAD Lightspeed 1.2";
798 logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
799 workitem.reports_supported |= STD_KEYBOARD;
800 break;
801 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
802 index 9c162a01a5849..d0c4b3019e41e 100644
803 --- a/drivers/i2c/busses/i2c-rcar.c
804 +++ b/drivers/i2c/busses/i2c-rcar.c
805 @@ -89,7 +89,6 @@
806
807 #define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
808 #define RCAR_BUS_PHASE_DATA (MDBS | MIE)
809 -#define RCAR_BUS_MASK_DATA (~(ESG | FSB) & 0xFF)
810 #define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB)
811
812 #define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE)
813 @@ -117,6 +116,7 @@ enum rcar_i2c_type {
814 };
815
816 struct rcar_i2c_priv {
817 + u32 flags;
818 void __iomem *io;
819 struct i2c_adapter adap;
820 struct i2c_msg *msg;
821 @@ -127,7 +127,6 @@ struct rcar_i2c_priv {
822
823 int pos;
824 u32 icccr;
825 - u32 flags;
826 u8 recovery_icmcr; /* protected by adapter lock */
827 enum rcar_i2c_type devtype;
828 struct i2c_client *slave;
829 @@ -616,7 +615,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
830 /*
831 * This driver has a lock-free design because there are IP cores (at least
832 * R-Car Gen2) which have an inherent race condition in their hardware design.
833 - * There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after
834 + * There, we need to switch to RCAR_BUS_PHASE_DATA as soon as possible after
835 * the interrupt was generated, otherwise an unwanted repeated message gets
836 * generated. It turned out that taking a spinlock at the beginning of the ISR
837 * was already causing repeated messages. Thus, this driver was converted to
838 @@ -625,13 +624,11 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
839 static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
840 {
841 struct rcar_i2c_priv *priv = ptr;
842 - u32 msr, val;
843 + u32 msr;
844
845 /* Clear START or STOP immediately, except for REPSTART after read */
846 - if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) {
847 - val = rcar_i2c_read(priv, ICMCR);
848 - rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
849 - }
850 + if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
851 + rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
852
853 msr = rcar_i2c_read(priv, ICMSR);
854
855 diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
856 index d38398526965d..a4b7422de534e 100644
857 --- a/drivers/input/keyboard/applespi.c
858 +++ b/drivers/input/keyboard/applespi.c
859 @@ -48,6 +48,7 @@
860 #include <linux/efi.h>
861 #include <linux/input.h>
862 #include <linux/input/mt.h>
863 +#include <linux/ktime.h>
864 #include <linux/leds.h>
865 #include <linux/module.h>
866 #include <linux/spinlock.h>
867 @@ -400,7 +401,7 @@ struct applespi_data {
868 unsigned int cmd_msg_cntr;
869 /* lock to protect the above parameters and flags below */
870 spinlock_t cmd_msg_lock;
871 - bool cmd_msg_queued;
872 + ktime_t cmd_msg_queued;
873 enum applespi_evt_type cmd_evt_type;
874
875 struct led_classdev backlight_info;
876 @@ -716,7 +717,7 @@ static void applespi_msg_complete(struct applespi_data *applespi,
877 wake_up_all(&applespi->drain_complete);
878
879 if (is_write_msg) {
880 - applespi->cmd_msg_queued = false;
881 + applespi->cmd_msg_queued = 0;
882 applespi_send_cmd_msg(applespi);
883 }
884
885 @@ -758,8 +759,16 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
886 return 0;
887
888 /* check whether send is in progress */
889 - if (applespi->cmd_msg_queued)
890 - return 0;
891 + if (applespi->cmd_msg_queued) {
892 + if (ktime_ms_delta(ktime_get(), applespi->cmd_msg_queued) < 1000)
893 + return 0;
894 +
895 + dev_warn(&applespi->spi->dev, "Command %d timed out\n",
896 + applespi->cmd_evt_type);
897 +
898 + applespi->cmd_msg_queued = 0;
899 + applespi->write_active = false;
900 + }
901
902 /* set up packet */
903 memset(packet, 0, APPLESPI_PACKET_SIZE);
904 @@ -856,7 +865,7 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
905 return sts;
906 }
907
908 - applespi->cmd_msg_queued = true;
909 + applespi->cmd_msg_queued = ktime_get_coarse();
910 applespi->write_active = true;
911
912 return 0;
913 @@ -1908,7 +1917,7 @@ static int __maybe_unused applespi_resume(struct device *dev)
914 applespi->drain = false;
915 applespi->have_cl_led_on = false;
916 applespi->have_bl_level = 0;
917 - applespi->cmd_msg_queued = false;
918 + applespi->cmd_msg_queued = 0;
919 applespi->read_active = false;
920 applespi->write_active = false;
921
922 diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
923 index 31d7e2d4f3040..ad714ff375f85 100644
924 --- a/drivers/iommu/amd_iommu_init.c
925 +++ b/drivers/iommu/amd_iommu_init.c
926 @@ -12,6 +12,7 @@
927 #include <linux/acpi.h>
928 #include <linux/list.h>
929 #include <linux/bitmap.h>
930 +#include <linux/delay.h>
931 #include <linux/slab.h>
932 #include <linux/syscore_ops.h>
933 #include <linux/interrupt.h>
934 @@ -253,6 +254,8 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
935 static int amd_iommu_enable_interrupts(void);
936 static int __init iommu_go_to_state(enum iommu_init_state state);
937 static void init_device_table_dma(void);
938 +static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
939 + u8 fxn, u64 *value, bool is_write);
940
941 static bool amd_iommu_pre_enabled = true;
942
943 @@ -1672,13 +1675,11 @@ static int __init init_iommu_all(struct acpi_table_header *table)
944 return 0;
945 }
946
947 -static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
948 - u8 fxn, u64 *value, bool is_write);
949 -
950 -static void init_iommu_perf_ctr(struct amd_iommu *iommu)
951 +static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
952 {
953 + int retry;
954 struct pci_dev *pdev = iommu->dev;
955 - u64 val = 0xabcd, val2 = 0, save_reg = 0;
956 + u64 val = 0xabcd, val2 = 0, save_reg, save_src;
957
958 if (!iommu_feature(iommu, FEATURE_PC))
959 return;
960 @@ -1686,17 +1687,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
961 amd_iommu_pc_present = true;
962
963 /* save the value to restore, if writable */
964 - if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
965 + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
966 + iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
967 goto pc_false;
968
969 - /* Check if the performance counters can be written to */
970 - if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
971 - (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
972 - (val != val2))
973 + /*
974 + * Disable power gating by programing the performance counter
975 + * source to 20 (i.e. counts the reads and writes from/to IOMMU
976 + * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
977 + * which never get incremented during this init phase.
978 + * (Note: The event is also deprecated.)
979 + */
980 + val = 20;
981 + if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
982 goto pc_false;
983
984 + /* Check if the performance counters can be written to */
985 + val = 0xabcd;
986 + for (retry = 5; retry; retry--) {
987 + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
988 + iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
989 + val2)
990 + break;
991 +
992 + /* Wait about 20 msec for power gating to disable and retry. */
993 + msleep(20);
994 + }
995 +
996 /* restore */
997 - if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
998 + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
999 + iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
1000 + goto pc_false;
1001 +
1002 + if (val != val2)
1003 goto pc_false;
1004
1005 pci_info(pdev, "IOMMU performance counters supported\n");
1006 diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
1007 index a4a45d68a6efc..1c00688c71c24 100644
1008 --- a/drivers/media/platform/vsp1/vsp1_drm.c
1009 +++ b/drivers/media/platform/vsp1/vsp1_drm.c
1010 @@ -245,7 +245,7 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1,
1011 brx = &vsp1->bru->entity;
1012 else if (pipe->brx && !drm_pipe->force_brx_release)
1013 brx = pipe->brx;
1014 - else if (!vsp1->bru->entity.pipe)
1015 + else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe)
1016 brx = &vsp1->bru->entity;
1017 else
1018 brx = &vsp1->brs->entity;
1019 @@ -462,9 +462,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
1020 * make sure it is present in the pipeline's list of entities if it
1021 * wasn't already.
1022 */
1023 - if (!use_uif) {
1024 + if (drm_pipe->uif && !use_uif) {
1025 drm_pipe->uif->pipe = NULL;
1026 - } else if (!drm_pipe->uif->pipe) {
1027 + } else if (drm_pipe->uif && !drm_pipe->uif->pipe) {
1028 drm_pipe->uif->pipe = pipe;
1029 list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities);
1030 }
1031 diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
1032 index 48d23433b3c06..caeb51def782c 100644
1033 --- a/drivers/media/rc/Makefile
1034 +++ b/drivers/media/rc/Makefile
1035 @@ -5,6 +5,7 @@ obj-y += keymaps/
1036 obj-$(CONFIG_RC_CORE) += rc-core.o
1037 rc-core-y := rc-main.o rc-ir-raw.o
1038 rc-core-$(CONFIG_LIRC) += lirc_dev.o
1039 +rc-core-$(CONFIG_MEDIA_CEC_RC) += keymaps/rc-cec.o
1040 rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o
1041 obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o
1042 obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o
1043 diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
1044 index ea91a9afa6a02..d89dcc4481229 100644
1045 --- a/drivers/media/rc/keymaps/Makefile
1046 +++ b/drivers/media/rc/keymaps/Makefile
1047 @@ -20,7 +20,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
1048 rc-behold.o \
1049 rc-behold-columbus.o \
1050 rc-budget-ci-old.o \
1051 - rc-cec.o \
1052 rc-cinergy-1400.o \
1053 rc-cinergy.o \
1054 rc-d680-dmb.o \
1055 diff --git a/drivers/media/rc/keymaps/rc-cec.c b/drivers/media/rc/keymaps/rc-cec.c
1056 index 3e3bd11092b45..068e22aeac8c3 100644
1057 --- a/drivers/media/rc/keymaps/rc-cec.c
1058 +++ b/drivers/media/rc/keymaps/rc-cec.c
1059 @@ -1,5 +1,15 @@
1060 // SPDX-License-Identifier: GPL-2.0-or-later
1061 /* Keytable for the CEC remote control
1062 + *
1063 + * This keymap is unusual in that it can't be built as a module,
1064 + * instead it is registered directly in rc-main.c if CONFIG_MEDIA_CEC_RC
1065 + * is set. This is because it can be called from drm_dp_cec_set_edid() via
1066 + * cec_register_adapter() in an asynchronous context, and it is not
1067 + * allowed to use request_module() to load rc-cec.ko in that case.
1068 + *
1069 + * Since this keymap is only used if CONFIG_MEDIA_CEC_RC is set, we
1070 + * just compile this keymap into the rc-core module and never as a
1071 + * separate module.
1072 *
1073 * Copyright (c) 2015 by Kamil Debski
1074 */
1075 @@ -152,7 +162,7 @@ static struct rc_map_table cec[] = {
1076 /* 0x77-0xff: Reserved */
1077 };
1078
1079 -static struct rc_map_list cec_map = {
1080 +struct rc_map_list cec_map = {
1081 .map = {
1082 .scan = cec,
1083 .size = ARRAY_SIZE(cec),
1084 @@ -160,19 +170,3 @@ static struct rc_map_list cec_map = {
1085 .name = RC_MAP_CEC,
1086 }
1087 };
1088 -
1089 -static int __init init_rc_map_cec(void)
1090 -{
1091 - return rc_map_register(&cec_map);
1092 -}
1093 -
1094 -static void __exit exit_rc_map_cec(void)
1095 -{
1096 - rc_map_unregister(&cec_map);
1097 -}
1098 -
1099 -module_init(init_rc_map_cec);
1100 -module_exit(exit_rc_map_cec);
1101 -
1102 -MODULE_LICENSE("GPL");
1103 -MODULE_AUTHOR("Kamil Debski");
1104 diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
1105 index c4d7e06974d2c..ee80f38970bc4 100644
1106 --- a/drivers/media/rc/rc-main.c
1107 +++ b/drivers/media/rc/rc-main.c
1108 @@ -2033,6 +2033,9 @@ static int __init rc_core_init(void)
1109
1110 led_trigger_register_simple("rc-feedback", &led_feedback);
1111 rc_map_register(&empty_map);
1112 +#ifdef CONFIG_MEDIA_CEC_RC
1113 + rc_map_register(&cec_map);
1114 +#endif
1115
1116 return 0;
1117 }
1118 @@ -2042,6 +2045,9 @@ static void __exit rc_core_exit(void)
1119 lirc_dev_exit();
1120 class_unregister(&rc_class);
1121 led_trigger_unregister_simple(led_feedback);
1122 +#ifdef CONFIG_MEDIA_CEC_RC
1123 + rc_map_unregister(&cec_map);
1124 +#endif
1125 rc_map_unregister(&empty_map);
1126 }
1127
1128 diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
1129 index 6f108996142d7..bbfaec2e6ef61 100644
1130 --- a/drivers/media/usb/usbtv/usbtv-audio.c
1131 +++ b/drivers/media/usb/usbtv/usbtv-audio.c
1132 @@ -399,7 +399,7 @@ void usbtv_audio_free(struct usbtv *usbtv)
1133 cancel_work_sync(&usbtv->snd_trigger);
1134
1135 if (usbtv->snd && usbtv->udev) {
1136 - snd_card_free(usbtv->snd);
1137 + snd_card_free_when_closed(usbtv->snd);
1138 usbtv->snd = NULL;
1139 }
1140 }
1141 diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
1142 index 3a5d2890fe2aa..beaf15807f789 100644
1143 --- a/drivers/misc/fastrpc.c
1144 +++ b/drivers/misc/fastrpc.c
1145 @@ -924,6 +924,11 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
1146 if (!fl->cctx->rpdev)
1147 return -EPIPE;
1148
1149 + if (handle == FASTRPC_INIT_HANDLE && !kernel) {
1150 + dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
1151 + return -EPERM;
1152 + }
1153 +
1154 ctx = fastrpc_context_alloc(fl, kernel, sc, args);
1155 if (IS_ERR(ctx))
1156 return PTR_ERR(ctx);
1157 diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
1158 index 95ff7c5a1dfb6..0a5e5b841aeb1 100644
1159 --- a/drivers/misc/pvpanic.c
1160 +++ b/drivers/misc/pvpanic.c
1161 @@ -166,6 +166,7 @@ static const struct of_device_id pvpanic_mmio_match[] = {
1162 { .compatible = "qemu,pvpanic-mmio", },
1163 {}
1164 };
1165 +MODULE_DEVICE_TABLE(of, pvpanic_mmio_match);
1166
1167 static struct platform_driver pvpanic_mmio_driver = {
1168 .driver = {
1169 diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
1170 index 74de3f2dda38e..28501f165d457 100644
1171 --- a/drivers/mmc/core/bus.c
1172 +++ b/drivers/mmc/core/bus.c
1173 @@ -373,11 +373,6 @@ void mmc_remove_card(struct mmc_card *card)
1174 mmc_remove_card_debugfs(card);
1175 #endif
1176
1177 - if (host->cqe_enabled) {
1178 - host->cqe_ops->cqe_disable(host);
1179 - host->cqe_enabled = false;
1180 - }
1181 -
1182 if (mmc_card_present(card)) {
1183 if (mmc_host_is_spi(card->host)) {
1184 pr_info("%s: SPI card removed\n",
1185 @@ -390,6 +385,10 @@ void mmc_remove_card(struct mmc_card *card)
1186 of_node_put(card->dev.of_node);
1187 }
1188
1189 + if (host->cqe_enabled) {
1190 + host->cqe_ops->cqe_disable(host);
1191 + host->cqe_enabled = false;
1192 + }
1193 +
1194 put_device(&card->dev);
1195 }
1196 -
1197 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1198 index de14b5845f525..9f29288f2c9a9 100644
1199 --- a/drivers/mmc/core/mmc.c
1200 +++ b/drivers/mmc/core/mmc.c
1201 @@ -423,10 +423,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
1202
1203 /* EXT_CSD value is in units of 10ms, but we store in ms */
1204 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
1205 - /* Some eMMC set the value too low so set a minimum */
1206 - if (card->ext_csd.part_time &&
1207 - card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
1208 - card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
1209
1210 /* Sleep / awake timeout in 100ns units */
1211 if (sa_shift > 0 && sa_shift <= 0x17)
1212 @@ -616,6 +612,17 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
1213 card->ext_csd.data_sector_size = 512;
1214 }
1215
1216 + /*
1217 + * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
1218 + * when accessing a specific field", so use it here if there is no
1219 + * PARTITION_SWITCH_TIME.
1220 + */
1221 + if (!card->ext_csd.part_time)
1222 + card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
1223 + /* Some eMMC set the value too low so set a minimum */
1224 + if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
1225 + card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
1226 +
1227 /* eMMC v5 or later */
1228 if (card->ext_csd.rev >= 7) {
1229 memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
1230 diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
1231 index 9d47a2bd2546b..1254a5650cfff 100644
1232 --- a/drivers/mmc/host/mtk-sd.c
1233 +++ b/drivers/mmc/host/mtk-sd.c
1234 @@ -1020,13 +1020,13 @@ static void msdc_track_cmd_data(struct msdc_host *host,
1235 static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
1236 {
1237 unsigned long flags;
1238 - bool ret;
1239
1240 - ret = cancel_delayed_work(&host->req_timeout);
1241 - if (!ret) {
1242 - /* delay work already running */
1243 - return;
1244 - }
1245 + /*
1246 + * No need check the return value of cancel_delayed_work, as only ONE
1247 + * path will go here!
1248 + */
1249 + cancel_delayed_work(&host->req_timeout);
1250 +
1251 spin_lock_irqsave(&host->lock, flags);
1252 host->mrq = NULL;
1253 spin_unlock_irqrestore(&host->lock, flags);
1254 @@ -1046,7 +1046,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
1255 bool done = false;
1256 bool sbc_error;
1257 unsigned long flags;
1258 - u32 *rsp = cmd->resp;
1259 + u32 *rsp;
1260
1261 if (mrq->sbc && cmd == mrq->cmd &&
1262 (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
1263 @@ -1067,6 +1067,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
1264
1265 if (done)
1266 return true;
1267 + rsp = cmd->resp;
1268
1269 sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
1270
1271 @@ -1254,7 +1255,7 @@ static void msdc_data_xfer_next(struct msdc_host *host,
1272 static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
1273 struct mmc_request *mrq, struct mmc_data *data)
1274 {
1275 - struct mmc_command *stop = data->stop;
1276 + struct mmc_command *stop;
1277 unsigned long flags;
1278 bool done;
1279 unsigned int check_data = events &
1280 @@ -1270,6 +1271,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
1281
1282 if (done)
1283 return true;
1284 + stop = data->stop;
1285
1286 if (check_data || (stop && stop->error)) {
1287 dev_dbg(host->dev, "DMA status: 0x%8X\n",
1288 diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
1289 index 4031217d21c37..52054931c3507 100644
1290 --- a/drivers/mmc/host/mxs-mmc.c
1291 +++ b/drivers/mmc/host/mxs-mmc.c
1292 @@ -644,7 +644,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
1293
1294 ret = mmc_of_parse(mmc);
1295 if (ret)
1296 - goto out_clk_disable;
1297 + goto out_free_dma;
1298
1299 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1300
1301 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
1302 index 1bd955e4c7d66..b6d00dfa8b8f6 100644
1303 --- a/drivers/net/can/flexcan.c
1304 +++ b/drivers/net/can/flexcan.c
1305 @@ -548,7 +548,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
1306 u32 reg;
1307
1308 reg = priv->read(&regs->mcr);
1309 - reg |= FLEXCAN_MCR_HALT;
1310 + reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
1311 priv->write(reg, &regs->mcr);
1312
1313 while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
1314 @@ -1057,10 +1057,13 @@ static int flexcan_chip_start(struct net_device *dev)
1315
1316 flexcan_set_bittiming(dev);
1317
1318 + /* set freeze, halt */
1319 + err = flexcan_chip_freeze(priv);
1320 + if (err)
1321 + goto out_chip_disable;
1322 +
1323 /* MCR
1324 *
1325 - * enable freeze
1326 - * halt now
1327 * only supervisor access
1328 * enable warning int
1329 * enable individual RX masking
1330 @@ -1069,9 +1072,8 @@ static int flexcan_chip_start(struct net_device *dev)
1331 */
1332 reg_mcr = priv->read(&regs->mcr);
1333 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
1334 - reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
1335 - FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C |
1336 - FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
1337 + reg_mcr |= FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ |
1338 + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
1339
1340 /* MCR
1341 *
1342 @@ -1432,10 +1434,14 @@ static int register_flexcandev(struct net_device *dev)
1343 if (err)
1344 goto out_chip_disable;
1345
1346 - /* set freeze, halt and activate FIFO, restrict register access */
1347 + /* set freeze, halt */
1348 + err = flexcan_chip_freeze(priv);
1349 + if (err)
1350 + goto out_chip_disable;
1351 +
1352 + /* activate FIFO, restrict register access */
1353 reg = priv->read(&regs->mcr);
1354 - reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
1355 - FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
1356 + reg |= FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
1357 priv->write(reg, &regs->mcr);
1358
1359 /* Currently we only support newer versions of this core
1360 diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
1361 index 1f8710b35c6d7..32cb479fe6ac8 100644
1362 --- a/drivers/net/can/m_can/tcan4x5x.c
1363 +++ b/drivers/net/can/m_can/tcan4x5x.c
1364 @@ -325,14 +325,14 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
1365 if (ret)
1366 return ret;
1367
1368 + /* Zero out the MCAN buffers */
1369 + m_can_init_ram(cdev);
1370 +
1371 ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
1372 TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL);
1373 if (ret)
1374 return ret;
1375
1376 - /* Zero out the MCAN buffers */
1377 - m_can_init_ram(cdev);
1378 -
1379 return ret;
1380 }
1381
1382 diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
1383 index aa693c8e285ab..bde8ec75ac4e9 100644
1384 --- a/drivers/net/ethernet/atheros/alx/main.c
1385 +++ b/drivers/net/ethernet/atheros/alx/main.c
1386 @@ -1897,13 +1897,16 @@ static int alx_resume(struct device *dev)
1387
1388 if (!netif_running(alx->dev))
1389 return 0;
1390 - netif_device_attach(alx->dev);
1391
1392 rtnl_lock();
1393 err = __alx_open(alx, true);
1394 rtnl_unlock();
1395 + if (err)
1396 + return err;
1397
1398 - return err;
1399 + netif_device_attach(alx->dev);
1400 +
1401 + return 0;
1402 }
1403
1404 static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
1405 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1406 index 4ae49d92c1eed..5a7831a97a132 100644
1407 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1408 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1409 @@ -7925,10 +7925,18 @@ static void bnxt_setup_inta(struct bnxt *bp)
1410 bp->irq_tbl[0].handler = bnxt_inta;
1411 }
1412
1413 +static int bnxt_init_int_mode(struct bnxt *bp);
1414 +
1415 static int bnxt_setup_int_mode(struct bnxt *bp)
1416 {
1417 int rc;
1418
1419 + if (!bp->irq_tbl) {
1420 + rc = bnxt_init_int_mode(bp);
1421 + if (rc || !bp->irq_tbl)
1422 + return rc ?: -ENODEV;
1423 + }
1424 +
1425 if (bp->flags & BNXT_FLAG_USING_MSIX)
1426 bnxt_setup_msix(bp);
1427 else
1428 @@ -8113,7 +8121,7 @@ static int bnxt_init_inta(struct bnxt *bp)
1429
1430 static int bnxt_init_int_mode(struct bnxt *bp)
1431 {
1432 - int rc = 0;
1433 + int rc = -ENODEV;
1434
1435 if (bp->flags & BNXT_FLAG_MSIX_CAP)
1436 rc = bnxt_init_msix(bp);
1437 @@ -8748,7 +8756,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
1438 {
1439 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
1440 struct hwrm_func_drv_if_change_input req = {0};
1441 - bool resc_reinit = false, fw_reset = false;
1442 + bool fw_reset = !bp->irq_tbl;
1443 + bool resc_reinit = false;
1444 u32 flags = 0;
1445 int rc;
1446
1447 @@ -8776,6 +8785,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
1448
1449 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
1450 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
1451 + set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
1452 return -ENODEV;
1453 }
1454 if (resc_reinit || fw_reset) {
1455 diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
1456 index 70060c51854fd..0928bec79fe4b 100644
1457 --- a/drivers/net/ethernet/davicom/dm9000.c
1458 +++ b/drivers/net/ethernet/davicom/dm9000.c
1459 @@ -134,6 +134,8 @@ struct board_info {
1460 u32 wake_state;
1461
1462 int ip_summed;
1463 +
1464 + struct regulator *power_supply;
1465 };
1466
1467 /* debug code */
1468 @@ -1454,7 +1456,7 @@ dm9000_probe(struct platform_device *pdev)
1469 if (ret) {
1470 dev_err(dev, "failed to request reset gpio %d: %d\n",
1471 reset_gpios, ret);
1472 - return -ENODEV;
1473 + goto out_regulator_disable;
1474 }
1475
1476 /* According to manual PWRST# Low Period Min 1ms */
1477 @@ -1466,8 +1468,10 @@ dm9000_probe(struct platform_device *pdev)
1478
1479 if (!pdata) {
1480 pdata = dm9000_parse_dt(&pdev->dev);
1481 - if (IS_ERR(pdata))
1482 - return PTR_ERR(pdata);
1483 + if (IS_ERR(pdata)) {
1484 + ret = PTR_ERR(pdata);
1485 + goto out_regulator_disable;
1486 + }
1487 }
1488
1489 /* Init network device */
1490 @@ -1484,6 +1488,8 @@ dm9000_probe(struct platform_device *pdev)
1491
1492 db->dev = &pdev->dev;
1493 db->ndev = ndev;
1494 + if (!IS_ERR(power))
1495 + db->power_supply = power;
1496
1497 spin_lock_init(&db->lock);
1498 mutex_init(&db->addr_lock);
1499 @@ -1708,6 +1714,10 @@ out:
1500 dm9000_release_board(pdev, db);
1501 free_netdev(ndev);
1502
1503 +out_regulator_disable:
1504 + if (!IS_ERR(power))
1505 + regulator_disable(power);
1506 +
1507 return ret;
1508 }
1509
1510 @@ -1765,10 +1775,13 @@ static int
1511 dm9000_drv_remove(struct platform_device *pdev)
1512 {
1513 struct net_device *ndev = platform_get_drvdata(pdev);
1514 + struct board_info *dm = to_dm9000_board(ndev);
1515
1516 unregister_netdev(ndev);
1517 - dm9000_release_board(pdev, netdev_priv(ndev));
1518 + dm9000_release_board(pdev, dm);
1519 free_netdev(ndev); /* free device structure */
1520 + if (dm->power_supply)
1521 + regulator_disable(dm->power_supply);
1522
1523 dev_dbg(&pdev->dev, "released and freed device\n");
1524 return 0;
1525 diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
1526 index 4ef4d41b0d8d6..b77eaf31bd4ed 100644
1527 --- a/drivers/net/ethernet/freescale/enetc/enetc.c
1528 +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
1529 @@ -942,7 +942,7 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
1530 enetc_free_tx_ring(priv->tx_ring[i]);
1531 }
1532
1533 -static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1534 +int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1535 {
1536 int size = cbdr->bd_count * sizeof(struct enetc_cbd);
1537
1538 @@ -963,7 +963,7 @@ static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1539 return 0;
1540 }
1541
1542 -static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1543 +void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1544 {
1545 int size = cbdr->bd_count * sizeof(struct enetc_cbd);
1546
1547 @@ -971,7 +971,7 @@ static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1548 cbdr->bd_base = NULL;
1549 }
1550
1551 -static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
1552 +void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
1553 {
1554 /* set CBDR cache attributes */
1555 enetc_wr(hw, ENETC_SICAR2,
1556 @@ -991,7 +991,7 @@ static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
1557 cbdr->cir = hw->reg + ENETC_SICBDRCIR;
1558 }
1559
1560 -static void enetc_clear_cbdr(struct enetc_hw *hw)
1561 +void enetc_clear_cbdr(struct enetc_hw *hw)
1562 {
1563 enetc_wr(hw, ENETC_SICBDRMR, 0);
1564 }
1565 @@ -1016,13 +1016,12 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
1566 return 0;
1567 }
1568
1569 -static int enetc_configure_si(struct enetc_ndev_priv *priv)
1570 +int enetc_configure_si(struct enetc_ndev_priv *priv)
1571 {
1572 struct enetc_si *si = priv->si;
1573 struct enetc_hw *hw = &si->hw;
1574 int err;
1575
1576 - enetc_setup_cbdr(hw, &si->cbd_ring);
1577 /* set SI cache attributes */
1578 enetc_wr(hw, ENETC_SICAR0,
1579 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1580 @@ -1068,6 +1067,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
1581 if (err)
1582 return err;
1583
1584 + enetc_setup_cbdr(&si->hw, &si->cbd_ring);
1585 +
1586 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
1587 GFP_KERNEL);
1588 if (!priv->cls_rules) {
1589 @@ -1075,14 +1076,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
1590 goto err_alloc_cls;
1591 }
1592
1593 - err = enetc_configure_si(priv);
1594 - if (err)
1595 - goto err_config_si;
1596 -
1597 return 0;
1598
1599 -err_config_si:
1600 - kfree(priv->cls_rules);
1601 err_alloc_cls:
1602 enetc_clear_cbdr(&si->hw);
1603 enetc_free_cbdr(priv->dev, &si->cbd_ring);
1604 diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
1605 index 541b4e2073fe3..b8801a2b6a025 100644
1606 --- a/drivers/net/ethernet/freescale/enetc/enetc.h
1607 +++ b/drivers/net/ethernet/freescale/enetc/enetc.h
1608 @@ -221,6 +221,7 @@ void enetc_get_si_caps(struct enetc_si *si);
1609 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
1610 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
1611 void enetc_free_si_resources(struct enetc_ndev_priv *priv);
1612 +int enetc_configure_si(struct enetc_ndev_priv *priv);
1613
1614 int enetc_open(struct net_device *ndev);
1615 int enetc_close(struct net_device *ndev);
1616 @@ -236,6 +237,10 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1617 void enetc_set_ethtool_ops(struct net_device *ndev);
1618
1619 /* control buffer descriptor ring (CBDR) */
1620 +int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
1621 +void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
1622 +void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr);
1623 +void enetc_clear_cbdr(struct enetc_hw *hw);
1624 int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
1625 char *mac_addr, int si_map);
1626 int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
1627 diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
1628 index 22f70638a4055..ac62464e0416a 100644
1629 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
1630 +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
1631 @@ -854,6 +854,26 @@ static int enetc_init_port_rss_memory(struct enetc_si *si)
1632 return err;
1633 }
1634
1635 +static void enetc_init_unused_port(struct enetc_si *si)
1636 +{
1637 + struct device *dev = &si->pdev->dev;
1638 + struct enetc_hw *hw = &si->hw;
1639 + int err;
1640 +
1641 + si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
1642 + err = enetc_alloc_cbdr(dev, &si->cbd_ring);
1643 + if (err)
1644 + return;
1645 +
1646 + enetc_setup_cbdr(hw, &si->cbd_ring);
1647 +
1648 + enetc_init_port_rfs_memory(si);
1649 + enetc_init_port_rss_memory(si);
1650 +
1651 + enetc_clear_cbdr(hw);
1652 + enetc_free_cbdr(dev, &si->cbd_ring);
1653 +}
1654 +
1655 static int enetc_pf_probe(struct pci_dev *pdev,
1656 const struct pci_device_id *ent)
1657 {
1658 @@ -863,11 +883,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
1659 struct enetc_pf *pf;
1660 int err;
1661
1662 - if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
1663 - dev_info(&pdev->dev, "device is disabled, skipping\n");
1664 - return -ENODEV;
1665 - }
1666 -
1667 err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
1668 if (err) {
1669 dev_err(&pdev->dev, "PCI probing failed\n");
1670 @@ -881,6 +896,13 @@ static int enetc_pf_probe(struct pci_dev *pdev,
1671 goto err_map_pf_space;
1672 }
1673
1674 + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
1675 + enetc_init_unused_port(si);
1676 + dev_info(&pdev->dev, "device is disabled, skipping\n");
1677 + err = -ENODEV;
1678 + goto err_device_disabled;
1679 + }
1680 +
1681 pf = enetc_si_priv(si);
1682 pf->si = si;
1683 pf->total_vfs = pci_sriov_get_totalvfs(pdev);
1684 @@ -920,6 +942,12 @@ static int enetc_pf_probe(struct pci_dev *pdev,
1685 goto err_init_port_rss;
1686 }
1687
1688 + err = enetc_configure_si(priv);
1689 + if (err) {
1690 + dev_err(&pdev->dev, "Failed to configure SI\n");
1691 + goto err_config_si;
1692 + }
1693 +
1694 err = enetc_alloc_msix(priv);
1695 if (err) {
1696 dev_err(&pdev->dev, "MSIX alloc failed\n");
1697 @@ -945,6 +973,7 @@ err_reg_netdev:
1698 enetc_mdio_remove(pf);
1699 enetc_of_put_phy(priv);
1700 enetc_free_msix(priv);
1701 +err_config_si:
1702 err_init_port_rss:
1703 err_init_port_rfs:
1704 err_alloc_msix:
1705 @@ -953,6 +982,7 @@ err_alloc_si_res:
1706 si->ndev = NULL;
1707 free_netdev(ndev);
1708 err_alloc_netdev:
1709 +err_device_disabled:
1710 err_map_pf_space:
1711 enetc_pci_remove(pdev);
1712
1713 diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
1714 index ebd21bf4cfa1e..3a8c2049b417c 100644
1715 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
1716 +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
1717 @@ -189,6 +189,12 @@ static int enetc_vf_probe(struct pci_dev *pdev,
1718 goto err_alloc_si_res;
1719 }
1720
1721 + err = enetc_configure_si(priv);
1722 + if (err) {
1723 + dev_err(&pdev->dev, "Failed to configure SI\n");
1724 + goto err_config_si;
1725 + }
1726 +
1727 err = enetc_alloc_msix(priv);
1728 if (err) {
1729 dev_err(&pdev->dev, "MSIX alloc failed\n");
1730 @@ -208,6 +214,7 @@ static int enetc_vf_probe(struct pci_dev *pdev,
1731
1732 err_reg_netdev:
1733 enetc_free_msix(priv);
1734 +err_config_si:
1735 err_alloc_msix:
1736 enetc_free_si_resources(priv);
1737 err_alloc_si_res:
1738 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
1739 index 1426eb5ddf3df..e34e0854635c3 100644
1740 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
1741 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
1742 @@ -1018,16 +1018,16 @@ struct hclge_fd_tcam_config_3_cmd {
1743 #define HCLGE_FD_AD_DROP_B 0
1744 #define HCLGE_FD_AD_DIRECT_QID_B 1
1745 #define HCLGE_FD_AD_QID_S 2
1746 -#define HCLGE_FD_AD_QID_M GENMASK(12, 2)
1747 +#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
1748 #define HCLGE_FD_AD_USE_COUNTER_B 12
1749 #define HCLGE_FD_AD_COUNTER_NUM_S 13
1750 #define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
1751 #define HCLGE_FD_AD_NXT_STEP_B 20
1752 #define HCLGE_FD_AD_NXT_KEY_S 21
1753 -#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21)
1754 +#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
1755 #define HCLGE_FD_AD_WR_RULE_ID_B 0
1756 #define HCLGE_FD_AD_RULE_ID_S 1
1757 -#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1)
1758 +#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 1)
1759
1760 struct hclge_fd_ad_config_cmd {
1761 u8 stage;
1762 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1763 index 08040cafc06bc..93f3865b679bf 100644
1764 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1765 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1766 @@ -4908,9 +4908,9 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
1767 case BIT(INNER_SRC_MAC):
1768 for (i = 0; i < ETH_ALEN; i++) {
1769 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
1770 - rule->tuples.src_mac[i]);
1771 + rule->tuples_mask.src_mac[i]);
1772 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
1773 - rule->tuples.src_mac[i]);
1774 + rule->tuples_mask.src_mac[i]);
1775 }
1776
1777 return true;
1778 @@ -5939,8 +5939,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
1779 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
1780 fs->m_ext.vlan_tci =
1781 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
1782 - cpu_to_be16(VLAN_VID_MASK) :
1783 - cpu_to_be16(rule->tuples_mask.vlan_tag1);
1784 + 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
1785 }
1786
1787 if (fs->flow_type & FLOW_MAC_EXT) {
1788 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
1789 index 309cdc5ebc1ff..79b13750fa2d2 100644
1790 --- a/drivers/net/ethernet/ibm/ibmvnic.c
1791 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
1792 @@ -1753,10 +1753,9 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1793 if (!is_valid_ether_addr(addr->sa_data))
1794 return -EADDRNOTAVAIL;
1795
1796 - if (adapter->state != VNIC_PROBED) {
1797 - ether_addr_copy(adapter->mac_addr, addr->sa_data);
1798 + ether_addr_copy(adapter->mac_addr, addr->sa_data);
1799 + if (adapter->state != VNIC_PROBED)
1800 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
1801 - }
1802
1803 return rc;
1804 }
1805 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
1806 index 0604b5aaad86f..58211590229b1 100644
1807 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
1808 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
1809 @@ -15142,6 +15142,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1810 if (err) {
1811 dev_info(&pdev->dev,
1812 "setup of misc vector failed: %d\n", err);
1813 + i40e_cloud_filter_exit(pf);
1814 + i40e_fdir_teardown(pf);
1815 goto err_vsis;
1816 }
1817 }
1818 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
1819 index 113f6087c7c9a..b14b164c9601f 100644
1820 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
1821 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
1822 @@ -575,6 +575,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
1823 return -EINVAL;
1824 }
1825
1826 + if (xs->props.mode != XFRM_MODE_TRANSPORT) {
1827 + netdev_err(dev, "Unsupported mode for ipsec offload\n");
1828 + return -EINVAL;
1829 + }
1830 +
1831 if (ixgbe_ipsec_check_mgmt_ip(xs)) {
1832 netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
1833 return -EINVAL;
1834 diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
1835 index 5170dd9d8705b..caaea2c920a6e 100644
1836 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
1837 +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
1838 @@ -272,6 +272,11 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
1839 return -EINVAL;
1840 }
1841
1842 + if (xs->props.mode != XFRM_MODE_TRANSPORT) {
1843 + netdev_err(dev, "Unsupported mode for ipsec offload\n");
1844 + return -EINVAL;
1845 + }
1846 +
1847 if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
1848 struct rx_sa rsa;
1849
1850 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
1851 index a1202e53710cd..5582fba2f5823 100644
1852 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
1853 +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
1854 @@ -47,7 +47,7 @@
1855 #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
1856 #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
1857
1858 -static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
1859 +int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
1860 {
1861 int i, t;
1862 int err = 0;
1863 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1864 index b5eb116249dda..cd165e52ed33c 100644
1865 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1866 +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1867 @@ -3657,6 +3657,8 @@ int mlx4_en_reset_config(struct net_device *dev,
1868 en_err(priv, "Failed starting port\n");
1869 }
1870
1871 + if (!err)
1872 + err = mlx4_en_moderation_update(priv);
1873 out:
1874 mutex_unlock(&mdev->state_lock);
1875 kfree(tmp);
1876 diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
1877 index a2f69c6f0c79f..17a5bd4c68b2b 100644
1878 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
1879 +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
1880 @@ -797,6 +797,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
1881 #define DEV_FEATURE_CHANGED(dev, new_features, feature) \
1882 ((dev->features & feature) ^ (new_features & feature))
1883
1884 +int mlx4_en_moderation_update(struct mlx4_en_priv *priv);
1885 int mlx4_en_reset_config(struct net_device *dev,
1886 struct hwtstamp_config ts_config,
1887 netdev_features_t new_features);
1888 diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
1889 index 18f86e441570c..a042f4607b0d0 100644
1890 --- a/drivers/net/ethernet/renesas/sh_eth.c
1891 +++ b/drivers/net/ethernet/renesas/sh_eth.c
1892 @@ -610,6 +610,8 @@ static struct sh_eth_cpu_data r7s72100_data = {
1893 EESR_TDE,
1894 .fdr_value = 0x0000070f,
1895
1896 + .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,
1897 +
1898 .no_psr = 1,
1899 .apr = 1,
1900 .mpr = 1,
1901 @@ -828,6 +830,8 @@ static struct sh_eth_cpu_data r7s9210_data = {
1902
1903 .fdr_value = 0x0000070f,
1904
1905 + .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,
1906 +
1907 .apr = 1,
1908 .mpr = 1,
1909 .tpauser = 1,
1910 @@ -1131,6 +1135,9 @@ static struct sh_eth_cpu_data sh771x_data = {
1911 EESIPR_CEEFIP | EESIPR_CELFIP |
1912 EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
1913 EESIPR_PREIP | EESIPR_CERFIP,
1914 +
1915 + .trscer_err_mask = DESC_I_RINT8,
1916 +
1917 .tsu = 1,
1918 .dual_port = 1,
1919 };
1920 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1921 index 68c157979b947..a41ac13cc4e55 100644
1922 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1923 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1924 @@ -116,6 +116,23 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr,
1925 ioaddr + DMA_CHAN_INTR_ENA(chan));
1926 }
1927
1928 +static void dwmac410_dma_init_channel(void __iomem *ioaddr,
1929 + struct stmmac_dma_cfg *dma_cfg, u32 chan)
1930 +{
1931 + u32 value;
1932 +
1933 + /* common channel control register config */
1934 + value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
1935 + if (dma_cfg->pblx8)
1936 + value = value | DMA_BUS_MODE_PBL;
1937 +
1938 + writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
1939 +
1940 + /* Mask interrupts by writing to CSR7 */
1941 + writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
1942 + ioaddr + DMA_CHAN_INTR_ENA(chan));
1943 +}
1944 +
1945 static void dwmac4_dma_init(void __iomem *ioaddr,
1946 struct stmmac_dma_cfg *dma_cfg, int atds)
1947 {
1948 @@ -462,7 +479,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
1949 const struct stmmac_dma_ops dwmac410_dma_ops = {
1950 .reset = dwmac4_dma_reset,
1951 .init = dwmac4_dma_init,
1952 - .init_chan = dwmac4_dma_init_channel,
1953 + .init_chan = dwmac410_dma_init_channel,
1954 .init_rx_chan = dwmac4_dma_init_rx_chan,
1955 .init_tx_chan = dwmac4_dma_init_tx_chan,
1956 .axi = dwmac4_dma_axi,
1957 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
1958 index f2a29a90e0854..afdea015f4b45 100644
1959 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
1960 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
1961 @@ -60,10 +60,6 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
1962
1963 value &= ~DMA_CONTROL_ST;
1964 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
1965 -
1966 - value = readl(ioaddr + GMAC_CONFIG);
1967 - value &= ~GMAC_CONFIG_TE;
1968 - writel(value, ioaddr + GMAC_CONFIG);
1969 }
1970
1971 void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
1972 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1973 index ce5d3e9e5dff4..8e7c60e02fa09 100644
1974 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1975 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1976 @@ -4821,6 +4821,8 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
1977 tx_q->cur_tx = 0;
1978 tx_q->dirty_tx = 0;
1979 tx_q->mss = 0;
1980 +
1981 + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1982 }
1983 }
1984
1985 diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
1986 index a6bbe93f29ef6..917f37c176302 100644
1987 --- a/drivers/net/netdevsim/netdev.c
1988 +++ b/drivers/net/netdevsim/netdev.c
1989 @@ -292,6 +292,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
1990
1991 ns = netdev_priv(dev);
1992 ns->netdev = dev;
1993 + u64_stats_init(&ns->syncp);
1994 ns->nsim_dev = nsim_dev;
1995 ns->nsim_dev_port = nsim_dev_port;
1996 ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
1997 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1998 index b718b11607fcd..b0b8a3ce82b68 100644
1999 --- a/drivers/net/phy/phy.c
2000 +++ b/drivers/net/phy/phy.c
2001 @@ -345,15 +345,16 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
2002
2003 phydev->autoneg = autoneg;
2004
2005 - phydev->speed = speed;
2006 + if (autoneg == AUTONEG_DISABLE) {
2007 + phydev->speed = speed;
2008 + phydev->duplex = duplex;
2009 + }
2010
2011 linkmode_copy(phydev->advertising, advertising);
2012
2013 linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2014 phydev->advertising, autoneg == AUTONEG_ENABLE);
2015
2016 - phydev->duplex = duplex;
2017 -
2018 phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
2019
2020 /* Restart the PHY */
2021 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2022 index 05b85b94d9518..6508d70056b3a 100644
2023 --- a/drivers/net/usb/qmi_wwan.c
2024 +++ b/drivers/net/usb/qmi_wwan.c
2025 @@ -441,13 +441,6 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c
2026 goto err;
2027 }
2028
2029 - /* we don't want to modify a running netdev */
2030 - if (netif_running(dev->net)) {
2031 - netdev_err(dev->net, "Cannot change a running device\n");
2032 - ret = -EBUSY;
2033 - goto err;
2034 - }
2035 -
2036 ret = qmimux_register_device(dev->net, mux_id);
2037 if (!ret) {
2038 info->flags |= QMI_WWAN_FLAG_MUX;
2039 @@ -477,13 +470,6 @@ static ssize_t del_mux_store(struct device *d, struct device_attribute *attr, c
2040 if (!rtnl_trylock())
2041 return restart_syscall();
2042
2043 - /* we don't want to modify a running netdev */
2044 - if (netif_running(dev->net)) {
2045 - netdev_err(dev->net, "Cannot change a running device\n");
2046 - ret = -EBUSY;
2047 - goto err;
2048 - }
2049 -
2050 del_dev = qmimux_find_dev(dev, mux_id);
2051 if (!del_dev) {
2052 netdev_err(dev->net, "mux_id not present\n");
2053 diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
2054 index 709e3de0f6af1..60f357d2f79fa 100644
2055 --- a/drivers/net/wan/lapbether.c
2056 +++ b/drivers/net/wan/lapbether.c
2057 @@ -283,7 +283,6 @@ static int lapbeth_open(struct net_device *dev)
2058 return -ENODEV;
2059 }
2060
2061 - netif_start_queue(dev);
2062 return 0;
2063 }
2064
2065 @@ -291,8 +290,6 @@ static int lapbeth_close(struct net_device *dev)
2066 {
2067 int err;
2068
2069 - netif_stop_queue(dev);
2070 -
2071 if ((err = lapb_unregister(dev)) != LAPB_OK)
2072 pr_err("lapb_unregister error: %d\n", err);
2073
2074 diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
2075 index a412b352182c8..d50022d264642 100644
2076 --- a/drivers/net/wireless/ath/ath9k/ath9k.h
2077 +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
2078 @@ -177,7 +177,8 @@ struct ath_frame_info {
2079 s8 txq;
2080 u8 keyix;
2081 u8 rtscts_rate;
2082 - u8 retries : 7;
2083 + u8 retries : 6;
2084 + u8 dyn_smps : 1;
2085 u8 baw_tracked : 1;
2086 u8 tx_power;
2087 enum ath9k_key_type keytype:2;
2088 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2089 index 31e7b108279c6..14e6871a14054 100644
2090 --- a/drivers/net/wireless/ath/ath9k/xmit.c
2091 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
2092 @@ -1271,6 +1271,11 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
2093 is_40, is_sgi, is_sp);
2094 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
2095 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
2096 + if (rix >= 8 && fi->dyn_smps) {
2097 + info->rates[i].RateFlags |=
2098 + ATH9K_RATESERIES_RTS_CTS;
2099 + info->flags |= ATH9K_TXDESC_CTSENA;
2100 + }
2101
2102 info->txpower[i] = ath_get_rate_txpower(sc, bf, rix,
2103 is_40, false);
2104 @@ -2111,6 +2116,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
2105 fi->keyix = an->ps_key;
2106 else
2107 fi->keyix = ATH9K_TXKEYIX_INVALID;
2108 + fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC;
2109 fi->keytype = keytype;
2110 fi->framelen = framelen;
2111 fi->tx_power = txpower;
2112 diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
2113 index 781952b686ed2..d3efcbd48ee1e 100644
2114 --- a/drivers/net/wireless/mediatek/mt76/dma.c
2115 +++ b/drivers/net/wireless/mediatek/mt76/dma.c
2116 @@ -454,13 +454,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
2117 {
2118 struct sk_buff *skb = q->rx_head;
2119 struct skb_shared_info *shinfo = skb_shinfo(skb);
2120 + int nr_frags = shinfo->nr_frags;
2121
2122 - if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
2123 + if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
2124 struct page *page = virt_to_head_page(data);
2125 int offset = data - page_address(page) + q->buf_offset;
2126
2127 - skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
2128 - q->buf_size);
2129 + skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
2130 } else {
2131 skb_free_frag(data);
2132 }
2133 @@ -469,7 +469,10 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
2134 return;
2135
2136 q->rx_head = NULL;
2137 - dev->drv->rx_skb(dev, q - dev->q_rx, skb);
2138 + if (nr_frags < ARRAY_SIZE(shinfo->frags))
2139 + dev->drv->rx_skb(dev, q - dev->q_rx, skb);
2140 + else
2141 + dev_kfree_skb(skb);
2142 }
2143
2144 static int
2145 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2146 index 95d77a17375e1..e4e24e7bf4628 100644
2147 --- a/drivers/nvme/host/core.c
2148 +++ b/drivers/nvme/host/core.c
2149 @@ -455,7 +455,6 @@ static void nvme_free_ns_head(struct kref *ref)
2150
2151 nvme_mpath_remove_disk(head);
2152 ida_simple_remove(&head->subsys->ns_ida, head->instance);
2153 - list_del_init(&head->entry);
2154 cleanup_srcu_struct(&head->srcu);
2155 nvme_put_subsystem(head->subsys);
2156 kfree(head);
2157 @@ -3374,7 +3373,6 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
2158
2159 list_for_each_entry(h, &subsys->nsheads, entry) {
2160 if (nvme_ns_ids_valid(&new->ids) &&
2161 - !list_empty(&h->list) &&
2162 nvme_ns_ids_equal(&new->ids, &h->ids))
2163 return -EINVAL;
2164 }
2165 @@ -3469,6 +3467,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
2166 "IDs don't match for shared namespace %d\n",
2167 nsid);
2168 ret = -EINVAL;
2169 + nvme_put_ns_head(head);
2170 goto out_unlock;
2171 }
2172 }
2173 @@ -3629,6 +3628,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
2174 out_unlink_ns:
2175 mutex_lock(&ctrl->subsys->lock);
2176 list_del_rcu(&ns->siblings);
2177 + if (list_empty(&ns->head->list))
2178 + list_del_init(&ns->head->entry);
2179 mutex_unlock(&ctrl->subsys->lock);
2180 nvme_put_ns_head(ns->head);
2181 out_free_id:
2182 @@ -3651,7 +3652,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2183
2184 mutex_lock(&ns->ctrl->subsys->lock);
2185 list_del_rcu(&ns->siblings);
2186 + if (list_empty(&ns->head->list))
2187 + list_del_init(&ns->head->entry);
2188 mutex_unlock(&ns->ctrl->subsys->lock);
2189 +
2190 synchronize_rcu(); /* guarantee not available in head->list */
2191 nvme_mpath_clear_current_path(ns);
2192 synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
2193 diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
2194 index f4c02da84e599..0bfa5065b4405 100644
2195 --- a/drivers/pci/controller/pci-xgene-msi.c
2196 +++ b/drivers/pci/controller/pci-xgene-msi.c
2197 @@ -384,13 +384,9 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
2198 if (!msi_group->gic_irq)
2199 continue;
2200
2201 - irq_set_chained_handler(msi_group->gic_irq,
2202 - xgene_msi_isr);
2203 - err = irq_set_handler_data(msi_group->gic_irq, msi_group);
2204 - if (err) {
2205 - pr_err("failed to register GIC IRQ handler\n");
2206 - return -EINVAL;
2207 - }
2208 + irq_set_chained_handler_and_data(msi_group->gic_irq,
2209 + xgene_msi_isr, msi_group);
2210 +
2211 /*
2212 * Statically allocate MSI GIC IRQs to each CPU core.
2213 * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
2214 diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
2215 index 626a7c352dfdf..728a59655825d 100644
2216 --- a/drivers/pci/controller/pcie-mediatek.c
2217 +++ b/drivers/pci/controller/pcie-mediatek.c
2218 @@ -1063,14 +1063,14 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
2219 err = of_pci_get_devfn(child);
2220 if (err < 0) {
2221 dev_err(dev, "failed to parse devfn: %d\n", err);
2222 - return err;
2223 + goto error_put_node;
2224 }
2225
2226 slot = PCI_SLOT(err);
2227
2228 err = mtk_pcie_parse_port(pcie, child, slot);
2229 if (err)
2230 - return err;
2231 + goto error_put_node;
2232 }
2233
2234 err = mtk_pcie_subsys_powerup(pcie);
2235 @@ -1086,6 +1086,9 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
2236 mtk_pcie_subsys_powerdown(pcie);
2237
2238 return 0;
2239 +error_put_node:
2240 + of_node_put(child);
2241 + return err;
2242 }
2243
2244 static int mtk_pcie_probe(struct platform_device *pdev)
2245 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2246 index 9add26438be50..3c3bc9f584983 100644
2247 --- a/drivers/pci/pci.c
2248 +++ b/drivers/pci/pci.c
2249 @@ -3903,6 +3903,10 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
2250 ret = logic_pio_register_range(range);
2251 if (ret)
2252 kfree(range);
2253 +
2254 + /* Ignore duplicates due to deferred probing */
2255 + if (ret == -EEXIST)
2256 + ret = 0;
2257 #endif
2258
2259 return ret;
2260 diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
2261 index f64b82824db28..2db7113383fdc 100644
2262 --- a/drivers/platform/olpc/olpc-ec.c
2263 +++ b/drivers/platform/olpc/olpc-ec.c
2264 @@ -426,11 +426,8 @@ static int olpc_ec_probe(struct platform_device *pdev)
2265
2266 /* get the EC revision */
2267 err = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ec->version, 1);
2268 - if (err) {
2269 - ec_priv = NULL;
2270 - kfree(ec);
2271 - return err;
2272 - }
2273 + if (err)
2274 + goto error;
2275
2276 config.dev = pdev->dev.parent;
2277 config.driver_data = ec;
2278 @@ -440,12 +437,16 @@ static int olpc_ec_probe(struct platform_device *pdev)
2279 if (IS_ERR(ec->dcon_rdev)) {
2280 dev_err(&pdev->dev, "failed to register DCON regulator\n");
2281 err = PTR_ERR(ec->dcon_rdev);
2282 - kfree(ec);
2283 - return err;
2284 + goto error;
2285 }
2286
2287 ec->dbgfs_dir = olpc_ec_setup_debugfs();
2288
2289 + return 0;
2290 +
2291 +error:
2292 + ec_priv = NULL;
2293 + kfree(ec);
2294 return err;
2295 }
2296
2297 diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
2298 index a0ebc2e603949..b577c8f7e3462 100644
2299 --- a/drivers/s390/block/dasd.c
2300 +++ b/drivers/s390/block/dasd.c
2301 @@ -3087,7 +3087,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
2302
2303 basedev = block->base;
2304 spin_lock_irq(&dq->lock);
2305 - if (basedev->state < DASD_STATE_READY) {
2306 + if (basedev->state < DASD_STATE_READY ||
2307 + test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
2308 DBF_DEV_EVENT(DBF_ERR, basedev,
2309 "device not ready for request %p", req);
2310 rc = BLK_STS_IOERR;
2311 @@ -3522,8 +3523,6 @@ void dasd_generic_remove(struct ccw_device *cdev)
2312 struct dasd_device *device;
2313 struct dasd_block *block;
2314
2315 - cdev->handler = NULL;
2316 -
2317 device = dasd_device_from_cdev(cdev);
2318 if (IS_ERR(device)) {
2319 dasd_remove_sysfs_files(cdev);
2320 @@ -3542,6 +3541,7 @@ void dasd_generic_remove(struct ccw_device *cdev)
2321 * no quite down yet.
2322 */
2323 dasd_set_target_state(device, DASD_STATE_NEW);
2324 + cdev->handler = NULL;
2325 /* dasd_delete_device destroys the device reference. */
2326 block = device->block;
2327 dasd_delete_device(device);
2328 diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
2329 index f0d71ab77c50e..15df0a5c03ecb 100644
2330 --- a/drivers/s390/cio/vfio_ccw_ops.c
2331 +++ b/drivers/s390/cio/vfio_ccw_ops.c
2332 @@ -506,7 +506,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
2333 if (ret)
2334 return ret;
2335
2336 - return copy_to_user((void __user *)arg, &info, minsz);
2337 + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
2338 }
2339 case VFIO_DEVICE_GET_REGION_INFO:
2340 {
2341 @@ -524,7 +524,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
2342 if (ret)
2343 return ret;
2344
2345 - return copy_to_user((void __user *)arg, &info, minsz);
2346 + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
2347 }
2348 case VFIO_DEVICE_GET_IRQ_INFO:
2349 {
2350 @@ -545,7 +545,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
2351 if (info.count == -1)
2352 return -EINVAL;
2353
2354 - return copy_to_user((void __user *)arg, &info, minsz);
2355 + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
2356 }
2357 case VFIO_DEVICE_SET_IRQS:
2358 {
2359 diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
2360 index 790b0b2b36272..1ec01148018f1 100644
2361 --- a/drivers/s390/crypto/vfio_ap_ops.c
2362 +++ b/drivers/s390/crypto/vfio_ap_ops.c
2363 @@ -1279,7 +1279,7 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg)
2364 info.num_regions = 0;
2365 info.num_irqs = 0;
2366
2367 - return copy_to_user((void __user *)arg, &info, minsz);
2368 + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
2369 }
2370
2371 static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
2372 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
2373 index a14057c67a12a..c5b7d18513b66 100644
2374 --- a/drivers/scsi/libiscsi.c
2375 +++ b/drivers/scsi/libiscsi.c
2376 @@ -1532,14 +1532,9 @@ check_mgmt:
2377 }
2378 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
2379 if (rc) {
2380 - if (rc == -ENOMEM || rc == -EACCES) {
2381 - spin_lock_bh(&conn->taskqueuelock);
2382 - list_add_tail(&conn->task->running,
2383 - &conn->cmdqueue);
2384 - conn->task = NULL;
2385 - spin_unlock_bh(&conn->taskqueuelock);
2386 - goto done;
2387 - } else
2388 + if (rc == -ENOMEM || rc == -EACCES)
2389 + fail_scsi_task(conn->task, DID_IMM_RETRY);
2390 + else
2391 fail_scsi_task(conn->task, DID_ABORT);
2392 spin_lock_bh(&conn->taskqueuelock);
2393 continue;
2394 diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
2395 index 8622cf9d3f640..9e7a6de3c43d1 100644
2396 --- a/drivers/spi/spi-stm32.c
2397 +++ b/drivers/spi/spi-stm32.c
2398 @@ -924,8 +924,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
2399 mask |= STM32H7_SPI_SR_RXP;
2400
2401 if (!(sr & mask)) {
2402 - dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
2403 - sr, ier);
2404 + dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
2405 + sr, ier);
2406 spin_unlock_irqrestore(&spi->lock, flags);
2407 return IRQ_NONE;
2408 }
2409 @@ -952,15 +952,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
2410 }
2411
2412 if (sr & STM32H7_SPI_SR_OVR) {
2413 - dev_warn(spi->dev, "Overrun: received value discarded\n");
2414 - if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
2415 - stm32h7_spi_read_rxfifo(spi, false);
2416 - /*
2417 - * If overrun is detected while using DMA, it means that
2418 - * something went wrong, so stop the current transfer
2419 - */
2420 - if (spi->cur_usedma)
2421 - end = true;
2422 + dev_err(spi->dev, "Overrun: RX data lost\n");
2423 + end = true;
2424 }
2425
2426 if (sr & STM32H7_SPI_SR_EOT) {
2427 diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
2428 index e035c9f757a1c..2176d3289eff5 100644
2429 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c
2430 +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
2431 @@ -260,6 +260,7 @@ static irqreturn_t apci1032_interrupt(int irq, void *d)
2432 struct apci1032_private *devpriv = dev->private;
2433 struct comedi_subdevice *s = dev->read_subdev;
2434 unsigned int ctrl;
2435 + unsigned short val;
2436
2437 /* check interrupt is from this device */
2438 if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) &
2439 @@ -275,7 +276,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d)
2440 outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG);
2441
2442 s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff;
2443 - comedi_buf_write_samples(s, &s->state, 1);
2444 + val = s->state;
2445 + comedi_buf_write_samples(s, &val, 1);
2446 comedi_handle_events(dev, s);
2447
2448 /* enable the interrupt */
2449 diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
2450 index 816dd25b9d0e4..8c3eff7cf465c 100644
2451 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c
2452 +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
2453 @@ -208,7 +208,7 @@ static irqreturn_t apci1500_interrupt(int irq, void *d)
2454 struct comedi_device *dev = d;
2455 struct apci1500_private *devpriv = dev->private;
2456 struct comedi_subdevice *s = dev->read_subdev;
2457 - unsigned int status = 0;
2458 + unsigned short status = 0;
2459 unsigned int val;
2460
2461 val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
2462 @@ -238,14 +238,14 @@ static irqreturn_t apci1500_interrupt(int irq, void *d)
2463 *
2464 * Mask Meaning
2465 * ---------- ------------------------------------------
2466 - * 0x00000001 Event 1 has occurred
2467 - * 0x00000010 Event 2 has occurred
2468 - * 0x00000100 Counter/timer 1 has run down (not implemented)
2469 - * 0x00001000 Counter/timer 2 has run down (not implemented)
2470 - * 0x00010000 Counter 3 has run down (not implemented)
2471 - * 0x00100000 Watchdog has run down (not implemented)
2472 - * 0x01000000 Voltage error
2473 - * 0x10000000 Short-circuit error
2474 + * 0b00000001 Event 1 has occurred
2475 + * 0b00000010 Event 2 has occurred
2476 + * 0b00000100 Counter/timer 1 has run down (not implemented)
2477 + * 0b00001000 Counter/timer 2 has run down (not implemented)
2478 + * 0b00010000 Counter 3 has run down (not implemented)
2479 + * 0b00100000 Watchdog has run down (not implemented)
2480 + * 0b01000000 Voltage error
2481 + * 0b10000000 Short-circuit error
2482 */
2483 comedi_buf_write_samples(s, &status, 1);
2484 comedi_handle_events(dev, s);
2485 diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
2486 index ddc0dc93d08b6..eca5fa8a9eb8e 100644
2487 --- a/drivers/staging/comedi/drivers/adv_pci1710.c
2488 +++ b/drivers/staging/comedi/drivers/adv_pci1710.c
2489 @@ -300,11 +300,11 @@ static int pci1710_ai_eoc(struct comedi_device *dev,
2490 static int pci1710_ai_read_sample(struct comedi_device *dev,
2491 struct comedi_subdevice *s,
2492 unsigned int cur_chan,
2493 - unsigned int *val)
2494 + unsigned short *val)
2495 {
2496 const struct boardtype *board = dev->board_ptr;
2497 struct pci1710_private *devpriv = dev->private;
2498 - unsigned int sample;
2499 + unsigned short sample;
2500 unsigned int chan;
2501
2502 sample = inw(dev->iobase + PCI171X_AD_DATA_REG);
2503 @@ -345,7 +345,7 @@ static int pci1710_ai_insn_read(struct comedi_device *dev,
2504 pci1710_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1);
2505
2506 for (i = 0; i < insn->n; i++) {
2507 - unsigned int val;
2508 + unsigned short val;
2509
2510 /* start conversion */
2511 outw(0, dev->iobase + PCI171X_SOFTTRG_REG);
2512 @@ -395,7 +395,7 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
2513 {
2514 struct comedi_cmd *cmd = &s->async->cmd;
2515 unsigned int status;
2516 - unsigned int val;
2517 + unsigned short val;
2518 int ret;
2519
2520 status = inw(dev->iobase + PCI171X_STATUS_REG);
2521 @@ -455,7 +455,7 @@ static void pci1710_handle_fifo(struct comedi_device *dev,
2522 }
2523
2524 for (i = 0; i < devpriv->max_samples; i++) {
2525 - unsigned int val;
2526 + unsigned short val;
2527 int ret;
2528
2529 ret = pci1710_ai_read_sample(dev, s, s->async->cur_chan, &val);
2530 diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
2531 index f99211ec46deb..0034005bdf8f1 100644
2532 --- a/drivers/staging/comedi/drivers/das6402.c
2533 +++ b/drivers/staging/comedi/drivers/das6402.c
2534 @@ -186,7 +186,7 @@ static irqreturn_t das6402_interrupt(int irq, void *d)
2535 if (status & DAS6402_STATUS_FFULL) {
2536 async->events |= COMEDI_CB_OVERFLOW;
2537 } else if (status & DAS6402_STATUS_FFNE) {
2538 - unsigned int val;
2539 + unsigned short val;
2540
2541 val = das6402_ai_read_sample(dev, s);
2542 comedi_buf_write_samples(s, &val, 1);
2543 diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
2544 index 8cf09ef3012fa..4bd8fd5218c8f 100644
2545 --- a/drivers/staging/comedi/drivers/das800.c
2546 +++ b/drivers/staging/comedi/drivers/das800.c
2547 @@ -427,7 +427,7 @@ static irqreturn_t das800_interrupt(int irq, void *d)
2548 struct comedi_cmd *cmd;
2549 unsigned long irq_flags;
2550 unsigned int status;
2551 - unsigned int val;
2552 + unsigned short val;
2553 bool fifo_empty;
2554 bool fifo_overflow;
2555 int i;
2556 diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
2557 index 75693cdde3138..c180d18ce517f 100644
2558 --- a/drivers/staging/comedi/drivers/dmm32at.c
2559 +++ b/drivers/staging/comedi/drivers/dmm32at.c
2560 @@ -404,7 +404,7 @@ static irqreturn_t dmm32at_isr(int irq, void *d)
2561 {
2562 struct comedi_device *dev = d;
2563 unsigned char intstat;
2564 - unsigned int val;
2565 + unsigned short val;
2566 int i;
2567
2568 if (!dev->attached) {
2569 diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
2570 index ee53571a89698..ead8000b5929b 100644
2571 --- a/drivers/staging/comedi/drivers/me4000.c
2572 +++ b/drivers/staging/comedi/drivers/me4000.c
2573 @@ -924,7 +924,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
2574 struct comedi_subdevice *s = dev->read_subdev;
2575 int i;
2576 int c = 0;
2577 - unsigned int lval;
2578 + unsigned short lval;
2579
2580 if (!dev->attached)
2581 return IRQ_NONE;
2582 diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
2583 index a5937206bf1cd..e9abae4180625 100644
2584 --- a/drivers/staging/comedi/drivers/pcl711.c
2585 +++ b/drivers/staging/comedi/drivers/pcl711.c
2586 @@ -184,7 +184,7 @@ static irqreturn_t pcl711_interrupt(int irq, void *d)
2587 struct comedi_device *dev = d;
2588 struct comedi_subdevice *s = dev->read_subdev;
2589 struct comedi_cmd *cmd = &s->async->cmd;
2590 - unsigned int data;
2591 + unsigned short data;
2592
2593 if (!dev->attached) {
2594 dev_err(dev->class_dev, "spurious interrupt\n");
2595 diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
2596 index 0af5315d43575..fc8afffc1815f 100644
2597 --- a/drivers/staging/comedi/drivers/pcl818.c
2598 +++ b/drivers/staging/comedi/drivers/pcl818.c
2599 @@ -423,7 +423,7 @@ static int pcl818_ai_eoc(struct comedi_device *dev,
2600
2601 static bool pcl818_ai_write_sample(struct comedi_device *dev,
2602 struct comedi_subdevice *s,
2603 - unsigned int chan, unsigned int val)
2604 + unsigned int chan, unsigned short val)
2605 {
2606 struct pcl818_private *devpriv = dev->private;
2607 struct comedi_cmd *cmd = &s->async->cmd;
2608 diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
2609 index 3cffc8be66563..e61bd8e1d246f 100644
2610 --- a/drivers/staging/ks7010/ks_wlan_net.c
2611 +++ b/drivers/staging/ks7010/ks_wlan_net.c
2612 @@ -1120,6 +1120,7 @@ static int ks_wlan_set_scan(struct net_device *dev,
2613 {
2614 struct ks_wlan_private *priv = netdev_priv(dev);
2615 struct iw_scan_req *req = NULL;
2616 + int len;
2617
2618 if (priv->sleep_mode == SLP_SLEEP)
2619 return -EPERM;
2620 @@ -1129,8 +1130,9 @@ static int ks_wlan_set_scan(struct net_device *dev,
2621 if (wrqu->data.length == sizeof(struct iw_scan_req) &&
2622 wrqu->data.flags & IW_SCAN_THIS_ESSID) {
2623 req = (struct iw_scan_req *)extra;
2624 - priv->scan_ssid_len = req->essid_len;
2625 - memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
2626 + len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
2627 + priv->scan_ssid_len = len;
2628 + memcpy(priv->scan_ssid, req->essid, len);
2629 } else {
2630 priv->scan_ssid_len = 0;
2631 }
2632 diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
2633 index 51a5b71f8c256..1c0300ce63369 100644
2634 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c
2635 +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
2636 @@ -784,6 +784,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
2637 /* SSID */
2638 p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
2639 if (p && ie_len > 0) {
2640 + ie_len = min_t(int, ie_len, sizeof(pbss_network->ssid.ssid));
2641 memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid));
2642 memcpy(pbss_network->ssid.ssid, (p + 2), ie_len);
2643 pbss_network->ssid.ssid_length = ie_len;
2644 @@ -802,6 +803,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
2645 /* get supported rates */
2646 p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
2647 if (p) {
2648 + ie_len = min_t(int, ie_len, NDIS_802_11_LENGTH_RATES_EX);
2649 memcpy(supportRate, p + 2, ie_len);
2650 supportRateNum = ie_len;
2651 }
2652 @@ -809,6 +811,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
2653 /* get ext_supported rates */
2654 p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_);
2655 if (p) {
2656 + ie_len = min_t(int, ie_len,
2657 + NDIS_802_11_LENGTH_RATES_EX - supportRateNum);
2658 memcpy(supportRate + supportRateNum, p + 2, ie_len);
2659 supportRateNum += ie_len;
2660 }
2661 @@ -922,6 +926,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
2662
2663 pht_cap->mcs.rx_mask[0] = 0xff;
2664 pht_cap->mcs.rx_mask[1] = 0x0;
2665 + ie_len = min_t(int, ie_len, sizeof(pmlmepriv->htpriv.ht_cap));
2666 memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len);
2667 }
2668
2669 diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
2670 index 630e7d933b104..7b83f0920f3c8 100644
2671 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
2672 +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
2673 @@ -1160,9 +1160,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
2674 break;
2675 }
2676 sec_len = *(pos++); len -= 1;
2677 - if (sec_len > 0 && sec_len <= len) {
2678 + if (sec_len > 0 &&
2679 + sec_len <= len &&
2680 + sec_len <= 32) {
2681 ssid[ssid_index].ssid_length = sec_len;
2682 - memcpy(ssid[ssid_index].ssid, pos, ssid[ssid_index].ssid_length);
2683 + memcpy(ssid[ssid_index].ssid, pos, sec_len);
2684 ssid_index++;
2685 }
2686 pos += sec_len;
2687 diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
2688 index 16bcee13f64b5..407effde5e71a 100644
2689 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
2690 +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
2691 @@ -406,9 +406,10 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
2692 struct iw_scan_req *req = (struct iw_scan_req *)b;
2693
2694 if (req->essid_len) {
2695 - ieee->current_network.ssid_len = req->essid_len;
2696 - memcpy(ieee->current_network.ssid, req->essid,
2697 - req->essid_len);
2698 + int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
2699 +
2700 + ieee->current_network.ssid_len = len;
2701 + memcpy(ieee->current_network.ssid, req->essid, len);
2702 }
2703 }
2704
2705 diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
2706 index 5822bb7984b91..8f10672ade37e 100644
2707 --- a/drivers/staging/rtl8192u/r8192U_wx.c
2708 +++ b/drivers/staging/rtl8192u/r8192U_wx.c
2709 @@ -333,8 +333,10 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
2710 struct iw_scan_req *req = (struct iw_scan_req *)b;
2711
2712 if (req->essid_len) {
2713 - ieee->current_network.ssid_len = req->essid_len;
2714 - memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
2715 + int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
2716 +
2717 + ieee->current_network.ssid_len = len;
2718 + memcpy(ieee->current_network.ssid, req->essid, len);
2719 }
2720 }
2721
2722 diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
2723 index 26b618008fcfe..d989229de88bd 100644
2724 --- a/drivers/staging/rtl8712/rtl871x_cmd.c
2725 +++ b/drivers/staging/rtl8712/rtl871x_cmd.c
2726 @@ -197,8 +197,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
2727 psurveyPara->ss_ssidlen = 0;
2728 memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1);
2729 if ((pssid != NULL) && (pssid->SsidLength)) {
2730 - memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength);
2731 - psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength);
2732 + int len = min_t(int, pssid->SsidLength, IW_ESSID_MAX_SIZE);
2733 +
2734 + memcpy(psurveyPara->ss_ssid, pssid->Ssid, len);
2735 + psurveyPara->ss_ssidlen = cpu_to_le32(len);
2736 }
2737 set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
2738 r8712_enqueue_cmd(pcmdpriv, ph2c);
2739 diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
2740 index 944336e0d2e2f..cff918d8bcb54 100644
2741 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
2742 +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
2743 @@ -928,7 +928,7 @@ static int r871x_wx_set_priv(struct net_device *dev,
2744 struct iw_point *dwrq = (struct iw_point *)awrq;
2745
2746 len = dwrq->length;
2747 - ext = memdup_user(dwrq->pointer, len);
2748 + ext = strndup_user(dwrq->pointer, len);
2749 if (IS_ERR(ext))
2750 return PTR_ERR(ext);
2751
2752 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
2753 index 5e931690e6979..51e690ab4d295 100644
2754 --- a/drivers/target/target_core_pr.c
2755 +++ b/drivers/target/target_core_pr.c
2756 @@ -3731,6 +3731,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
2757 spin_unlock(&dev->t10_pr.registration_lock);
2758
2759 put_unaligned_be32(add_len, &buf[4]);
2760 + target_set_cmd_data_length(cmd, 8 + add_len);
2761
2762 transport_kunmap_data_sg(cmd);
2763
2764 @@ -3749,7 +3750,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
2765 struct t10_pr_registration *pr_reg;
2766 unsigned char *buf;
2767 u64 pr_res_key;
2768 - u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
2769 + u32 add_len = 0;
2770
2771 if (cmd->data_length < 8) {
2772 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
2773 @@ -3767,8 +3768,9 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
2774 pr_reg = dev->dev_pr_res_holder;
2775 if (pr_reg) {
2776 /*
2777 - * Set the hardcoded Additional Length
2778 + * Set the Additional Length to 16 when a reservation is held
2779 */
2780 + add_len = 16;
2781 put_unaligned_be32(add_len, &buf[4]);
2782
2783 if (cmd->data_length < 22)
2784 @@ -3804,6 +3806,8 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
2785 (pr_reg->pr_res_type & 0x0f);
2786 }
2787
2788 + target_set_cmd_data_length(cmd, 8 + add_len);
2789 +
2790 err:
2791 spin_unlock(&dev->dev_reservation_lock);
2792 transport_kunmap_data_sg(cmd);
2793 @@ -3822,7 +3826,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
2794 struct se_device *dev = cmd->se_dev;
2795 struct t10_reservation *pr_tmpl = &dev->t10_pr;
2796 unsigned char *buf;
2797 - u16 add_len = 8; /* Hardcoded to 8. */
2798 + u16 len = 8; /* Hardcoded to 8. */
2799
2800 if (cmd->data_length < 6) {
2801 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
2802 @@ -3834,7 +3838,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
2803 if (!buf)
2804 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2805
2806 - put_unaligned_be16(add_len, &buf[0]);
2807 + put_unaligned_be16(len, &buf[0]);
2808 buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */
2809 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
2810 buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
2811 @@ -3863,6 +3867,8 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
2812 buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
2813 buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
2814
2815 + target_set_cmd_data_length(cmd, len);
2816 +
2817 transport_kunmap_data_sg(cmd);
2818
2819 return 0;
2820 @@ -4023,6 +4029,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
2821 * Set ADDITIONAL_LENGTH
2822 */
2823 put_unaligned_be32(add_len, &buf[4]);
2824 + target_set_cmd_data_length(cmd, 8 + add_len);
2825
2826 transport_kunmap_data_sg(cmd);
2827
2828 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2829 index b1f4be055f838..a16835c0bb1dd 100644
2830 --- a/drivers/target/target_core_transport.c
2831 +++ b/drivers/target/target_core_transport.c
2832 @@ -873,11 +873,9 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
2833 }
2834 EXPORT_SYMBOL(target_complete_cmd);
2835
2836 -void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
2837 +void target_set_cmd_data_length(struct se_cmd *cmd, int length)
2838 {
2839 - if ((scsi_status == SAM_STAT_GOOD ||
2840 - cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2841 - length < cmd->data_length) {
2842 + if (length < cmd->data_length) {
2843 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2844 cmd->residual_count += cmd->data_length - length;
2845 } else {
2846 @@ -887,6 +885,15 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
2847
2848 cmd->data_length = length;
2849 }
2850 +}
2851 +EXPORT_SYMBOL(target_set_cmd_data_length);
2852 +
2853 +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
2854 +{
2855 + if (scsi_status == SAM_STAT_GOOD ||
2856 + cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
2857 + target_set_cmd_data_length(cmd, length);
2858 + }
2859
2860 target_complete_cmd(cmd, scsi_status);
2861 }
2862 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2863 index 681374a3b3684..00bfc81f24702 100644
2864 --- a/drivers/usb/class/cdc-acm.c
2865 +++ b/drivers/usb/class/cdc-acm.c
2866 @@ -1941,6 +1941,11 @@ static const struct usb_device_id acm_ids[] = {
2867 .driver_info = SEND_ZERO_PACKET,
2868 },
2869
2870 + /* Exclude Goodix Fingerprint Reader */
2871 + { USB_DEVICE(0x27c6, 0x5395),
2872 + .driver_info = IGNORE_DEVICE,
2873 + },
2874 +
2875 /* control interfaces without any protocol set */
2876 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
2877 USB_CDC_PROTO_NONE) },
2878 diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
2879 index c9f6e97582885..f27b4aecff3d4 100644
2880 --- a/drivers/usb/class/usblp.c
2881 +++ b/drivers/usb/class/usblp.c
2882 @@ -494,16 +494,24 @@ static int usblp_release(struct inode *inode, struct file *file)
2883 /* No kernel lock - fine */
2884 static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait)
2885 {
2886 - __poll_t ret;
2887 + struct usblp *usblp = file->private_data;
2888 + __poll_t ret = 0;
2889 unsigned long flags;
2890
2891 - struct usblp *usblp = file->private_data;
2892 /* Should we check file->f_mode & FMODE_WRITE before poll_wait()? */
2893 poll_wait(file, &usblp->rwait, wait);
2894 poll_wait(file, &usblp->wwait, wait);
2895 +
2896 + mutex_lock(&usblp->mut);
2897 + if (!usblp->present)
2898 + ret |= EPOLLHUP;
2899 + mutex_unlock(&usblp->mut);
2900 +
2901 spin_lock_irqsave(&usblp->lock, flags);
2902 - ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN | EPOLLRDNORM : 0) |
2903 - ((usblp->no_paper || usblp->wcomplete) ? EPOLLOUT | EPOLLWRNORM : 0);
2904 + if (usblp->bidir && usblp->rcomplete)
2905 + ret |= EPOLLIN | EPOLLRDNORM;
2906 + if (usblp->no_paper || usblp->wcomplete)
2907 + ret |= EPOLLOUT | EPOLLWRNORM;
2908 spin_unlock_irqrestore(&usblp->lock, flags);
2909 return ret;
2910 }
2911 diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
2912 index 261af9e38dddf..7874b97e33227 100644
2913 --- a/drivers/usb/dwc3/dwc3-qcom.c
2914 +++ b/drivers/usb/dwc3/dwc3-qcom.c
2915 @@ -251,8 +251,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
2916 for (i = qcom->num_clocks - 1; i >= 0; i--)
2917 clk_disable_unprepare(qcom->clks[i]);
2918
2919 + if (device_may_wakeup(qcom->dev))
2920 + dwc3_qcom_enable_interrupts(qcom);
2921 +
2922 qcom->is_suspended = true;
2923 - dwc3_qcom_enable_interrupts(qcom);
2924
2925 return 0;
2926 }
2927 @@ -265,7 +267,8 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
2928 if (!qcom->is_suspended)
2929 return 0;
2930
2931 - dwc3_qcom_disable_interrupts(qcom);
2932 + if (device_may_wakeup(qcom->dev))
2933 + dwc3_qcom_disable_interrupts(qcom);
2934
2935 for (i = 0; i < qcom->num_clocks; i++) {
2936 ret = clk_prepare_enable(qcom->clks[i]);
2937 @@ -528,16 +531,19 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
2938 ret = of_platform_populate(np, NULL, NULL, dev);
2939 if (ret) {
2940 dev_err(dev, "failed to register dwc3 core - %d\n", ret);
2941 - return ret;
2942 + goto node_put;
2943 }
2944
2945 qcom->dwc3 = of_find_device_by_node(dwc3_np);
2946 if (!qcom->dwc3) {
2947 + ret = -ENODEV;
2948 dev_err(dev, "failed to get dwc3 platform device\n");
2949 - return -ENODEV;
2950 }
2951
2952 - return 0;
2953 +node_put:
2954 + of_node_put(dwc3_np);
2955 +
2956 + return ret;
2957 }
2958
2959 static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
2960 diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
2961 index 00d346965f7a5..560382e0a8f38 100644
2962 --- a/drivers/usb/gadget/function/f_uac1.c
2963 +++ b/drivers/usb/gadget/function/f_uac1.c
2964 @@ -499,6 +499,7 @@ static void f_audio_disable(struct usb_function *f)
2965 uac1->as_out_alt = 0;
2966 uac1->as_in_alt = 0;
2967
2968 + u_audio_stop_playback(&uac1->g_audio);
2969 u_audio_stop_capture(&uac1->g_audio);
2970 }
2971
2972 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
2973 index 5d960b6603b6f..6f03e944e0e31 100644
2974 --- a/drivers/usb/gadget/function/f_uac2.c
2975 +++ b/drivers/usb/gadget/function/f_uac2.c
2976 @@ -478,7 +478,7 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
2977 }
2978
2979 max_size_bw = num_channels(chmask) * ssize *
2980 - DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
2981 + ((srate / (factor / (1 << (ep_desc->bInterval - 1)))) + 1);
2982 ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
2983 max_size_ep));
2984
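
The f_uac2 change swaps DIV_ROUND_UP() for a plain division plus one, so the reserved packet size always carries one extra sample of headroom even when the sample rate divides the packet rate exactly. A quick userspace check of the arithmetic (file name and the 16-bit stereo, 48 kHz, 1000 packets-per-second values are just an example): the old expression reserves 48 samples per packet, the new one 49.

/* uac2_pktsize_demo.c - hypothetical arithmetic check for the change above. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int old_way(unsigned int srate, unsigned int pkts_per_sec)
{
	return DIV_ROUND_UP(srate, pkts_per_sec);	/* no headroom at exact multiples */
}

static unsigned int new_way(unsigned int srate, unsigned int pkts_per_sec)
{
	return srate / pkts_per_sec + 1;		/* always one spare sample */
}

int main(void)
{
	unsigned int channels = 2, ssize = 2;		/* made-up 16-bit stereo stream */
	unsigned int srate = 48000, pkts_per_sec = 1000; /* full speed, bInterval = 1 */

	printf("old: %u bytes, new: %u bytes\n",
	       channels * ssize * old_way(srate, pkts_per_sec),
	       channels * ssize * new_way(srate, pkts_per_sec));
	return 0;
}
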
2985 diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
2986 index d8b92485b727e..5b1d4771d44f4 100644
2987 --- a/drivers/usb/gadget/function/u_ether_configfs.h
2988 +++ b/drivers/usb/gadget/function/u_ether_configfs.h
2989 @@ -169,12 +169,11 @@ out: \
2990 size_t len) \
2991 { \
2992 struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
2993 - int ret; \
2994 + int ret = -EINVAL; \
2995 u8 val; \
2996 \
2997 mutex_lock(&opts->lock); \
2998 - ret = sscanf(page, "%02hhx", &val); \
2999 - if (ret > 0) { \
3000 + if (sscanf(page, "%02hhx", &val) > 0) { \
3001 opts->_n_ = val; \
3002 ret = len; \
3003 } \
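
The u_ether_configfs macro now starts from -EINVAL and only treats the write as successful when sscanf() actually matched a hex byte, instead of returning whatever sscanf() happened to return. A small userspace model of that parsing rule (hypothetical file and function names):

/* parse_hex_demo.c - hypothetical model of the store path above. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int store_byte(const char *page, size_t len, unsigned char *out)
{
	int ret = -EINVAL;		/* anything unmatched is rejected */
	unsigned char val;

	if (sscanf(page, "%02hhx", &val) > 0) {
		*out = val;
		ret = (int)len;		/* report the whole write as consumed */
	}
	return ret;
}

int main(void)
{
	unsigned char b = 0;
	const char *good = "a5\n", *bad = "zz\n";

	printf("good: %d (b=0x%02x)\n", store_byte(good, strlen(good), &b), b);
	printf("bad:  %d (b=0x%02x)\n", store_byte(bad, strlen(bad), &b), b);
	return 0;
}
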
3004 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3005 index d08b0079eecb1..71ef473df585f 100644
3006 --- a/drivers/usb/host/xhci-pci.c
3007 +++ b/drivers/usb/host/xhci-pci.c
3008 @@ -62,6 +62,7 @@
3009 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
3010 #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242
3011 #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142
3012 +#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242
3013
3014 static const char hcd_name[] = "xhci_hcd";
3015
3016 @@ -258,11 +259,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3017 pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
3018 xhci->quirks |= XHCI_BROKEN_STREAMS;
3019 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
3020 - pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
3021 + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
3022 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
3023 + xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
3024 + }
3025 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
3026 (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI ||
3027 - pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI))
3028 + pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI ||
3029 + pdev->device == PCI_DEVICE_ID_ASMEDIA_3242_XHCI))
3030 xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
3031
3032 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
3033 @@ -277,6 +281,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3034 pdev->device == 0x9026)
3035 xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
3036
3037 + if (pdev->vendor == PCI_VENDOR_ID_AMD &&
3038 + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2 ||
3039 + pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
3040 + xhci->quirks |= XHCI_NO_SOFT_RETRY;
3041 +
3042 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3043 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3044 "QUIRK: Resetting on resume");
3045 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3046 index 900ea91fb3c6b..f6b5010deb735 100644
3047 --- a/drivers/usb/host/xhci-ring.c
3048 +++ b/drivers/usb/host/xhci-ring.c
3049 @@ -2299,7 +2299,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
3050 remaining = 0;
3051 break;
3052 case COMP_USB_TRANSACTION_ERROR:
3053 - if ((ep_ring->err_count++ > MAX_SOFT_RETRY) ||
3054 + if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
3055 + (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
3056 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
3057 break;
3058 *status = 0;
3059 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3060 index 91330517444e7..b5080bc1689e4 100644
3061 --- a/drivers/usb/host/xhci.c
3062 +++ b/drivers/usb/host/xhci.c
3063 @@ -883,44 +883,42 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
3064 xhci_set_cmd_ring_deq(xhci);
3065 }
3066
3067 -static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
3068 +/*
3069 + * Disable port wake bits if do_wakeup is not set.
3070 + *
3071 + * Also clear a possible internal port wake state left hanging for ports that
3072 + * detected termination but never successfully enumerated (trained to 0U).
3073 + * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done
3074 + * at enumeration clears this wake, force one here as well for unconnected ports
3075 + */
3076 +
3077 +static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
3078 + struct xhci_hub *rhub,
3079 + bool do_wakeup)
3080 {
3081 - struct xhci_port **ports;
3082 - int port_index;
3083 unsigned long flags;
3084 u32 t1, t2, portsc;
3085 + int i;
3086
3087 spin_lock_irqsave(&xhci->lock, flags);
3088
3089 - /* disable usb3 ports Wake bits */
3090 - port_index = xhci->usb3_rhub.num_ports;
3091 - ports = xhci->usb3_rhub.ports;
3092 - while (port_index--) {
3093 - t1 = readl(ports[port_index]->addr);
3094 - portsc = t1;
3095 - t1 = xhci_port_state_to_neutral(t1);
3096 - t2 = t1 & ~PORT_WAKE_BITS;
3097 - if (t1 != t2) {
3098 - writel(t2, ports[port_index]->addr);
3099 - xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
3100 - xhci->usb3_rhub.hcd->self.busnum,
3101 - port_index + 1, portsc, t2);
3102 - }
3103 - }
3104 + for (i = 0; i < rhub->num_ports; i++) {
3105 + portsc = readl(rhub->ports[i]->addr);
3106 + t1 = xhci_port_state_to_neutral(portsc);
3107 + t2 = t1;
3108 +
3109 + /* clear wake bits if do_wake is not set */
3110 + if (!do_wakeup)
3111 + t2 &= ~PORT_WAKE_BITS;
3112 +
3113 + /* Don't touch csc bit if connected or connect change is set */
3114 + if (!(portsc & (PORT_CSC | PORT_CONNECT)))
3115 + t2 |= PORT_CSC;
3116
3117 - /* disable usb2 ports Wake bits */
3118 - port_index = xhci->usb2_rhub.num_ports;
3119 - ports = xhci->usb2_rhub.ports;
3120 - while (port_index--) {
3121 - t1 = readl(ports[port_index]->addr);
3122 - portsc = t1;
3123 - t1 = xhci_port_state_to_neutral(t1);
3124 - t2 = t1 & ~PORT_WAKE_BITS;
3125 if (t1 != t2) {
3126 - writel(t2, ports[port_index]->addr);
3127 - xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
3128 - xhci->usb2_rhub.hcd->self.busnum,
3129 - port_index + 1, portsc, t2);
3130 + writel(t2, rhub->ports[i]->addr);
3131 + xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
3132 + rhub->hcd->self.busnum, i + 1, portsc, t2);
3133 }
3134 }
3135 spin_unlock_irqrestore(&xhci->lock, flags);
3136 @@ -983,8 +981,8 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
3137 return -EINVAL;
3138
3139 /* Clear root port wake on bits if wakeup not allowed. */
3140 - if (!do_wakeup)
3141 - xhci_disable_port_wake_on_bits(xhci);
3142 + xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
3143 + xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
3144
3145 if (!HCD_HW_ACCESSIBLE(hcd))
3146 return 0;
3147 @@ -1088,6 +1086,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3148 struct usb_hcd *secondary_hcd;
3149 int retval = 0;
3150 bool comp_timer_running = false;
3151 + bool pending_portevent = false;
3152
3153 if (!hcd->state)
3154 return 0;
3155 @@ -1226,13 +1225,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3156
3157 done:
3158 if (retval == 0) {
3159 - /* Resume root hubs only when have pending events. */
3160 - if (xhci_pending_portevent(xhci)) {
3161 + /*
3162 + * Resume roothubs only if there are pending events.
3163 + * USB 3 devices resend U3 LFPS wake after a 100ms delay if
3164 + * the first wake signalling failed, give it that chance.
3165 + */
3166 + pending_portevent = xhci_pending_portevent(xhci);
3167 + if (!pending_portevent) {
3168 + msleep(120);
3169 + pending_portevent = xhci_pending_portevent(xhci);
3170 + }
3171 +
3172 + if (pending_portevent) {
3173 usb_hcd_resume_root_hub(xhci->shared_hcd);
3174 usb_hcd_resume_root_hub(hcd);
3175 }
3176 }
3177 -
3178 /*
3179 * If system is subject to the Quirk, Compliance Mode Timer needs to
3180 * be re-initialized Always after a system resume. Ports are subject
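
The xhci.c changes fold both root hubs into one loop that rewrites PORTSC conditionally: wake bits are cleared only when wakeup is disabled, and PORT_CSC is written back for ports with no connection so a stale internal wake state cannot fire straight after suspend. The sketch below models just that read-modify-write decision in user space; the bit positions are invented and the real xhci_port_state_to_neutral() masks more change bits than shown here.

/* portsc_wake_demo.c - hypothetical model of the per-port decision above. */
#include <stdbool.h>
#include <stdio.h>

#define PORT_CONNECT	(1u << 0)	/* device present */
#define PORT_CSC	(1u << 1)	/* connect status change, write 1 to clear */
#define PORT_WAKE_BITS	(7u << 2)	/* wake-on-{connect,disconnect,overcurrent} */

static unsigned int suspend_portsc(unsigned int portsc, bool do_wakeup)
{
	unsigned int t1 = portsc & ~PORT_CSC;	/* "neutral": don't ack changes by accident */
	unsigned int t2 = t1;

	if (!do_wakeup)
		t2 &= ~PORT_WAKE_BITS;		/* no wake sources if wakeup is disabled */

	if (!(portsc & (PORT_CSC | PORT_CONNECT)))
		t2 |= PORT_CSC;			/* clear stale internal wake on empty ports */

	return t2;				/* value the driver would write back */
}

int main(void)
{
	printf("empty port, no wakeup:  0x%x\n",
	       suspend_portsc(PORT_WAKE_BITS, false));
	printf("connected port, wakeup: 0x%x\n",
	       suspend_portsc(PORT_CONNECT | PORT_WAKE_BITS, true));
	return 0;
}

The separate resume hunk in the same file adds the msleep(120) retry so late USB3 LFPS wake signalling still gets the root hubs resumed.
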
3181 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3182 index 1ad1d6e9e9979..8798ed0317864 100644
3183 --- a/drivers/usb/host/xhci.h
3184 +++ b/drivers/usb/host/xhci.h
3185 @@ -1875,6 +1875,7 @@ struct xhci_hcd {
3186 #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
3187 #define XHCI_SKIP_PHY_INIT BIT_ULL(37)
3188 #define XHCI_DISABLE_SPARSE BIT_ULL(38)
3189 +#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
3190
3191 unsigned int num_active_eps;
3192 unsigned int limit_active_eps;
3193 diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
3194 index 9e5afdde1adbf..40576e7176d8b 100644
3195 --- a/drivers/usb/renesas_usbhs/pipe.c
3196 +++ b/drivers/usb/renesas_usbhs/pipe.c
3197 @@ -746,6 +746,8 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
3198
3199 void usbhs_pipe_free(struct usbhs_pipe *pipe)
3200 {
3201 + usbhsp_pipe_select(pipe);
3202 + usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0);
3203 usbhsp_put_pipe(pipe);
3204 }
3205
3206 diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
3207 index fdaefbe924908..a82ba9cc0c724 100644
3208 --- a/drivers/usb/serial/ch341.c
3209 +++ b/drivers/usb/serial/ch341.c
3210 @@ -85,6 +85,7 @@ static const struct usb_device_id id_table[] = {
3211 { USB_DEVICE(0x1a86, 0x7522) },
3212 { USB_DEVICE(0x1a86, 0x7523) },
3213 { USB_DEVICE(0x4348, 0x5523) },
3214 + { USB_DEVICE(0x9986, 0x7523) },
3215 { },
3216 };
3217 MODULE_DEVICE_TABLE(usb, id_table);
3218 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
3219 index 361a2e3ccad8d..caf27a0d51f03 100644
3220 --- a/drivers/usb/serial/cp210x.c
3221 +++ b/drivers/usb/serial/cp210x.c
3222 @@ -146,6 +146,7 @@ static const struct usb_device_id id_table[] = {
3223 { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
3224 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
3225 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
3226 + { USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */
3227 { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
3228 { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
3229 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
3230 @@ -202,6 +203,8 @@ static const struct usb_device_id id_table[] = {
3231 { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
3232 { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
3233 { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
3234 + { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */
3235 + { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */
3236 { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
3237 { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
3238 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
3239 diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
3240 index 4cca0b836f430..3b4d1ff9033dd 100644
3241 --- a/drivers/usb/serial/io_edgeport.c
3242 +++ b/drivers/usb/serial/io_edgeport.c
3243 @@ -3003,26 +3003,32 @@ static int edge_startup(struct usb_serial *serial)
3244 response = -ENODEV;
3245 }
3246
3247 - usb_free_urb(edge_serial->interrupt_read_urb);
3248 - kfree(edge_serial->interrupt_in_buffer);
3249 -
3250 - usb_free_urb(edge_serial->read_urb);
3251 - kfree(edge_serial->bulk_in_buffer);
3252 -
3253 - kfree(edge_serial);
3254 -
3255 - return response;
3256 + goto error;
3257 }
3258
3259 /* start interrupt read for this edgeport this interrupt will
3260 * continue as long as the edgeport is connected */
3261 response = usb_submit_urb(edge_serial->interrupt_read_urb,
3262 GFP_KERNEL);
3263 - if (response)
3264 + if (response) {
3265 dev_err(ddev, "%s - Error %d submitting control urb\n",
3266 __func__, response);
3267 +
3268 + goto error;
3269 + }
3270 }
3271 return response;
3272 +
3273 +error:
3274 + usb_free_urb(edge_serial->interrupt_read_urb);
3275 + kfree(edge_serial->interrupt_in_buffer);
3276 +
3277 + usb_free_urb(edge_serial->read_urb);
3278 + kfree(edge_serial->bulk_in_buffer);
3279 +
3280 + kfree(edge_serial);
3281 +
3282 + return response;
3283 }
3284
3285
3286 diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
3287 index 2305d425e6c9a..8f1de1fbbeedf 100644
3288 --- a/drivers/usb/usbip/stub_dev.c
3289 +++ b/drivers/usb/usbip/stub_dev.c
3290 @@ -46,6 +46,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
3291 int sockfd = 0;
3292 struct socket *socket;
3293 int rv;
3294 + struct task_struct *tcp_rx = NULL;
3295 + struct task_struct *tcp_tx = NULL;
3296
3297 if (!sdev) {
3298 dev_err(dev, "sdev is null\n");
3299 @@ -69,23 +71,47 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
3300 }
3301
3302 socket = sockfd_lookup(sockfd, &err);
3303 - if (!socket)
3304 + if (!socket) {
3305 + dev_err(dev, "failed to lookup sock");
3306 goto err;
3307 + }
3308
3309 - sdev->ud.tcp_socket = socket;
3310 - sdev->ud.sockfd = sockfd;
3311 + if (socket->type != SOCK_STREAM) {
3312 + dev_err(dev, "Expecting SOCK_STREAM - found %d",
3313 + socket->type);
3314 + goto sock_err;
3315 + }
3316
3317 + /* unlock and create threads and get tasks */
3318 spin_unlock_irq(&sdev->ud.lock);
3319 + tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
3320 + if (IS_ERR(tcp_rx)) {
3321 + sockfd_put(socket);
3322 + return -EINVAL;
3323 + }
3324 + tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
3325 + if (IS_ERR(tcp_tx)) {
3326 + kthread_stop(tcp_rx);
3327 + sockfd_put(socket);
3328 + return -EINVAL;
3329 + }
3330
3331 - sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud,
3332 - "stub_rx");
3333 - sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud,
3334 - "stub_tx");
3335 + /* get task structs now */
3336 + get_task_struct(tcp_rx);
3337 + get_task_struct(tcp_tx);
3338
3339 + /* lock and update sdev->ud state */
3340 spin_lock_irq(&sdev->ud.lock);
3341 + sdev->ud.tcp_socket = socket;
3342 + sdev->ud.sockfd = sockfd;
3343 + sdev->ud.tcp_rx = tcp_rx;
3344 + sdev->ud.tcp_tx = tcp_tx;
3345 sdev->ud.status = SDEV_ST_USED;
3346 spin_unlock_irq(&sdev->ud.lock);
3347
3348 + wake_up_process(sdev->ud.tcp_rx);
3349 + wake_up_process(sdev->ud.tcp_tx);
3350 +
3351 } else {
3352 dev_info(dev, "stub down\n");
3353
3354 @@ -100,6 +126,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
3355
3356 return count;
3357
3358 +sock_err:
3359 + sockfd_put(socket);
3360 err:
3361 spin_unlock_irq(&sdev->ud.lock);
3362 return -EINVAL;
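
The stub_dev.c rework above (and the matching vhci_sysfs.c and vudc_sysfs.c hunks that follow) replaces kthread_get_run() with an explicit sequence: create both threads first, take task references, publish the state under the lock, and only then wake the threads, so neither loop can run against half-initialised state. A condensed kernel-style sketch of that ordering, with invented my_dev/my_loop names and the socket handling left out; this is a shape sketch, not code from the patch:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;			/* assumed initialised by the caller */
	struct task_struct *rx, *tx;
};

static int start_threads(struct my_dev *dev, int (*my_loop)(void *))
{
	struct task_struct *rx, *tx;

	rx = kthread_create(my_loop, dev, "my_rx");
	if (IS_ERR(rx))
		return PTR_ERR(rx);

	tx = kthread_create(my_loop, dev, "my_tx");
	if (IS_ERR(tx)) {
		kthread_stop(rx);		/* created but never woken: stop is safe */
		return PTR_ERR(tx);
	}

	get_task_struct(rx);			/* hold references before publishing */
	get_task_struct(tx);

	spin_lock_irq(&dev->lock);		/* publish only fully initialised state */
	dev->rx = rx;
	dev->tx = tx;
	spin_unlock_irq(&dev->lock);

	wake_up_process(rx);			/* threads start after the state is visible */
	wake_up_process(tx);
	return 0;
}

kthread_stop() on a thread that was created but never woken simply makes it exit without running its loop, which is what makes the partial-failure path safe.
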
3363 diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
3364 index be37aec250c2b..e64ea314930be 100644
3365 --- a/drivers/usb/usbip/vhci_sysfs.c
3366 +++ b/drivers/usb/usbip/vhci_sysfs.c
3367 @@ -312,6 +312,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
3368 struct vhci *vhci;
3369 int err;
3370 unsigned long flags;
3371 + struct task_struct *tcp_rx = NULL;
3372 + struct task_struct *tcp_tx = NULL;
3373
3374 /*
3375 * @rhport: port number of vhci_hcd
3376 @@ -349,12 +351,35 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
3377
3378 /* Extract socket from fd. */
3379 socket = sockfd_lookup(sockfd, &err);
3380 - if (!socket)
3381 + if (!socket) {
3382 + dev_err(dev, "failed to lookup sock");
3383 return -EINVAL;
3384 + }
3385 + if (socket->type != SOCK_STREAM) {
3386 + dev_err(dev, "Expecting SOCK_STREAM - found %d",
3387 + socket->type);
3388 + sockfd_put(socket);
3389 + return -EINVAL;
3390 + }
3391 +
3392 + /* create threads before locking */
3393 + tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
3394 + if (IS_ERR(tcp_rx)) {
3395 + sockfd_put(socket);
3396 + return -EINVAL;
3397 + }
3398 + tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
3399 + if (IS_ERR(tcp_tx)) {
3400 + kthread_stop(tcp_rx);
3401 + sockfd_put(socket);
3402 + return -EINVAL;
3403 + }
3404
3405 - /* now need lock until setting vdev status as used */
3406 + /* get task structs now */
3407 + get_task_struct(tcp_rx);
3408 + get_task_struct(tcp_tx);
3409
3410 - /* begin a lock */
3411 + /* now begin lock until setting vdev status set */
3412 spin_lock_irqsave(&vhci->lock, flags);
3413 spin_lock(&vdev->ud.lock);
3414
3415 @@ -364,6 +389,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
3416 spin_unlock_irqrestore(&vhci->lock, flags);
3417
3418 sockfd_put(socket);
3419 + kthread_stop_put(tcp_rx);
3420 + kthread_stop_put(tcp_tx);
3421
3422 dev_err(dev, "port %d already used\n", rhport);
3423 /*
3424 @@ -382,14 +409,16 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
3425 vdev->speed = speed;
3426 vdev->ud.sockfd = sockfd;
3427 vdev->ud.tcp_socket = socket;
3428 + vdev->ud.tcp_rx = tcp_rx;
3429 + vdev->ud.tcp_tx = tcp_tx;
3430 vdev->ud.status = VDEV_ST_NOTASSIGNED;
3431
3432 spin_unlock(&vdev->ud.lock);
3433 spin_unlock_irqrestore(&vhci->lock, flags);
3434 /* end the lock */
3435
3436 - vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
3437 - vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx");
3438 + wake_up_process(vdev->ud.tcp_rx);
3439 + wake_up_process(vdev->ud.tcp_tx);
3440
3441 rh_port_connect(vdev, speed);
3442
3443 diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
3444 index 100f680c572ae..a3ec39fc61778 100644
3445 --- a/drivers/usb/usbip/vudc_sysfs.c
3446 +++ b/drivers/usb/usbip/vudc_sysfs.c
3447 @@ -90,8 +90,9 @@ unlock:
3448 }
3449 static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
3450
3451 -static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr,
3452 - const char *in, size_t count)
3453 +static ssize_t usbip_sockfd_store(struct device *dev,
3454 + struct device_attribute *attr,
3455 + const char *in, size_t count)
3456 {
3457 struct vudc *udc = (struct vudc *) dev_get_drvdata(dev);
3458 int rv;
3459 @@ -100,6 +101,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
3460 struct socket *socket;
3461 unsigned long flags;
3462 int ret;
3463 + struct task_struct *tcp_rx = NULL;
3464 + struct task_struct *tcp_tx = NULL;
3465
3466 rv = kstrtoint(in, 0, &sockfd);
3467 if (rv != 0)
3468 @@ -138,24 +141,54 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
3469 goto unlock_ud;
3470 }
3471
3472 - udc->ud.tcp_socket = socket;
3473 + if (socket->type != SOCK_STREAM) {
3474 + dev_err(dev, "Expecting SOCK_STREAM - found %d",
3475 + socket->type);
3476 + ret = -EINVAL;
3477 + goto sock_err;
3478 + }
3479
3480 + /* unlock and create threads and get tasks */
3481 spin_unlock_irq(&udc->ud.lock);
3482 spin_unlock_irqrestore(&udc->lock, flags);
3483
3484 - udc->ud.tcp_rx = kthread_get_run(&v_rx_loop,
3485 - &udc->ud, "vudc_rx");
3486 - udc->ud.tcp_tx = kthread_get_run(&v_tx_loop,
3487 - &udc->ud, "vudc_tx");
3488 + tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
3489 + if (IS_ERR(tcp_rx)) {
3490 + sockfd_put(socket);
3491 + return -EINVAL;
3492 + }
3493 + tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
3494 + if (IS_ERR(tcp_tx)) {
3495 + kthread_stop(tcp_rx);
3496 + sockfd_put(socket);
3497 + return -EINVAL;
3498 + }
3499 +
3500 + /* get task structs now */
3501 + get_task_struct(tcp_rx);
3502 + get_task_struct(tcp_tx);
3503
3504 + /* lock and update udc->ud state */
3505 spin_lock_irqsave(&udc->lock, flags);
3506 spin_lock_irq(&udc->ud.lock);
3507 +
3508 + udc->ud.tcp_socket = socket;
3509 + udc->ud.tcp_rx = tcp_rx;
3510 + udc->ud.tcp_tx = tcp_tx;

3511 udc->ud.status = SDEV_ST_USED;
3512 +
3513 spin_unlock_irq(&udc->ud.lock);
3514
3515 ktime_get_ts64(&udc->start_time);
3516 v_start_timer(udc);
3517 udc->connected = 1;
3518 +
3519 + spin_unlock_irqrestore(&udc->lock, flags);
3520 +
3521 + wake_up_process(udc->ud.tcp_rx);
3522 + wake_up_process(udc->ud.tcp_tx);
3523 + return count;
3524 +
3525 } else {
3526 if (!udc->connected) {
3527 dev_err(dev, "Device not connected");
3528 @@ -177,6 +210,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
3529
3530 return count;
3531
3532 +sock_err:
3533 + sockfd_put(socket);
3534 unlock_ud:
3535 spin_unlock_irq(&udc->ud.lock);
3536 unlock:
3537 diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
3538 index f026624898e7a..77cc80bcb479c 100644
3539 --- a/drivers/xen/events/events_2l.c
3540 +++ b/drivers/xen/events/events_2l.c
3541 @@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void)
3542 return EVTCHN_2L_NR_CHANNELS;
3543 }
3544
3545 +static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
3546 +{
3547 + clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
3548 +}
3549 +
3550 static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
3551 {
3552 clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
3553 @@ -71,12 +76,6 @@ static bool evtchn_2l_is_pending(unsigned port)
3554 return sync_test_bit(port, BM(&s->evtchn_pending[0]));
3555 }
3556
3557 -static bool evtchn_2l_test_and_set_mask(unsigned port)
3558 -{
3559 - struct shared_info *s = HYPERVISOR_shared_info;
3560 - return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
3561 -}
3562 -
3563 static void evtchn_2l_mask(unsigned port)
3564 {
3565 struct shared_info *s = HYPERVISOR_shared_info;
3566 @@ -354,18 +353,27 @@ static void evtchn_2l_resume(void)
3567 EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
3568 }
3569
3570 +static int evtchn_2l_percpu_deinit(unsigned int cpu)
3571 +{
3572 + memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
3573 + EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
3574 +
3575 + return 0;
3576 +}
3577 +
3578 static const struct evtchn_ops evtchn_ops_2l = {
3579 .max_channels = evtchn_2l_max_channels,
3580 .nr_channels = evtchn_2l_max_channels,
3581 + .remove = evtchn_2l_remove,
3582 .bind_to_cpu = evtchn_2l_bind_to_cpu,
3583 .clear_pending = evtchn_2l_clear_pending,
3584 .set_pending = evtchn_2l_set_pending,
3585 .is_pending = evtchn_2l_is_pending,
3586 - .test_and_set_mask = evtchn_2l_test_and_set_mask,
3587 .mask = evtchn_2l_mask,
3588 .unmask = evtchn_2l_unmask,
3589 .handle_events = evtchn_2l_handle_events,
3590 .resume = evtchn_2l_resume,
3591 + .percpu_deinit = evtchn_2l_percpu_deinit,
3592 };
3593
3594 void __init xen_evtchn_2l_init(void)
3595 diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
3596 index 7b94a6c316643..e492f5fe5be62 100644
3597 --- a/drivers/xen/events/events_base.c
3598 +++ b/drivers/xen/events/events_base.c
3599 @@ -99,6 +99,7 @@ static DEFINE_RWLOCK(evtchn_rwlock);
3600 * evtchn_rwlock
3601 * IRQ-desc lock
3602 * percpu eoi_list_lock
3603 + * irq_info->lock
3604 */
3605
3606 static LIST_HEAD(xen_irq_list_head);
3607 @@ -220,6 +221,8 @@ static int xen_irq_info_common_setup(struct irq_info *info,
3608 info->irq = irq;
3609 info->evtchn = evtchn;
3610 info->cpu = cpu;
3611 + info->mask_reason = EVT_MASK_REASON_EXPLICIT;
3612 + spin_lock_init(&info->lock);
3613
3614 ret = set_evtchn_to_irq(evtchn, irq);
3615 if (ret < 0)
3616 @@ -286,6 +289,7 @@ static int xen_irq_info_pirq_setup(unsigned irq,
3617 static void xen_irq_info_cleanup(struct irq_info *info)
3618 {
3619 set_evtchn_to_irq(info->evtchn, -1);
3620 + xen_evtchn_port_remove(info->evtchn, info->cpu);
3621 info->evtchn = 0;
3622 }
3623
3624 @@ -366,6 +370,34 @@ unsigned int cpu_from_evtchn(unsigned int evtchn)
3625 return ret;
3626 }
3627
3628 +static void do_mask(struct irq_info *info, u8 reason)
3629 +{
3630 + unsigned long flags;
3631 +
3632 + spin_lock_irqsave(&info->lock, flags);
3633 +
3634 + if (!info->mask_reason)
3635 + mask_evtchn(info->evtchn);
3636 +
3637 + info->mask_reason |= reason;
3638 +
3639 + spin_unlock_irqrestore(&info->lock, flags);
3640 +}
3641 +
3642 +static void do_unmask(struct irq_info *info, u8 reason)
3643 +{
3644 + unsigned long flags;
3645 +
3646 + spin_lock_irqsave(&info->lock, flags);
3647 +
3648 + info->mask_reason &= ~reason;
3649 +
3650 + if (!info->mask_reason)
3651 + unmask_evtchn(info->evtchn);
3652 +
3653 + spin_unlock_irqrestore(&info->lock, flags);
3654 +}
3655 +
3656 #ifdef CONFIG_X86
3657 static bool pirq_check_eoi_map(unsigned irq)
3658 {
3659 @@ -493,7 +525,7 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
3660 }
3661
3662 info->eoi_time = 0;
3663 - unmask_evtchn(evtchn);
3664 + do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
3665 }
3666
3667 static void xen_irq_lateeoi_worker(struct work_struct *work)
3668 @@ -662,6 +694,12 @@ static void xen_evtchn_close(unsigned int port)
3669 BUG();
3670 }
3671
3672 +static void event_handler_exit(struct irq_info *info)
3673 +{
3674 + smp_store_release(&info->is_active, 0);
3675 + clear_evtchn(info->evtchn);
3676 +}
3677 +
3678 static void pirq_query_unmask(int irq)
3679 {
3680 struct physdev_irq_status_query irq_status;
3681 @@ -680,7 +718,8 @@ static void pirq_query_unmask(int irq)
3682
3683 static void eoi_pirq(struct irq_data *data)
3684 {
3685 - int evtchn = evtchn_from_irq(data->irq);
3686 + struct irq_info *info = info_for_irq(data->irq);
3687 + int evtchn = info ? info->evtchn : 0;
3688 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
3689 int rc = 0;
3690
3691 @@ -689,16 +728,15 @@ static void eoi_pirq(struct irq_data *data)
3692
3693 if (unlikely(irqd_is_setaffinity_pending(data)) &&
3694 likely(!irqd_irq_disabled(data))) {
3695 - int masked = test_and_set_mask(evtchn);
3696 + do_mask(info, EVT_MASK_REASON_TEMPORARY);
3697
3698 - clear_evtchn(evtchn);
3699 + event_handler_exit(info);
3700
3701 irq_move_masked_irq(data);
3702
3703 - if (!masked)
3704 - unmask_evtchn(evtchn);
3705 + do_unmask(info, EVT_MASK_REASON_TEMPORARY);
3706 } else
3707 - clear_evtchn(evtchn);
3708 + event_handler_exit(info);
3709
3710 if (pirq_needs_eoi(data->irq)) {
3711 rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
3712 @@ -749,7 +787,8 @@ static unsigned int __startup_pirq(unsigned int irq)
3713 goto err;
3714
3715 out:
3716 - unmask_evtchn(evtchn);
3717 + do_unmask(info, EVT_MASK_REASON_EXPLICIT);
3718 +
3719 eoi_pirq(irq_get_irq_data(irq));
3720
3721 return 0;
3722 @@ -776,7 +815,7 @@ static void shutdown_pirq(struct irq_data *data)
3723 if (!VALID_EVTCHN(evtchn))
3724 return;
3725
3726 - mask_evtchn(evtchn);
3727 + do_mask(info, EVT_MASK_REASON_EXPLICIT);
3728 xen_evtchn_close(evtchn);
3729 xen_irq_info_cleanup(info);
3730 }
3731 @@ -1533,6 +1572,8 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
3732 }
3733
3734 info = info_for_irq(irq);
3735 + if (xchg_acquire(&info->is_active, 1))
3736 + return;
3737
3738 if (ctrl->defer_eoi) {
3739 info->eoi_cpu = smp_processor_id();
3740 @@ -1635,10 +1676,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
3741 }
3742
3743 /* Rebind an evtchn so that it gets delivered to a specific cpu */
3744 -static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
3745 +static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
3746 {
3747 struct evtchn_bind_vcpu bind_vcpu;
3748 - int masked;
3749 + evtchn_port_t evtchn = info ? info->evtchn : 0;
3750
3751 if (!VALID_EVTCHN(evtchn))
3752 return -1;
3753 @@ -1654,7 +1695,7 @@ static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
3754 * Mask the event while changing the VCPU binding to prevent
3755 * it being delivered on an unexpected VCPU.
3756 */
3757 - masked = test_and_set_mask(evtchn);
3758 + do_mask(info, EVT_MASK_REASON_TEMPORARY);
3759
3760 /*
3761 * If this fails, it usually just indicates that we're dealing with a
3762 @@ -1664,8 +1705,7 @@ static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
3763 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
3764 bind_evtchn_to_cpu(evtchn, tcpu);
3765
3766 - if (!masked)
3767 - unmask_evtchn(evtchn);
3768 + do_unmask(info, EVT_MASK_REASON_TEMPORARY);
3769
3770 return 0;
3771 }
3772 @@ -1674,7 +1714,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
3773 bool force)
3774 {
3775 unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
3776 - int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
3777 + int ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
3778
3779 if (!ret)
3780 irq_data_update_effective_affinity(data, cpumask_of(tcpu));
3781 @@ -1693,39 +1733,41 @@ EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
3782
3783 static void enable_dynirq(struct irq_data *data)
3784 {
3785 - int evtchn = evtchn_from_irq(data->irq);
3786 + struct irq_info *info = info_for_irq(data->irq);
3787 + evtchn_port_t evtchn = info ? info->evtchn : 0;
3788
3789 if (VALID_EVTCHN(evtchn))
3790 - unmask_evtchn(evtchn);
3791 + do_unmask(info, EVT_MASK_REASON_EXPLICIT);
3792 }
3793
3794 static void disable_dynirq(struct irq_data *data)
3795 {
3796 - int evtchn = evtchn_from_irq(data->irq);
3797 + struct irq_info *info = info_for_irq(data->irq);
3798 + evtchn_port_t evtchn = info ? info->evtchn : 0;
3799
3800 if (VALID_EVTCHN(evtchn))
3801 - mask_evtchn(evtchn);
3802 + do_mask(info, EVT_MASK_REASON_EXPLICIT);
3803 }
3804
3805 static void ack_dynirq(struct irq_data *data)
3806 {
3807 - int evtchn = evtchn_from_irq(data->irq);
3808 + struct irq_info *info = info_for_irq(data->irq);
3809 + evtchn_port_t evtchn = info ? info->evtchn : 0;
3810
3811 if (!VALID_EVTCHN(evtchn))
3812 return;
3813
3814 if (unlikely(irqd_is_setaffinity_pending(data)) &&
3815 likely(!irqd_irq_disabled(data))) {
3816 - int masked = test_and_set_mask(evtchn);
3817 + do_mask(info, EVT_MASK_REASON_TEMPORARY);
3818
3819 - clear_evtchn(evtchn);
3820 + event_handler_exit(info);
3821
3822 irq_move_masked_irq(data);
3823
3824 - if (!masked)
3825 - unmask_evtchn(evtchn);
3826 + do_unmask(info, EVT_MASK_REASON_TEMPORARY);
3827 } else
3828 - clear_evtchn(evtchn);
3829 + event_handler_exit(info);
3830 }
3831
3832 static void mask_ack_dynirq(struct irq_data *data)
3833 @@ -1734,18 +1776,39 @@ static void mask_ack_dynirq(struct irq_data *data)
3834 ack_dynirq(data);
3835 }
3836
3837 +static void lateeoi_ack_dynirq(struct irq_data *data)
3838 +{
3839 + struct irq_info *info = info_for_irq(data->irq);
3840 + evtchn_port_t evtchn = info ? info->evtchn : 0;
3841 +
3842 + if (VALID_EVTCHN(evtchn)) {
3843 + do_mask(info, EVT_MASK_REASON_EOI_PENDING);
3844 + event_handler_exit(info);
3845 + }
3846 +}
3847 +
3848 +static void lateeoi_mask_ack_dynirq(struct irq_data *data)
3849 +{
3850 + struct irq_info *info = info_for_irq(data->irq);
3851 + evtchn_port_t evtchn = info ? info->evtchn : 0;
3852 +
3853 + if (VALID_EVTCHN(evtchn)) {
3854 + do_mask(info, EVT_MASK_REASON_EXPLICIT);
3855 + event_handler_exit(info);
3856 + }
3857 +}
3858 +
3859 static int retrigger_dynirq(struct irq_data *data)
3860 {
3861 - unsigned int evtchn = evtchn_from_irq(data->irq);
3862 - int masked;
3863 + struct irq_info *info = info_for_irq(data->irq);
3864 + evtchn_port_t evtchn = info ? info->evtchn : 0;
3865
3866 if (!VALID_EVTCHN(evtchn))
3867 return 0;
3868
3869 - masked = test_and_set_mask(evtchn);
3870 + do_mask(info, EVT_MASK_REASON_TEMPORARY);
3871 set_evtchn(evtchn);
3872 - if (!masked)
3873 - unmask_evtchn(evtchn);
3874 + do_unmask(info, EVT_MASK_REASON_TEMPORARY);
3875
3876 return 1;
3877 }
3878 @@ -1840,10 +1903,11 @@ static void restore_cpu_ipis(unsigned int cpu)
3879 /* Clear an irq's pending state, in preparation for polling on it */
3880 void xen_clear_irq_pending(int irq)
3881 {
3882 - int evtchn = evtchn_from_irq(irq);
3883 + struct irq_info *info = info_for_irq(irq);
3884 + evtchn_port_t evtchn = info ? info->evtchn : 0;
3885
3886 if (VALID_EVTCHN(evtchn))
3887 - clear_evtchn(evtchn);
3888 + event_handler_exit(info);
3889 }
3890 EXPORT_SYMBOL(xen_clear_irq_pending);
3891 void xen_set_irq_pending(int irq)
3892 @@ -1951,8 +2015,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = {
3893 .irq_mask = disable_dynirq,
3894 .irq_unmask = enable_dynirq,
3895
3896 - .irq_ack = mask_ack_dynirq,
3897 - .irq_mask_ack = mask_ack_dynirq,
3898 + .irq_ack = lateeoi_ack_dynirq,
3899 + .irq_mask_ack = lateeoi_mask_ack_dynirq,
3900
3901 .irq_set_affinity = set_affinity_irq,
3902 .irq_retrigger = retrigger_dynirq,
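
The events_base.c changes track why a channel is masked in irq_info->mask_reason and only touch the real mask bit on the first and last reason, which is what lets the old racy test_and_set_mask() helper go away. A userspace model of that counting-by-reason logic (file and struct names invented; the lock is omitted here, while the kernel's do_mask()/do_unmask() run under info->lock):

/* mask_reason_demo.c - hypothetical model of do_mask()/do_unmask(). */
#include <stdio.h>

#define REASON_EXPLICIT		0x01
#define REASON_TEMPORARY	0x02
#define REASON_EOI_PENDING	0x04

struct chan {
	unsigned char mask_reason;
	int hw_masked;			/* stands in for the event-channel mask bit */
};

static void do_mask(struct chan *c, unsigned char reason)
{
	if (!c->mask_reason)
		c->hw_masked = 1;	/* first reason actually masks the channel */
	c->mask_reason |= reason;
}

static void do_unmask(struct chan *c, unsigned char reason)
{
	c->mask_reason &= ~reason;
	if (!c->mask_reason)
		c->hw_masked = 0;	/* last reason gone: channel may fire again */
}

int main(void)
{
	struct chan c = { 0, 0 };

	do_mask(&c, REASON_EOI_PENDING);	/* event delivered, EOI outstanding */
	do_mask(&c, REASON_TEMPORARY);		/* e.g. while rebinding to another vCPU */
	do_unmask(&c, REASON_TEMPORARY);
	printf("after temporary unmask: hw_masked=%d (EOI still pending)\n", c.hw_masked);
	do_unmask(&c, REASON_EOI_PENDING);
	printf("after EOI:              hw_masked=%d\n", c.hw_masked);
	return 0;
}
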
3903 diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
3904 index 33462521bfd0f..360a7f8cdf754 100644
3905 --- a/drivers/xen/events/events_fifo.c
3906 +++ b/drivers/xen/events/events_fifo.c
3907 @@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(unsigned port)
3908 return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
3909 }
3910
3911 -static bool evtchn_fifo_test_and_set_mask(unsigned port)
3912 -{
3913 - event_word_t *word = event_word_from_port(port);
3914 - return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
3915 -}
3916 -
3917 static void evtchn_fifo_mask(unsigned port)
3918 {
3919 event_word_t *word = event_word_from_port(port);
3920 @@ -420,7 +414,6 @@ static const struct evtchn_ops evtchn_ops_fifo = {
3921 .clear_pending = evtchn_fifo_clear_pending,
3922 .set_pending = evtchn_fifo_set_pending,
3923 .is_pending = evtchn_fifo_is_pending,
3924 - .test_and_set_mask = evtchn_fifo_test_and_set_mask,
3925 .mask = evtchn_fifo_mask,
3926 .unmask = evtchn_fifo_unmask,
3927 .handle_events = evtchn_fifo_handle_events,
3928 diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
3929 index a35c8c7ac6066..d3a89b4646b8b 100644
3930 --- a/drivers/xen/events/events_internal.h
3931 +++ b/drivers/xen/events/events_internal.h
3932 @@ -33,13 +33,19 @@ struct irq_info {
3933 struct list_head eoi_list;
3934 short refcnt;
3935 short spurious_cnt;
3936 - enum xen_irq_type type; /* type */
3937 + short type; /* type */
3938 + u8 mask_reason; /* Why is event channel masked */
3939 +#define EVT_MASK_REASON_EXPLICIT 0x01
3940 +#define EVT_MASK_REASON_TEMPORARY 0x02
3941 +#define EVT_MASK_REASON_EOI_PENDING 0x04
3942 + u8 is_active; /* Is event just being handled? */
3943 unsigned irq;
3944 unsigned int evtchn; /* event channel */
3945 unsigned short cpu; /* cpu bound */
3946 unsigned short eoi_cpu; /* EOI must happen on this cpu */
3947 unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
3948 u64 eoi_time; /* Time in jiffies when to EOI. */
3949 + spinlock_t lock;
3950
3951 union {
3952 unsigned short virq;
3953 @@ -65,12 +71,12 @@ struct evtchn_ops {
3954 unsigned (*nr_channels)(void);
3955
3956 int (*setup)(struct irq_info *info);
3957 + void (*remove)(evtchn_port_t port, unsigned int cpu);
3958 void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
3959
3960 void (*clear_pending)(unsigned port);
3961 void (*set_pending)(unsigned port);
3962 bool (*is_pending)(unsigned port);
3963 - bool (*test_and_set_mask)(unsigned port);
3964 void (*mask)(unsigned port);
3965 void (*unmask)(unsigned port);
3966
3967 @@ -107,6 +113,13 @@ static inline int xen_evtchn_port_setup(struct irq_info *info)
3968 return 0;
3969 }
3970
3971 +static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
3972 + unsigned int cpu)
3973 +{
3974 + if (evtchn_ops->remove)
3975 + evtchn_ops->remove(evtchn, cpu);
3976 +}
3977 +
3978 static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
3979 unsigned cpu)
3980 {
3981 @@ -128,11 +141,6 @@ static inline bool test_evtchn(unsigned port)
3982 return evtchn_ops->is_pending(port);
3983 }
3984
3985 -static inline bool test_and_set_mask(unsigned port)
3986 -{
3987 - return evtchn_ops->test_and_set_mask(port);
3988 -}
3989 -
3990 static inline void mask_evtchn(unsigned port)
3991 {
3992 return evtchn_ops->mask(port);
3993 diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
3994 index cdb45829354d9..056a68292e152 100644
3995 --- a/fs/binfmt_misc.c
3996 +++ b/fs/binfmt_misc.c
3997 @@ -696,12 +696,24 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
3998 struct super_block *sb = file_inode(file)->i_sb;
3999 struct dentry *root = sb->s_root, *dentry;
4000 int err = 0;
4001 + struct file *f = NULL;
4002
4003 e = create_entry(buffer, count);
4004
4005 if (IS_ERR(e))
4006 return PTR_ERR(e);
4007
4008 + if (e->flags & MISC_FMT_OPEN_FILE) {
4009 + f = open_exec(e->interpreter);
4010 + if (IS_ERR(f)) {
4011 + pr_notice("register: failed to install interpreter file %s\n",
4012 + e->interpreter);
4013 + kfree(e);
4014 + return PTR_ERR(f);
4015 + }
4016 + e->interp_file = f;
4017 + }
4018 +
4019 inode_lock(d_inode(root));
4020 dentry = lookup_one_len(e->name, root, strlen(e->name));
4021 err = PTR_ERR(dentry);
4022 @@ -725,21 +737,6 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
4023 goto out2;
4024 }
4025
4026 - if (e->flags & MISC_FMT_OPEN_FILE) {
4027 - struct file *f;
4028 -
4029 - f = open_exec(e->interpreter);
4030 - if (IS_ERR(f)) {
4031 - err = PTR_ERR(f);
4032 - pr_notice("register: failed to install interpreter file %s\n", e->interpreter);
4033 - simple_release_fs(&bm_mnt, &entry_count);
4034 - iput(inode);
4035 - inode = NULL;
4036 - goto out2;
4037 - }
4038 - e->interp_file = f;
4039 - }
4040 -
4041 e->dentry = dget(dentry);
4042 inode->i_private = e;
4043 inode->i_fop = &bm_entry_operations;
4044 @@ -756,6 +753,8 @@ out:
4045 inode_unlock(d_inode(root));
4046
4047 if (err) {
4048 + if (f)
4049 + filp_close(f, NULL);
4050 kfree(e);
4051 return err;
4052 }
4053 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
4054 index 115f063497ffa..41b3c5fc958c7 100644
4055 --- a/fs/cifs/cifsfs.c
4056 +++ b/fs/cifs/cifsfs.c
4057 @@ -278,7 +278,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
4058 rc = server->ops->queryfs(xid, tcon, buf);
4059
4060 free_xid(xid);
4061 - return 0;
4062 + return rc;
4063 }
4064
4065 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
4066 diff --git a/fs/configfs/file.c b/fs/configfs/file.c
4067 index fb65b706cc0db..84b4d58fc65f7 100644
4068 --- a/fs/configfs/file.c
4069 +++ b/fs/configfs/file.c
4070 @@ -378,7 +378,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
4071
4072 attr = to_attr(dentry);
4073 if (!attr)
4074 - goto out_put_item;
4075 + goto out_free_buffer;
4076
4077 if (type & CONFIGFS_ITEM_BIN_ATTR) {
4078 buffer->bin_attr = to_bin_attr(dentry);
4079 @@ -391,7 +391,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
4080 /* Grab the module reference for this attribute if we have one */
4081 error = -ENODEV;
4082 if (!try_module_get(buffer->owner))
4083 - goto out_put_item;
4084 + goto out_free_buffer;
4085
4086 error = -EACCES;
4087 if (!buffer->item->ci_type)
4088 @@ -435,8 +435,6 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
4089
4090 out_put_module:
4091 module_put(buffer->owner);
4092 -out_put_item:
4093 - config_item_put(buffer->item);
4094 out_free_buffer:
4095 up_read(&frag->frag_sem);
4096 kfree(buffer);
4097 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
4098 index 188b17a3b19eb..e7c0790308fe0 100644
4099 --- a/fs/nfs/dir.c
4100 +++ b/fs/nfs/dir.c
4101 @@ -1073,6 +1073,15 @@ out_force:
4102 goto out;
4103 }
4104
4105 +static void nfs_mark_dir_for_revalidate(struct inode *inode)
4106 +{
4107 + struct nfs_inode *nfsi = NFS_I(inode);
4108 +
4109 + spin_lock(&inode->i_lock);
4110 + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
4111 + spin_unlock(&inode->i_lock);
4112 +}
4113 +
4114 /*
4115 * We judge how long we want to trust negative
4116 * dentries by looking at the parent inode mtime.
4117 @@ -1107,19 +1116,14 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
4118 __func__, dentry);
4119 return 1;
4120 case 0:
4121 - nfs_mark_for_revalidate(dir);
4122 - if (inode && S_ISDIR(inode->i_mode)) {
4123 - /* Purge readdir caches. */
4124 - nfs_zap_caches(inode);
4125 - /*
4126 - * We can't d_drop the root of a disconnected tree:
4127 - * its d_hash is on the s_anon list and d_drop() would hide
4128 - * it from shrink_dcache_for_unmount(), leading to busy
4129 - * inodes on unmount and further oopses.
4130 - */
4131 - if (IS_ROOT(dentry))
4132 - return 1;
4133 - }
4134 + /*
4135 + * We can't d_drop the root of a disconnected tree:
4136 + * its d_hash is on the s_anon list and d_drop() would hide
4137 + * it from shrink_dcache_for_unmount(), leading to busy
4138 + * inodes on unmount and further oopses.
4139 + */
4140 + if (inode && IS_ROOT(dentry))
4141 + return 1;
4142 dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
4143 __func__, dentry);
4144 return 0;
4145 @@ -1188,6 +1192,13 @@ out:
4146 nfs_free_fattr(fattr);
4147 nfs_free_fhandle(fhandle);
4148 nfs4_label_free(label);
4149 +
4150 + /*
4151 + * If the lookup failed despite the dentry change attribute being
4152 + * a match, then we should revalidate the directory cache.
4153 + */
4154 + if (!ret && nfs_verify_change_attribute(dir, dentry->d_time))
4155 + nfs_mark_dir_for_revalidate(dir);
4156 return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
4157 }
4158
4159 @@ -1230,7 +1241,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
4160 error = nfs_lookup_verify_inode(inode, flags);
4161 if (error) {
4162 if (error == -ESTALE)
4163 - nfs_zap_caches(dir);
4164 + nfs_mark_dir_for_revalidate(dir);
4165 goto out_bad;
4166 }
4167 nfs_advise_use_readdirplus(dir);
4168 @@ -1725,7 +1736,6 @@ out:
4169 dput(parent);
4170 return d;
4171 out_error:
4172 - nfs_mark_for_revalidate(dir);
4173 d = ERR_PTR(error);
4174 goto out;
4175 }
4176 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4177 index 30e44b33040a4..b2119159dead2 100644
4178 --- a/fs/nfs/nfs4proc.c
4179 +++ b/fs/nfs/nfs4proc.c
4180 @@ -5830,7 +5830,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
4181 return ret;
4182 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4183 return -ENOENT;
4184 - return 0;
4185 + return label.len;
4186 }
4187
4188 static int nfs4_get_security_label(struct inode *inode, void *buf,
4189 diff --git a/fs/pnode.h b/fs/pnode.h
4190 index 26f74e092bd98..988f1aa9b02ae 100644
4191 --- a/fs/pnode.h
4192 +++ b/fs/pnode.h
4193 @@ -12,7 +12,7 @@
4194
4195 #define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED)
4196 #define IS_MNT_SLAVE(m) ((m)->mnt_master)
4197 -#define IS_MNT_NEW(m) (!(m)->mnt_ns)
4198 +#define IS_MNT_NEW(m) (!(m)->mnt_ns || is_anon_ns((m)->mnt_ns))
4199 #define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
4200 #define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
4201 #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
4202 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
4203 index 97a192eb9949c..507f8f9103270 100644
4204 --- a/fs/udf/inode.c
4205 +++ b/fs/udf/inode.c
4206 @@ -547,11 +547,14 @@ static int udf_do_extend_file(struct inode *inode,
4207
4208 udf_write_aext(inode, last_pos, &last_ext->extLocation,
4209 last_ext->extLength, 1);
4210 +
4211 /*
4212 - * We've rewritten the last extent but there may be empty
4213 - * indirect extent after it - enter it.
4214 + * We've rewritten the last extent. If we are going to add
4215 + * more extents, we may need to enter possible following
4216 + * empty indirect extent.
4217 */
4218 - udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
4219 + if (new_block_bytes || prealloc_len)
4220 + udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
4221 }
4222
4223 /* Managed to do everything necessary? */
4224 diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
4225 index 0783b0c6d9e2f..1ef071e5a55ef 100644
4226 --- a/include/linux/can/skb.h
4227 +++ b/include/linux/can/skb.h
4228 @@ -49,8 +49,12 @@ static inline void can_skb_reserve(struct sk_buff *skb)
4229
4230 static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
4231 {
4232 - if (sk) {
4233 - sock_hold(sk);
4234 + /* If the socket has already been closed by user space, the
4235 + * refcount may already be 0 (and the socket will be freed
4236 + * after the last TX skb has been freed). So only increase
4237 + * socket refcount if the refcount is > 0.
4238 + */
4239 + if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
4240 skb->destructor = sock_efree;
4241 skb->sk = sk;
4242 }
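
can_skb_set_owner() now only pins the socket when its refcount is still non-zero, the guard refcount_inc_not_zero() provides, so a TX skb outliving a closed socket does not resurrect freed memory. A userspace model of the inc-not-zero rule using C11 atomics (names are hypothetical, and the kernel helper has saturation semantics not shown here):

/* inc_not_zero_demo.c - hypothetical model of "take a reference only if alive". */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;	/* object was alive, reference taken */
	}
	return false;			/* already dead: caller must not use it */
}

int main(void)
{
	atomic_int live = 2, dead = 0;

	printf("live socket:   %s (refs now %d)\n",
	       inc_not_zero(&live) ? "held" : "skipped", atomic_load(&live));
	printf("closed socket: %s (refs still %d)\n",
	       inc_not_zero(&dead) ? "held" : "skipped", atomic_load(&dead));
	return 0;
}
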
4243 diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
4244 index a132d875d3518..3a1d899019af0 100644
4245 --- a/include/linux/sched/mm.h
4246 +++ b/include/linux/sched/mm.h
4247 @@ -167,7 +167,8 @@ static inline bool in_vfork(struct task_struct *tsk)
4248 * another oom-unkillable task does this it should blame itself.
4249 */
4250 rcu_read_lock();
4251 - ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
4252 + ret = tsk->vfork_done &&
4253 + rcu_dereference(tsk->real_parent)->mm == tsk->mm;
4254 rcu_read_unlock();
4255
4256 return ret;
4257 diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
4258 index f9a0c6189852e..69998fc5ffe9d 100644
4259 --- a/include/linux/stop_machine.h
4260 +++ b/include/linux/stop_machine.h
4261 @@ -139,7 +139,7 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
4262 const struct cpumask *cpus);
4263 #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
4264
4265 -static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
4266 +static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
4267 const struct cpumask *cpus)
4268 {
4269 unsigned long flags;
4270 @@ -150,14 +150,15 @@ static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
4271 return ret;
4272 }
4273
4274 -static inline int stop_machine(cpu_stop_fn_t fn, void *data,
4275 - const struct cpumask *cpus)
4276 +static __always_inline int
4277 +stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
4278 {
4279 return stop_machine_cpuslocked(fn, data, cpus);
4280 }
4281
4282 -static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
4283 - const struct cpumask *cpus)
4284 +static __always_inline int
4285 +stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
4286 + const struct cpumask *cpus)
4287 {
4288 return stop_machine(fn, data, cpus);
4289 }
4290 diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
4291 index e8a924eeea3d0..6b5fcfa1e5553 100644
4292 --- a/include/linux/virtio_net.h
4293 +++ b/include/linux/virtio_net.h
4294 @@ -79,8 +79,13 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
4295 if (gso_type && skb->network_header) {
4296 struct flow_keys_basic keys;
4297
4298 - if (!skb->protocol)
4299 + if (!skb->protocol) {
4300 + __be16 protocol = dev_parse_header_protocol(skb);
4301 +
4302 virtio_net_hdr_set_proto(skb, hdr);
4303 + if (protocol && protocol != skb->protocol)
4304 + return -EINVAL;
4305 + }
4306 retry:
4307 if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
4308 NULL, 0, 0, 0,
4309 diff --git a/include/media/rc-map.h b/include/media/rc-map.h
4310 index c2ef3906e1cd1..a358c87a65de2 100644
4311 --- a/include/media/rc-map.h
4312 +++ b/include/media/rc-map.h
4313 @@ -126,6 +126,13 @@ struct rc_map_list {
4314 struct rc_map map;
4315 };
4316
4317 +#ifdef CONFIG_MEDIA_CEC_RC
4318 +/*
4319 + * rc_map_list from rc-cec.c
4320 + */
4321 +extern struct rc_map_list cec_map;
4322 +#endif
4323 +
4324 /* Routines from rc-map.c */
4325
4326 /**
4327 diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
4328 index 51b6f50eabeeb..0deeff9b4496a 100644
4329 --- a/include/target/target_core_backend.h
4330 +++ b/include/target/target_core_backend.h
4331 @@ -69,6 +69,7 @@ int transport_backend_register(const struct target_backend_ops *);
4332 void target_backend_unregister(const struct target_backend_ops *);
4333
4334 void target_complete_cmd(struct se_cmd *, u8);
4335 +void target_set_cmd_data_length(struct se_cmd *, int);
4336 void target_complete_cmd_with_length(struct se_cmd *, u8, int);
4337
4338 void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
4339 diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
4340 index a13137afc4299..70af02092d16e 100644
4341 --- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h
4342 +++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
4343 @@ -5,7 +5,7 @@
4344 #define NFCT_HELPER_STATUS_DISABLED 0
4345 #define NFCT_HELPER_STATUS_ENABLED 1
4346
4347 -enum nfnl_acct_msg_types {
4348 +enum nfnl_cthelper_msg_types {
4349 NFNL_MSG_CTHELPER_NEW,
4350 NFNL_MSG_CTHELPER_GET,
4351 NFNL_MSG_CTHELPER_DEL,
4352 diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
4353 index be0ca3306be8c..46c142b695988 100644
4354 --- a/kernel/sched/membarrier.c
4355 +++ b/kernel/sched/membarrier.c
4356 @@ -265,9 +265,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
4357 }
4358 rcu_read_unlock();
4359
4360 - preempt_disable();
4361 - smp_call_function_many(tmpmask, ipi_sync_rq_state, mm, 1);
4362 - preempt_enable();
4363 + on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true);
4364
4365 free_cpumask_var(tmpmask);
4366 cpus_read_unlock();
4367 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
4368 index 70665934d53e2..eae6a078619f9 100644
4369 --- a/kernel/sysctl.c
4370 +++ b/kernel/sysctl.c
4371 @@ -1563,7 +1563,7 @@ static struct ctl_table vm_table[] = {
4372 .data = &block_dump,
4373 .maxlen = sizeof(block_dump),
4374 .mode = 0644,
4375 - .proc_handler = proc_dointvec,
4376 + .proc_handler = proc_dointvec_minmax,
4377 .extra1 = SYSCTL_ZERO,
4378 },
4379 {
4380 @@ -1571,7 +1571,7 @@ static struct ctl_table vm_table[] = {
4381 .data = &sysctl_vfs_cache_pressure,
4382 .maxlen = sizeof(sysctl_vfs_cache_pressure),
4383 .mode = 0644,
4384 - .proc_handler = proc_dointvec,
4385 + .proc_handler = proc_dointvec_minmax,
4386 .extra1 = SYSCTL_ZERO,
4387 },
4388 #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
4389 @@ -1581,7 +1581,7 @@ static struct ctl_table vm_table[] = {
4390 .data = &sysctl_legacy_va_layout,
4391 .maxlen = sizeof(sysctl_legacy_va_layout),
4392 .mode = 0644,
4393 - .proc_handler = proc_dointvec,
4394 + .proc_handler = proc_dointvec_minmax,
4395 .extra1 = SYSCTL_ZERO,
4396 },
4397 #endif
4398 @@ -1591,7 +1591,7 @@ static struct ctl_table vm_table[] = {
4399 .data = &node_reclaim_mode,
4400 .maxlen = sizeof(node_reclaim_mode),
4401 .mode = 0644,
4402 - .proc_handler = proc_dointvec,
4403 + .proc_handler = proc_dointvec_minmax,
4404 .extra1 = SYSCTL_ZERO,
4405 },
4406 {
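
The sysctl hunk switches several vm knobs to proc_dointvec_minmax with extra1 = SYSCTL_ZERO, so a negative write is rejected with -EINVAL instead of being stored. A purely illustrative sketch of such an entry (example_knob and the table are invented, and registering the table is omitted):

#include <linux/sysctl.h>

static int example_knob;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(example_knob),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,	/* floor: writes below 0 return -EINVAL */
	},
	{ }
};
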
4407 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
4408 index 7f31932216a12..299a4c5b6cf8d 100644
4409 --- a/kernel/time/hrtimer.c
4410 +++ b/kernel/time/hrtimer.c
4411 @@ -547,8 +547,11 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
4412 }
4413
4414 /*
4415 - * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
4416 - * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
4417 + * Recomputes cpu_base::*next_timer and returns the earliest expires_next
4418 + * but does not set cpu_base::*expires_next, that is done by
4419 + * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
4420 + * cpu_base::*expires_next right away, reprogramming logic would no longer
4421 + * work.
4422 *
4423 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
4424 * those timers will get run whenever the softirq gets handled, at the end of
4425 @@ -589,6 +592,37 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
4426 return expires_next;
4427 }
4428
4429 +static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
4430 +{
4431 + ktime_t expires_next, soft = KTIME_MAX;
4432 +
4433 + /*
4434 + * If the soft interrupt has already been activated, ignore the
4435 + * soft bases. They will be handled in the already raised soft
4436 + * interrupt.
4437 + */
4438 + if (!cpu_base->softirq_activated) {
4439 + soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
4440 + /*
4441 + * Update the soft expiry time. clock_settime() might have
4442 + * affected it.
4443 + */
4444 + cpu_base->softirq_expires_next = soft;
4445 + }
4446 +
4447 + expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
4448 + /*
4449 + * If a softirq timer is expiring first, update cpu_base->next_timer
4450 + * and program the hardware with the soft expiry time.
4451 + */
4452 + if (expires_next > soft) {
4453 + cpu_base->next_timer = cpu_base->softirq_next_timer;
4454 + expires_next = soft;
4455 + }
4456 +
4457 + return expires_next;
4458 +}
4459 +
4460 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
4461 {
4462 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
4463 @@ -629,23 +663,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
4464 {
4465 ktime_t expires_next;
4466
4467 - /*
4468 - * Find the current next expiration time.
4469 - */
4470 - expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
4471 -
4472 - if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
4473 - /*
4474 - * When the softirq is activated, hrtimer has to be
4475 - * programmed with the first hard hrtimer because soft
4476 - * timer interrupt could occur too late.
4477 - */
4478 - if (cpu_base->softirq_activated)
4479 - expires_next = __hrtimer_get_next_event(cpu_base,
4480 - HRTIMER_ACTIVE_HARD);
4481 - else
4482 - cpu_base->softirq_expires_next = expires_next;
4483 - }
4484 + expires_next = hrtimer_update_next_event(cpu_base);
4485
4486 if (skip_equal && expires_next == cpu_base->expires_next)
4487 return;
4488 @@ -1640,8 +1658,8 @@ retry:
4489
4490 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
4491
4492 - /* Reevaluate the clock bases for the next expiry */
4493 - expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
4494 + /* Reevaluate the clock bases for the [soft] next expiry */
4495 + expires_next = hrtimer_update_next_event(cpu_base);
4496 /*
4497 * Store the new expiry value so the migration code can verify
4498 * against it.
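
The hrtimer rework centralises the soft/hard merge in hrtimer_update_next_event(): when the softirq has not been raised yet, the soft queue's expiry is refreshed and the hardware is programmed with whichever of the two expiries comes first. A userspace model of just that selection (the KTIME_MAX sentinel and the sample times are made up, and the real function also updates cpu_base->next_timer):

/* next_event_demo.c - hypothetical model of the expiry selection above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX

static int64_t update_next_event(int64_t hard, int64_t soft_queue,
				 bool softirq_activated, int64_t *soft_expires_next)
{
	int64_t soft = KTIME_MAX;
	int64_t expires_next;

	if (!softirq_activated) {
		soft = soft_queue;		/* soft timers still need hardware help */
		*soft_expires_next = soft;	/* remember it for the softirq itself */
	}

	expires_next = hard;
	if (expires_next > soft)		/* soft timer fires first: program that */
		expires_next = soft;

	return expires_next;
}

int main(void)
{
	int64_t soft_next = 0;

	printf("soft earlier, softirq idle:   %lld\n",
	       (long long)update_next_event(500, 200, false, &soft_next));
	printf("soft earlier, softirq raised: %lld\n",
	       (long long)update_next_event(500, 200, true, &soft_next));
	return 0;
}
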
4499 diff --git a/lib/logic_pio.c b/lib/logic_pio.c
4500 index 905027574e5d8..774bb02fff10a 100644
4501 --- a/lib/logic_pio.c
4502 +++ b/lib/logic_pio.c
4503 @@ -27,6 +27,8 @@ static DEFINE_MUTEX(io_range_mutex);
4504 * @new_range: pointer to the IO range to be registered.
4505 *
4506 * Returns 0 on success, the error code in case of failure.
4507 + * If the range already exists, -EEXIST will be returned, which should be
4508 + * considered a success.
4509 *
4510 * Register a new IO range node in the IO range list.
4511 */
4512 @@ -49,6 +51,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
4513 list_for_each_entry(range, &io_range_list, list) {
4514 if (range->fwnode == new_range->fwnode) {
4515 /* range already there */
4516 + ret = -EEXIST;
4517 goto end_register;
4518 }
4519 if (range->flags == LOGIC_PIO_CPU_MMIO &&
4520 diff --git a/mm/slub.c b/mm/slub.c
4521 index e622e8f4c2ac4..52ded855b4ed0 100644
4522 --- a/mm/slub.c
4523 +++ b/mm/slub.c
4524 @@ -1887,7 +1887,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
4525
4526 t = acquire_slab(s, n, page, object == NULL, &objects);
4527 if (!t)
4528 - continue; /* cmpxchg raced */
4529 + break;
4530
4531 available += objects;
4532 if (!object) {
4533 diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
4534 index a23094b050f8b..e290a0c9e9282 100644
4535 --- a/net/ipv4/cipso_ipv4.c
4536 +++ b/net/ipv4/cipso_ipv4.c
4537 @@ -519,16 +519,10 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
4538 ret_val = -ENOENT;
4539 goto doi_remove_return;
4540 }
4541 - if (!refcount_dec_and_test(&doi_def->refcount)) {
4542 - spin_unlock(&cipso_v4_doi_list_lock);
4543 - ret_val = -EBUSY;
4544 - goto doi_remove_return;
4545 - }
4546 list_del_rcu(&doi_def->list);
4547 spin_unlock(&cipso_v4_doi_list_lock);
4548
4549 - cipso_v4_cache_invalidate();
4550 - call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
4551 + cipso_v4_doi_putdef(doi_def);
4552 ret_val = 0;
4553
4554 doi_remove_return:
4555 @@ -585,9 +579,6 @@ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
4556
4557 if (!refcount_dec_and_test(&doi_def->refcount))
4558 return;
4559 - spin_lock(&cipso_v4_doi_list_lock);
4560 - list_del_rcu(&doi_def->list);
4561 - spin_unlock(&cipso_v4_doi_list_lock);
4562
4563 cipso_v4_cache_invalidate();
4564 call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
4565 diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
4566 index c2b7d43d92b0e..f5f4369c131c9 100644
4567 --- a/net/ipv4/nexthop.c
4568 +++ b/net/ipv4/nexthop.c
4569 @@ -1065,7 +1065,7 @@ out:
4570
4571 /* rtnl */
4572 /* remove all nexthops tied to a device being deleted */
4573 -static void nexthop_flush_dev(struct net_device *dev)
4574 +static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
4575 {
4576 unsigned int hash = nh_dev_hashfn(dev->ifindex);
4577 struct net *net = dev_net(dev);
4578 @@ -1077,6 +1077,10 @@ static void nexthop_flush_dev(struct net_device *dev)
4579 if (nhi->fib_nhc.nhc_dev != dev)
4580 continue;
4581
4582 + if (nhi->reject_nh &&
4583 + (event == NETDEV_DOWN || event == NETDEV_CHANGE))
4584 + continue;
4585 +
4586 remove_nexthop(net, nhi->nh_parent, NULL);
4587 }
4588 }
4589 @@ -1794,11 +1798,11 @@ static int nh_netdev_event(struct notifier_block *this,
4590 switch (event) {
4591 case NETDEV_DOWN:
4592 case NETDEV_UNREGISTER:
4593 - nexthop_flush_dev(dev);
4594 + nexthop_flush_dev(dev, event);
4595 break;
4596 case NETDEV_CHANGE:
4597 if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
4598 - nexthop_flush_dev(dev);
4599 + nexthop_flush_dev(dev, event);
4600 break;
4601 case NETDEV_CHANGEMTU:
4602 info_ext = ptr;
4603 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4604 index 6ddec8a23942b..5c8d0fb498256 100644
4605 --- a/net/ipv4/tcp.c
4606 +++ b/net/ipv4/tcp.c
4607 @@ -2957,16 +2957,23 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
4608 break;
4609
4610 case TCP_QUEUE_SEQ:
4611 - if (sk->sk_state != TCP_CLOSE)
4612 + if (sk->sk_state != TCP_CLOSE) {
4613 err = -EPERM;
4614 - else if (tp->repair_queue == TCP_SEND_QUEUE)
4615 - WRITE_ONCE(tp->write_seq, val);
4616 - else if (tp->repair_queue == TCP_RECV_QUEUE) {
4617 - WRITE_ONCE(tp->rcv_nxt, val);
4618 - WRITE_ONCE(tp->copied_seq, val);
4619 - }
4620 - else
4621 + } else if (tp->repair_queue == TCP_SEND_QUEUE) {
4622 + if (!tcp_rtx_queue_empty(sk))
4623 + err = -EPERM;
4624 + else
4625 + WRITE_ONCE(tp->write_seq, val);
4626 + } else if (tp->repair_queue == TCP_RECV_QUEUE) {
4627 + if (tp->rcv_nxt != tp->copied_seq) {
4628 + err = -EPERM;
4629 + } else {
4630 + WRITE_ONCE(tp->rcv_nxt, val);
4631 + WRITE_ONCE(tp->copied_seq, val);
4632 + }
4633 + } else {
4634 err = -EINVAL;
4635 + }
4636 break;
4637
4638 case TCP_REPAIR_OPTIONS:
4639 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
4640 index d7c64e953e9a5..c463eebdc8fe2 100644
4641 --- a/net/ipv4/udp_offload.c
4642 +++ b/net/ipv4/udp_offload.c
4643 @@ -426,7 +426,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
4644 }
4645
4646 if (NAPI_GRO_CB(skb)->encap_mark ||
4647 - (skb->ip_summed != CHECKSUM_PARTIAL &&
4648 + (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
4649 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
4650 !NAPI_GRO_CB(skb)->csum_valid) ||
4651 !udp_sk(sk)->gro_receive)
4652 diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
4653 index 8d3f66c310dbd..7426e33686d13 100644
4654 --- a/net/ipv6/calipso.c
4655 +++ b/net/ipv6/calipso.c
4656 @@ -83,6 +83,9 @@ struct calipso_map_cache_entry {
4657
4658 static struct calipso_map_cache_bkt *calipso_cache;
4659
4660 +static void calipso_cache_invalidate(void);
4661 +static void calipso_doi_putdef(struct calipso_doi *doi_def);
4662 +
4663 /* Label Mapping Cache Functions
4664 */
4665
4666 @@ -444,15 +447,10 @@ static int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info)
4667 ret_val = -ENOENT;
4668 goto doi_remove_return;
4669 }
4670 - if (!refcount_dec_and_test(&doi_def->refcount)) {
4671 - spin_unlock(&calipso_doi_list_lock);
4672 - ret_val = -EBUSY;
4673 - goto doi_remove_return;
4674 - }
4675 list_del_rcu(&doi_def->list);
4676 spin_unlock(&calipso_doi_list_lock);
4677
4678 - call_rcu(&doi_def->rcu, calipso_doi_free_rcu);
4679 + calipso_doi_putdef(doi_def);
4680 ret_val = 0;
4681
4682 doi_remove_return:
4683 @@ -508,10 +506,8 @@ static void calipso_doi_putdef(struct calipso_doi *doi_def)
4684
4685 if (!refcount_dec_and_test(&doi_def->refcount))
4686 return;
4687 - spin_lock(&calipso_doi_list_lock);
4688 - list_del_rcu(&doi_def->list);
4689 - spin_unlock(&calipso_doi_list_lock);
4690
4691 + calipso_cache_invalidate();
4692 call_rcu(&doi_def->rcu, calipso_doi_free_rcu);
4693 }
4694
4695 diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
4696 index b1690149b6fa0..1482259de9b5d 100644
4697 --- a/net/mpls/mpls_gso.c
4698 +++ b/net/mpls/mpls_gso.c
4699 @@ -14,6 +14,7 @@
4700 #include <linux/netdev_features.h>
4701 #include <linux/netdevice.h>
4702 #include <linux/skbuff.h>
4703 +#include <net/mpls.h>
4704
4705 static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
4706 netdev_features_t features)
4707 @@ -27,6 +28,8 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
4708
4709 skb_reset_network_header(skb);
4710 mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
4711 + if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
4712 + goto out;
4713 if (unlikely(!pskb_may_pull(skb, mpls_hlen)))
4714 goto out;
4715
4716 diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
4717 index e87b6bd6b3cdb..4731d21fc3ad8 100644
4718 --- a/net/netfilter/nf_nat_proto.c
4719 +++ b/net/netfilter/nf_nat_proto.c
4720 @@ -646,8 +646,8 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
4721 }
4722
4723 static unsigned int
4724 -nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
4725 - const struct nf_hook_state *state)
4726 +nf_nat_ipv4_pre_routing(void *priv, struct sk_buff *skb,
4727 + const struct nf_hook_state *state)
4728 {
4729 unsigned int ret;
4730 __be32 daddr = ip_hdr(skb)->daddr;
4731 @@ -659,6 +659,23 @@ nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
4732 return ret;
4733 }
4734
4735 +static unsigned int
4736 +nf_nat_ipv4_local_in(void *priv, struct sk_buff *skb,
4737 + const struct nf_hook_state *state)
4738 +{
4739 + __be32 saddr = ip_hdr(skb)->saddr;
4740 + struct sock *sk = skb->sk;
4741 + unsigned int ret;
4742 +
4743 + ret = nf_nat_ipv4_fn(priv, skb, state);
4744 +
4745 + if (ret == NF_ACCEPT && sk && saddr != ip_hdr(skb)->saddr &&
4746 + !inet_sk_transparent(sk))
4747 + skb_orphan(skb); /* TCP edemux obtained wrong socket */
4748 +
4749 + return ret;
4750 +}
4751 +
4752 static unsigned int
4753 nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
4754 const struct nf_hook_state *state)
4755 @@ -736,7 +753,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
4756 static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
4757 /* Before packet filtering, change destination */
4758 {
4759 - .hook = nf_nat_ipv4_in,
4760 + .hook = nf_nat_ipv4_pre_routing,
4761 .pf = NFPROTO_IPV4,
4762 .hooknum = NF_INET_PRE_ROUTING,
4763 .priority = NF_IP_PRI_NAT_DST,
4764 @@ -757,7 +774,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
4765 },
4766 /* After packet filtering, change source */
4767 {
4768 - .hook = nf_nat_ipv4_fn,
4769 + .hook = nf_nat_ipv4_local_in,
4770 .pf = NFPROTO_IPV4,
4771 .hooknum = NF_INET_LOCAL_IN,
4772 .priority = NF_IP_PRI_NAT_SRC,
4773 diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
4774 index d1ef2d7930739..8b60fc04c67c2 100644
4775 --- a/net/netfilter/x_tables.c
4776 +++ b/net/netfilter/x_tables.c
4777 @@ -330,6 +330,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
4778 const struct xt_match *m;
4779 int have_rev = 0;
4780
4781 + mutex_lock(&xt[af].mutex);
4782 list_for_each_entry(m, &xt[af].match, list) {
4783 if (strcmp(m->name, name) == 0) {
4784 if (m->revision > *bestp)
4785 @@ -338,6 +339,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
4786 have_rev = 1;
4787 }
4788 }
4789 + mutex_unlock(&xt[af].mutex);
4790
4791 if (af != NFPROTO_UNSPEC && !have_rev)
4792 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
4793 @@ -350,6 +352,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
4794 const struct xt_target *t;
4795 int have_rev = 0;
4796
4797 + mutex_lock(&xt[af].mutex);
4798 list_for_each_entry(t, &xt[af].target, list) {
4799 if (strcmp(t->name, name) == 0) {
4800 if (t->revision > *bestp)
4801 @@ -358,6 +361,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
4802 have_rev = 1;
4803 }
4804 }
4805 + mutex_unlock(&xt[af].mutex);
4806
4807 if (af != NFPROTO_UNSPEC && !have_rev)
4808 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
4809 @@ -371,12 +375,10 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
4810 {
4811 int have_rev, best = -1;
4812
4813 - mutex_lock(&xt[af].mutex);
4814 if (target == 1)
4815 have_rev = target_revfn(af, name, revision, &best);
4816 else
4817 have_rev = match_revfn(af, name, revision, &best);
4818 - mutex_unlock(&xt[af].mutex);
4819
4820 /* Nothing at all? Return 0 to try loading module. */
4821 if (best == -1) {
4822 diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
4823 index 0f16080b87cb9..4cb43a2c07d14 100644
4824 --- a/net/netlabel/netlabel_cipso_v4.c
4825 +++ b/net/netlabel/netlabel_cipso_v4.c
4826 @@ -575,6 +575,7 @@ list_start:
4827
4828 break;
4829 }
4830 + cipso_v4_doi_putdef(doi_def);
4831 rcu_read_unlock();
4832
4833 genlmsg_end(ans_skb, data);
4834 @@ -583,12 +584,14 @@ list_start:
4835 list_retry:
4836 /* XXX - this limit is a guesstimate */
4837 if (nlsze_mult < 4) {
4838 + cipso_v4_doi_putdef(doi_def);
4839 rcu_read_unlock();
4840 kfree_skb(ans_skb);
4841 nlsze_mult *= 2;
4842 goto list_start;
4843 }
4844 list_failure_lock:
4845 + cipso_v4_doi_putdef(doi_def);
4846 rcu_read_unlock();
4847 list_failure:
4848 kfree_skb(ans_skb);
4849 diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
4850 index ef602976bb2c8..6e023e93d3186 100644
4851 --- a/net/qrtr/qrtr.c
4852 +++ b/net/qrtr/qrtr.c
4853 @@ -791,8 +791,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
4854 plen = (len + 3) & ~3;
4855 skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
4856 msg->msg_flags & MSG_DONTWAIT, &rc);
4857 - if (!skb)
4858 + if (!skb) {
4859 + rc = -ENOMEM;
4860 goto out_node;
4861 + }
4862
4863 skb_reserve(skb, QRTR_HDR_MAX_SIZE);
4864
4865 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
4866 index b65a405f607b2..3b1b5ee521379 100644
4867 --- a/net/sched/sch_api.c
4868 +++ b/net/sched/sch_api.c
4869 @@ -2157,7 +2157,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
4870
4871 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
4872 struct tcmsg *tcm, struct netlink_callback *cb,
4873 - int *t_p, int s_t)
4874 + int *t_p, int s_t, bool recur)
4875 {
4876 struct Qdisc *q;
4877 int b;
4878 @@ -2168,7 +2168,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
4879 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
4880 return -1;
4881
4882 - if (!qdisc_dev(root))
4883 + if (!qdisc_dev(root) || !recur)
4884 return 0;
4885
4886 if (tcm->tcm_parent) {
4887 @@ -2203,13 +2203,13 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
4888 s_t = cb->args[0];
4889 t = 0;
4890
4891 - if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
4892 + if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
4893 goto done;
4894
4895 dev_queue = dev_ingress_queue(dev);
4896 if (dev_queue &&
4897 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
4898 - &t, s_t) < 0)
4899 + &t, s_t, false) < 0)
4900 goto done;
4901
4902 done:
4903 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
4904 index 7afbf15bcbd9a..4beb6d2957c33 100644
4905 --- a/net/sunrpc/sched.c
4906 +++ b/net/sunrpc/sched.c
4907 @@ -990,8 +990,11 @@ void rpc_execute(struct rpc_task *task)
4908
4909 rpc_set_active(task);
4910 rpc_make_runnable(rpciod_workqueue, task);
4911 - if (!is_async)
4912 + if (!is_async) {
4913 + unsigned int pflags = memalloc_nofs_save();
4914 __rpc_execute(task);
4915 + memalloc_nofs_restore(pflags);
4916 + }
4917 }
4918
4919 static void rpc_async_schedule(struct work_struct *work)
4920 diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
4921 index 79d1005ff2ee3..3b604c1eb3c3b 100644
4922 --- a/samples/bpf/xdpsock_user.c
4923 +++ b/samples/bpf/xdpsock_user.c
4924 @@ -783,5 +783,7 @@ int main(int argc, char **argv)
4925 else
4926 l2fwd_all();
4927
4928 + munmap(bufs, NUM_FRAMES * opt_xsk_frame_size);
4929 +
4930 return 0;
4931 }
4932 diff --git a/security/commoncap.c b/security/commoncap.c
4933 index ed89a6dd4f83d..28a6939bcc4e5 100644
4934 --- a/security/commoncap.c
4935 +++ b/security/commoncap.c
4936 @@ -500,8 +500,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
4937 __u32 magic, nsmagic;
4938 struct inode *inode = d_backing_inode(dentry);
4939 struct user_namespace *task_ns = current_user_ns(),
4940 - *fs_ns = inode->i_sb->s_user_ns,
4941 - *ancestor;
4942 + *fs_ns = inode->i_sb->s_user_ns;
4943 kuid_t rootid;
4944 size_t newsize;
4945
4946 @@ -524,15 +523,6 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
4947 if (nsrootid == -1)
4948 return -EINVAL;
4949
4950 - /*
4951 - * Do not allow allow adding a v3 filesystem capability xattr
4952 - * if the rootid field is ambiguous.
4953 - */
4954 - for (ancestor = task_ns->parent; ancestor; ancestor = ancestor->parent) {
4955 - if (from_kuid(ancestor, rootid) == 0)
4956 - return -EINVAL;
4957 - }
4958 -
4959 newsize = sizeof(struct vfs_ns_cap_data);
4960 nscap = kmalloc(newsize, GFP_ATOMIC);
4961 if (!nscap)
4962 diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
4963 index 6a85645663759..17a25e453f60c 100644
4964 --- a/sound/pci/hda/hda_bind.c
4965 +++ b/sound/pci/hda/hda_bind.c
4966 @@ -47,6 +47,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
4967 if (codec->bus->shutdown)
4968 return;
4969
4970 + /* ignore unsol events during system suspend/resume */
4971 + if (codec->core.dev.power.power_state.event != PM_EVENT_ON)
4972 + return;
4973 +
4974 if (codec->patch_ops.unsol_event)
4975 codec->patch_ops.unsol_event(codec, ev);
4976 }
4977 diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
4978 index 5e6081750bd9b..6a159c6c2f546 100644
4979 --- a/sound/pci/hda/hda_controller.c
4980 +++ b/sound/pci/hda/hda_controller.c
4981 @@ -613,13 +613,6 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
4982 20,
4983 178000000);
4984
4985 - /* by some reason, the playback stream stalls on PulseAudio with
4986 - * tsched=1 when a capture stream triggers. Until we figure out the
4987 - * real cause, disable tsched mode by telling the PCM info flag.
4988 - */
4989 - if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
4990 - runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
4991 -
4992 if (chip->align_buffer_size)
4993 /* constrain buffer sizes to be multiple of 128
4994 bytes. This is more efficient in terms of memory
4995 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4996 index 7f9f6bbca5489..febd16c9efd7a 100644
4997 --- a/sound/pci/hda/hda_intel.c
4998 +++ b/sound/pci/hda/hda_intel.c
4999 @@ -1025,6 +1025,8 @@ static int azx_prepare(struct device *dev)
5000 chip = card->private_data;
5001 chip->pm_prepared = 1;
5002
5003 + flush_work(&azx_bus(chip)->unsol_work);
5004 +
5005 /* HDA controller always requires different WAKEEN for runtime suspend
5006 * and system suspend, so don't use direct-complete here.
5007 */
5008 diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
5009 index d7b2aae6d4289..9412bdda85c82 100644
5010 --- a/sound/pci/hda/patch_ca0132.c
5011 +++ b/sound/pci/hda/patch_ca0132.c
5012 @@ -1185,6 +1185,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
5013 SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
5014 SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
5015 SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
5016 + SND_PCI_QUIRK(0x1102, 0x0191, "Sound Blaster AE-5 Plus", QUIRK_AE5),
5017 SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7),
5018 {}
5019 };
5020 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
5021 index df4771b9eff24..ce38b5d4670da 100644
5022 --- a/sound/pci/hda/patch_hdmi.c
5023 +++ b/sound/pci/hda/patch_hdmi.c
5024 @@ -2382,6 +2382,18 @@ static void generic_hdmi_free(struct hda_codec *codec)
5025 }
5026
5027 #ifdef CONFIG_PM
5028 +static int generic_hdmi_suspend(struct hda_codec *codec)
5029 +{
5030 + struct hdmi_spec *spec = codec->spec;
5031 + int pin_idx;
5032 +
5033 + for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
5034 + struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
5035 + cancel_delayed_work_sync(&per_pin->work);
5036 + }
5037 + return 0;
5038 +}
5039 +
5040 static int generic_hdmi_resume(struct hda_codec *codec)
5041 {
5042 struct hdmi_spec *spec = codec->spec;
5043 @@ -2405,6 +2417,7 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = {
5044 .build_controls = generic_hdmi_build_controls,
5045 .unsol_event = hdmi_unsol_event,
5046 #ifdef CONFIG_PM
5047 + .suspend = generic_hdmi_suspend,
5048 .resume = generic_hdmi_resume,
5049 #endif
5050 };
5051 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
5052 index a7f31766d14df..9fb03c646a88f 100644
5053 --- a/sound/usb/quirks.c
5054 +++ b/sound/usb/quirks.c
5055 @@ -1452,6 +1452,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
5056 case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
5057 case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
5058 case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
5059 + case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
5060 return true;
5061 }
5062
5063 @@ -1604,6 +1605,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
5064 && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
5065 msleep(20);
5066
5067 + /*
5068 + * Plantronics headsets (C320, C320-M, etc) need a delay to avoid
5069 + * random microphone failures.
5070 + */
5071 + if (USB_ID_VENDOR(chip->usb_id) == 0x047f &&
5072 + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
5073 + msleep(20);
5074 +
5075 /* Zoom R16/24, many Logitech(at least H650e/H570e/BCC950),
5076 * Jabra 550a, Kingston HyperX needs a tiny delay here,
5077 * otherwise requests like get/set frequency return
5078 diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
5079 index 8593d3c200c61..0116b0c06e976 100644
5080 --- a/tools/perf/util/trace-event-read.c
5081 +++ b/tools/perf/util/trace-event-read.c
5082 @@ -361,6 +361,7 @@ static int read_saved_cmdline(struct tep_handle *pevent)
5083 pr_debug("error reading saved cmdlines\n");
5084 goto out;
5085 }
5086 + buf[ret] = '\0';
5087
5088 parse_saved_cmdline(pevent, buf, size);
5089 ret = 0;
5090 diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
5091 index 0f98724120deb..b4e9a1d8c6cdb 100644
5092 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
5093 +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
5094 @@ -446,10 +446,8 @@ int _geneve_get_tunnel(struct __sk_buff *skb)
5095 }
5096
5097 ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
5098 - if (ret < 0) {
5099 - ERROR(ret);
5100 - return TC_ACT_SHOT;
5101 - }
5102 + if (ret < 0)
5103 + gopt.opt_class = 0;
5104
5105 bpf_trace_printk(fmt, sizeof(fmt),
5106 key.tunnel_id, key.remote_ipv4, gopt.opt_class);
5107 diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
5108 index f3c33e128709b..bcc87906c4c19 100644
5109 --- a/tools/testing/selftests/bpf/verifier/array_access.c
5110 +++ b/tools/testing/selftests/bpf/verifier/array_access.c
5111 @@ -250,12 +250,13 @@
5112 BPF_MOV64_IMM(BPF_REG_5, 0),
5113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5114 BPF_FUNC_csum_diff),
5115 + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
5116 BPF_EXIT_INSN(),
5117 },
5118 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5119 .fixup_map_array_ro = { 3 },
5120 .result = ACCEPT,
5121 - .retval = -29,
5122 + .retval = 65507,
5123 },
5124 {
5125 "invalid write map access into a read-only array 1",
5126 diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
5127 index 197e769c2ed16..f8cda822c1cec 100755
5128 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
5129 +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
5130 @@ -86,11 +86,20 @@ test_ip6gretap()
5131
5132 test_gretap_stp()
5133 {
5134 + # Sometimes after mirror installation, the neighbor's state is not valid.
5135 + # The reason is that there is no SW datapath activity related to the
5136 + # neighbor for the remote GRE address. Therefore whether the corresponding
5137 + # neighbor will be valid is a matter of luck, and the test is thus racy.
5138 + # Set the neighbor's state to permanent, so it would be always valid.
5139 + ip neigh replace 192.0.2.130 lladdr $(mac_get $h3) \
5140 + nud permanent dev br2
5141 full_test_span_gre_stp gt4 $swp3.555 "mirror to gretap"
5142 }
5143
5144 test_ip6gretap_stp()
5145 {
5146 + ip neigh replace 2001:db8:2::2 lladdr $(mac_get $h3) \
5147 + nud permanent dev br2
5148 full_test_span_gre_stp gt6 $swp3.555 "mirror to ip6gretap"
5149 }
5150
5151 diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
5152 index 986fbc3cf667b..2e7d2b3f29079 100644
5153 --- a/virt/kvm/arm/arm.c
5154 +++ b/virt/kvm/arm/arm.c
5155 @@ -373,11 +373,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
5156 cpu_data = this_cpu_ptr(&kvm_host_data);
5157
5158 /*
5159 + * We guarantee that both TLBs and I-cache are private to each
5160 + * vcpu. If detecting that a vcpu from the same VM has
5161 + * previously run on the same physical CPU, call into the
5162 + * hypervisor code to nuke the relevant contexts.
5163 + *
5164 + * We might get preempted before the vCPU actually runs, but
5165 * We might get preempted before the vCPU actually runs, but
5166 * over-invalidation doesn't affect correctness.
5167 */
5168 if (*last_ran != vcpu->vcpu_id) {
5169 - kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
5170 + kvm_call_hyp(__kvm_flush_cpu_context, vcpu);
5171 *last_ran = vcpu->vcpu_id;
5172 }
5173
5174 diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
5175 index 03a586ab6d27b..c6ba672f07ccf 100644
5176 --- a/virt/kvm/arm/mmu.c
5177 +++ b/virt/kvm/arm/mmu.c
5178 @@ -2307,8 +2307,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
5179 * Prevent userspace from creating a memory region outside of the IPA
5180 * space addressable by the KVM guest IPA space.
5181 */
5182 - if (memslot->base_gfn + memslot->npages >=
5183 - (kvm_phys_size(kvm) >> PAGE_SHIFT))
5184 + if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
5185 return -EFAULT;
5186
5187 down_read(&current->mm->mmap_sem);