Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0122-5.4.23-all-fixes.patch

Parent Directory | Revision Log


Revision 3503 - (hide annotations) (download)
Mon May 11 14:36:24 2020 UTC (4 years ago) by niro
File size: 201423 byte(s)
-linux-5.4.23
1 niro 3503 diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
2     index d4a85d535bf9..4a9d9c794ee5 100644
3     --- a/Documentation/arm64/tagged-address-abi.rst
4     +++ b/Documentation/arm64/tagged-address-abi.rst
5     @@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending
6     how the user addresses are used by the kernel:
7    
8     1. User addresses not accessed by the kernel but used for address space
9     - management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
10     - of valid tagged pointers in this context is always allowed.
11     + management (e.g. ``mprotect()``, ``madvise()``). The use of valid
12     + tagged pointers in this context is allowed with the exception of
13     + ``brk()``, ``mmap()`` and the ``new_address`` argument to
14     + ``mremap()`` as these have the potential to alias with existing
15     + user addresses.
16     +
17     + NOTE: This behaviour changed in v5.6 and so some earlier kernels may
18     + incorrectly accept valid tagged pointers for the ``brk()``,
19     + ``mmap()`` and ``mremap()`` system calls.
20    
21     2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
22     relaxation is disabled by default and the application thread needs to
23     diff --git a/MAINTAINERS b/MAINTAINERS
24     index d1aeebb59e6a..fe6fa5d3a63e 100644
25     --- a/MAINTAINERS
26     +++ b/MAINTAINERS
27     @@ -8201,7 +8201,7 @@ M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
28     M: Rodrigo Vivi <rodrigo.vivi@intel.com>
29     L: intel-gfx@lists.freedesktop.org
30     W: https://01.org/linuxgraphics/
31     -B: https://01.org/linuxgraphics/documentation/how-report-bugs
32     +B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
33     C: irc://chat.freenode.net/intel-gfx
34     Q: http://patchwork.freedesktop.org/project/intel-gfx/
35     T: git git://anongit.freedesktop.org/drm-intel
36     diff --git a/Makefile b/Makefile
37     index 9428ec3b611a..af5e90075514 100644
38     --- a/Makefile
39     +++ b/Makefile
40     @@ -1,7 +1,7 @@
41     # SPDX-License-Identifier: GPL-2.0
42     VERSION = 5
43     PATCHLEVEL = 4
44     -SUBLEVEL = 22
45     +SUBLEVEL = 23
46     EXTRAVERSION =
47     NAME = Kleptomaniac Octopus
48    
49     diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
50     index 73834996c4b6..5de132100b6d 100644
51     --- a/arch/arm64/include/asm/lse.h
52     +++ b/arch/arm64/include/asm/lse.h
53     @@ -6,7 +6,7 @@
54    
55     #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
56    
57     -#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
58     +#define __LSE_PREAMBLE ".arch_extension lse\n"
59    
60     #include <linux/compiler_types.h>
61     #include <linux/export.h>
62     diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
63     index c23c47360664..08df42e4db96 100644
64     --- a/arch/arm64/include/asm/memory.h
65     +++ b/arch/arm64/include/asm/memory.h
66     @@ -219,7 +219,7 @@ static inline unsigned long kaslr_offset(void)
67     ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
68    
69     #define untagged_addr(addr) ({ \
70     - u64 __addr = (__force u64)addr; \
71     + u64 __addr = (__force u64)(addr); \
72     __addr &= __untagged_addr(__addr); \
73     (__force __typeof__(addr))__addr; \
74     })
75     diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
76     index c8bb14ff4713..6ba5adb96a3b 100644
77     --- a/arch/powerpc/include/asm/page.h
78     +++ b/arch/powerpc/include/asm/page.h
79     @@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn)
80     /*
81     * Some number of bits at the level of the page table that points to
82     * a hugepte are used to encode the size. This masks those bits.
83     + * On 8xx, HW assistance requires 4k alignment for the hugepte.
84     */
85     +#ifdef CONFIG_PPC_8xx
86     +#define HUGEPD_SHIFT_MASK 0xfff
87     +#else
88     #define HUGEPD_SHIFT_MASK 0x3f
89     +#endif
90    
91     #ifndef __ASSEMBLY__
92    
93     diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
94     index 2fb166928e91..4fd7efdf2a53 100644
95     --- a/arch/powerpc/kernel/eeh_driver.c
96     +++ b/arch/powerpc/kernel/eeh_driver.c
97     @@ -1200,6 +1200,17 @@ void eeh_handle_special_event(void)
98     eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
99     eeh_handle_normal_event(pe);
100     } else {
101     + eeh_for_each_pe(pe, tmp_pe)
102     + eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
103     + edev->mode &= ~EEH_DEV_NO_HANDLER;
104     +
105     + /* Notify all devices to be down */
106     + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
107     + eeh_set_channel_state(pe, pci_channel_io_perm_failure);
108     + eeh_pe_report(
109     + "error_detected(permanent failure)", pe,
110     + eeh_report_failure, NULL);
111     +
112     pci_lock_rescan_remove();
113     list_for_each_entry(hose, &hose_list, list_node) {
114     phb_pe = eeh_phb_pe_get(hose);
115     @@ -1208,16 +1219,6 @@ void eeh_handle_special_event(void)
116     (phb_pe->state & EEH_PE_RECOVERING))
117     continue;
118    
119     - eeh_for_each_pe(pe, tmp_pe)
120     - eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
121     - edev->mode &= ~EEH_DEV_NO_HANDLER;
122     -
123     - /* Notify all devices to be down */
124     - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
125     - eeh_set_channel_state(pe, pci_channel_io_perm_failure);
126     - eeh_pe_report(
127     - "error_detected(permanent failure)", pe,
128     - eeh_report_failure, NULL);
129     bus = eeh_pe_bus_get(phb_pe);
130     if (!bus) {
131     pr_err("%s: Cannot find PCI bus for "
132     diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
133     index 59bb4f4ae316..13f699256258 100644
134     --- a/arch/powerpc/kernel/entry_32.S
135     +++ b/arch/powerpc/kernel/entry_32.S
136     @@ -778,7 +778,7 @@ fast_exception_return:
137     1: lis r3,exc_exit_restart_end@ha
138     addi r3,r3,exc_exit_restart_end@l
139     cmplw r12,r3
140     -#if CONFIG_PPC_BOOK3S_601
141     +#ifdef CONFIG_PPC_BOOK3S_601
142     bge 2b
143     #else
144     bge 3f
145     @@ -786,7 +786,7 @@ fast_exception_return:
146     lis r4,exc_exit_restart@ha
147     addi r4,r4,exc_exit_restart@l
148     cmplw r12,r4
149     -#if CONFIG_PPC_BOOK3S_601
150     +#ifdef CONFIG_PPC_BOOK3S_601
151     blt 2b
152     #else
153     blt 3f
154     diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
155     index 19f583e18402..98d8b6832fcb 100644
156     --- a/arch/powerpc/kernel/head_8xx.S
157     +++ b/arch/powerpc/kernel/head_8xx.S
158     @@ -289,7 +289,7 @@ InstructionTLBMiss:
159     * set. All other Linux PTE bits control the behavior
160     * of the MMU.
161     */
162     - rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */
163     + rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */
164     rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
165     ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
166     mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
167     diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
168     index e6c30cee6abf..d215f9554553 100644
169     --- a/arch/powerpc/kernel/signal.c
170     +++ b/arch/powerpc/kernel/signal.c
171     @@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
172     * normal/non-checkpointed stack pointer.
173     */
174    
175     + unsigned long ret = tsk->thread.regs->gpr[1];
176     +
177     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
178     BUG_ON(tsk != current);
179    
180     if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
181     + preempt_disable();
182     tm_reclaim_current(TM_CAUSE_SIGNAL);
183     if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
184     - return tsk->thread.ckpt_regs.gpr[1];
185     + ret = tsk->thread.ckpt_regs.gpr[1];
186     +
187     + /*
188     + * If we treclaim, we must clear the current thread's TM bits
189     + * before re-enabling preemption. Otherwise we might be
190     + * preempted and have the live MSR[TS] changed behind our back
191     + * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
192     + * enter the signal handler in non-transactional state.
193     + */
194     + tsk->thread.regs->msr &= ~MSR_TS_MASK;
195     + preempt_enable();
196     }
197     #endif
198     - return tsk->thread.regs->gpr[1];
199     + return ret;
200     }
201     diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
202     index 98600b276f76..1b090a76b444 100644
203     --- a/arch/powerpc/kernel/signal_32.c
204     +++ b/arch/powerpc/kernel/signal_32.c
205     @@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
206     */
207     static int save_tm_user_regs(struct pt_regs *regs,
208     struct mcontext __user *frame,
209     - struct mcontext __user *tm_frame, int sigret)
210     + struct mcontext __user *tm_frame, int sigret,
211     + unsigned long msr)
212     {
213     - unsigned long msr = regs->msr;
214     -
215     WARN_ON(tm_suspend_disabled);
216    
217     - /* Remove TM bits from thread's MSR. The MSR in the sigcontext
218     - * just indicates to userland that we were doing a transaction, but we
219     - * don't want to return in transactional state. This also ensures
220     - * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
221     - */
222     - regs->msr &= ~MSR_TS_MASK;
223     -
224     /* Save both sets of general registers */
225     if (save_general_regs(&current->thread.ckpt_regs, frame)
226     || save_general_regs(regs, tm_frame))
227     @@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
228     int sigret;
229     unsigned long tramp;
230     struct pt_regs *regs = tsk->thread.regs;
231     +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
232     + /* Save the thread's msr before get_tm_stackpointer() changes it */
233     + unsigned long msr = regs->msr;
234     +#endif
235    
236     BUG_ON(tsk != current);
237    
238     @@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
239    
240     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
241     tm_frame = &rt_sf->uc_transact.uc_mcontext;
242     - if (MSR_TM_ACTIVE(regs->msr)) {
243     + if (MSR_TM_ACTIVE(msr)) {
244     if (__put_user((unsigned long)&rt_sf->uc_transact,
245     &rt_sf->uc.uc_link) ||
246     __put_user((unsigned long)tm_frame,
247     &rt_sf->uc_transact.uc_regs))
248     goto badframe;
249     - if (save_tm_user_regs(regs, frame, tm_frame, sigret))
250     + if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
251     goto badframe;
252     }
253     else
254     @@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
255     int sigret;
256     unsigned long tramp;
257     struct pt_regs *regs = tsk->thread.regs;
258     +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
259     + /* Save the thread's msr before get_tm_stackpointer() changes it */
260     + unsigned long msr = regs->msr;
261     +#endif
262    
263     BUG_ON(tsk != current);
264    
265     @@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
266    
267     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
268     tm_mctx = &frame->mctx_transact;
269     - if (MSR_TM_ACTIVE(regs->msr)) {
270     + if (MSR_TM_ACTIVE(msr)) {
271     if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
272     - sigret))
273     + sigret, msr))
274     goto badframe;
275     }
276     else
277     diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
278     index 117515564ec7..84ed2e77ef9c 100644
279     --- a/arch/powerpc/kernel/signal_64.c
280     +++ b/arch/powerpc/kernel/signal_64.c
281     @@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
282     static long setup_tm_sigcontexts(struct sigcontext __user *sc,
283     struct sigcontext __user *tm_sc,
284     struct task_struct *tsk,
285     - int signr, sigset_t *set, unsigned long handler)
286     + int signr, sigset_t *set, unsigned long handler,
287     + unsigned long msr)
288     {
289     /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
290     * process never used altivec yet (MSR_VEC is zero in pt_regs of
291     @@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
292     elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
293     #endif
294     struct pt_regs *regs = tsk->thread.regs;
295     - unsigned long msr = tsk->thread.regs->msr;
296     long err = 0;
297    
298     BUG_ON(tsk != current);
299    
300     - BUG_ON(!MSR_TM_ACTIVE(regs->msr));
301     + BUG_ON(!MSR_TM_ACTIVE(msr));
302    
303     WARN_ON(tm_suspend_disabled);
304    
305     @@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
306     */
307     msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
308    
309     - /* Remove TM bits from thread's MSR. The MSR in the sigcontext
310     - * just indicates to userland that we were doing a transaction, but we
311     - * don't want to return in transactional state. This also ensures
312     - * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
313     - */
314     - regs->msr &= ~MSR_TS_MASK;
315     -
316     #ifdef CONFIG_ALTIVEC
317     err |= __put_user(v_regs, &sc->v_regs);
318     err |= __put_user(tm_v_regs, &tm_sc->v_regs);
319     @@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
320     unsigned long newsp = 0;
321     long err = 0;
322     struct pt_regs *regs = tsk->thread.regs;
323     +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
324     + /* Save the thread's msr before get_tm_stackpointer() changes it */
325     + unsigned long msr = regs->msr;
326     +#endif
327    
328     BUG_ON(tsk != current);
329    
330     @@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
331     err |= __put_user(0, &frame->uc.uc_flags);
332     err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
333     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
334     - if (MSR_TM_ACTIVE(regs->msr)) {
335     + if (MSR_TM_ACTIVE(msr)) {
336     /* The ucontext_t passed to userland points to the second
337     * ucontext_t (for transactional state) with its uc_link ptr.
338     */
339     @@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
340     err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
341     &frame->uc_transact.uc_mcontext,
342     tsk, ksig->sig, NULL,
343     - (unsigned long)ksig->ka.sa.sa_handler);
344     + (unsigned long)ksig->ka.sa.sa_handler,
345     + msr);
346     } else
347     #endif
348     {
349     diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
350     index 73d4873fc7f8..33b3461d91e8 100644
351     --- a/arch/powerpc/mm/hugetlbpage.c
352     +++ b/arch/powerpc/mm/hugetlbpage.c
353     @@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
354     if (pshift >= pdshift) {
355     cachep = PGT_CACHE(PTE_T_ORDER);
356     num_hugepd = 1 << (pshift - pdshift);
357     + new = NULL;
358     } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
359     - cachep = PGT_CACHE(PTE_INDEX_SIZE);
360     + cachep = NULL;
361     num_hugepd = 1;
362     + new = pte_alloc_one(mm);
363     } else {
364     cachep = PGT_CACHE(pdshift - pshift);
365     num_hugepd = 1;
366     + new = NULL;
367     }
368    
369     - if (!cachep) {
370     + if (!cachep && !new) {
371     WARN_ONCE(1, "No page table cache created for hugetlb tables");
372     return -ENOMEM;
373     }
374    
375     - new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
376     + if (cachep)
377     + new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
378    
379     BUG_ON(pshift > HUGEPD_SHIFT_MASK);
380     BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
381     @@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
382     if (i < num_hugepd) {
383     for (i = i - 1 ; i >= 0; i--, hpdp--)
384     *hpdp = __hugepd(0);
385     - kmem_cache_free(cachep, new);
386     + if (cachep)
387     + kmem_cache_free(cachep, new);
388     + else
389     + pte_free(mm, new);
390     } else {
391     kmemleak_ignore(new);
392     }
393     @@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
394     if (shift >= pdshift)
395     hugepd_free(tlb, hugepte);
396     else if (IS_ENABLED(CONFIG_PPC_8xx))
397     - pgtable_free_tlb(tlb, hugepte,
398     - get_hugepd_cache_index(PTE_INDEX_SIZE));
399     + pgtable_free_tlb(tlb, hugepte, 0);
400     else
401     pgtable_free_tlb(tlb, hugepte,
402     get_hugepd_cache_index(pdshift - shift));
403     @@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
404     * if we have pdshift and shift value same, we don't
405     * use pgt cache for hugepd.
406     */
407     - if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
408     - pgtable_cache_add(PTE_INDEX_SIZE);
409     - else if (pdshift > shift)
410     - pgtable_cache_add(pdshift - shift);
411     - else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
412     + if (pdshift > shift) {
413     + if (!IS_ENABLED(CONFIG_PPC_8xx))
414     + pgtable_cache_add(pdshift - shift);
415     + } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
416     + IS_ENABLED(CONFIG_PPC_8xx)) {
417     pgtable_cache_add(PTE_T_ORDER);
418     + }
419    
420     configured = true;
421     }
422     diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
423     index 5d12352545c5..5591243d673e 100644
424     --- a/arch/s390/boot/kaslr.c
425     +++ b/arch/s390/boot/kaslr.c
426     @@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit)
427     *(unsigned long *) prng.parm_block ^= seed;
428     for (i = 0; i < 16; i++) {
429     cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
430     - (char *) entropy, (char *) entropy,
431     + (u8 *) entropy, (u8 *) entropy,
432     sizeof(entropy));
433     memcpy(prng.parm_block, entropy, sizeof(entropy));
434     }
435     diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
436     index 3f5cb55cde35..e399102367af 100644
437     --- a/arch/s390/include/asm/page.h
438     +++ b/arch/s390/include/asm/page.h
439     @@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
440    
441     static inline void storage_key_init_range(unsigned long start, unsigned long end)
442     {
443     - if (PAGE_DEFAULT_KEY)
444     + if (PAGE_DEFAULT_KEY != 0)
445     __storage_key_init_range(start, end);
446     }
447    
448     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
449     index c1ed054c103c..734a3334e0f0 100644
450     --- a/arch/x86/include/asm/kvm_host.h
451     +++ b/arch/x86/include/asm/kvm_host.h
452     @@ -1098,7 +1098,7 @@ struct kvm_x86_ops {
453     void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
454     void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
455     void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
456     - void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
457     + int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
458     int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
459     int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
460     int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
461     diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
462     index 6a3124664289..1682e4b5ce75 100644
463     --- a/arch/x86/include/asm/msr-index.h
464     +++ b/arch/x86/include/asm/msr-index.h
465     @@ -510,6 +510,8 @@
466     #define MSR_K7_HWCR 0xc0010015
467     #define MSR_K7_HWCR_SMMLOCK_BIT 0
468     #define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
469     +#define MSR_K7_HWCR_IRPERF_EN_BIT 30
470     +#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
471     #define MSR_K7_FID_VID_CTL 0xc0010041
472     #define MSR_K7_FID_VID_STATUS 0xc0010042
473    
474     diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
475     index 62c30279be77..c3f4dd4ae155 100644
476     --- a/arch/x86/kernel/cpu/amd.c
477     +++ b/arch/x86/kernel/cpu/amd.c
478     @@ -28,6 +28,7 @@
479    
480     static const int amd_erratum_383[];
481     static const int amd_erratum_400[];
482     +static const int amd_erratum_1054[];
483     static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
484    
485     /*
486     @@ -978,6 +979,15 @@ static void init_amd(struct cpuinfo_x86 *c)
487     /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
488     if (!cpu_has(c, X86_FEATURE_XENPV))
489     set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
490     +
491     + /*
492     + * Turn on the Instructions Retired free counter on machines not
493     + * susceptible to erratum #1054 "Instructions Retired Performance
494     + * Counter May Be Inaccurate".
495     + */
496     + if (cpu_has(c, X86_FEATURE_IRPERF) &&
497     + !cpu_has_amd_erratum(c, amd_erratum_1054))
498     + msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
499     }
500    
501     #ifdef CONFIG_X86_32
502     @@ -1105,6 +1115,10 @@ static const int amd_erratum_400[] =
503     static const int amd_erratum_383[] =
504     AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
505    
506     +/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
507     +static const int amd_erratum_1054[] =
508     + AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
509     +
510    
511     static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
512     {
513     diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
514     index 259f3f4e2e5f..1cf34fcc3a8e 100644
515     --- a/arch/x86/kernel/cpu/mce/amd.c
516     +++ b/arch/x86/kernel/cpu/mce/amd.c
517     @@ -1161,9 +1161,12 @@ static const struct sysfs_ops threshold_ops = {
518     .store = store,
519     };
520    
521     +static void threshold_block_release(struct kobject *kobj);
522     +
523     static struct kobj_type threshold_ktype = {
524     .sysfs_ops = &threshold_ops,
525     .default_attrs = default_attrs,
526     + .release = threshold_block_release,
527     };
528    
529     static const char *get_name(unsigned int bank, struct threshold_block *b)
530     @@ -1196,8 +1199,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
531     return buf_mcatype;
532     }
533    
534     -static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
535     - unsigned int block, u32 address)
536     +static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
537     + unsigned int bank, unsigned int block,
538     + u32 address)
539     {
540     struct threshold_block *b = NULL;
541     u32 low, high;
542     @@ -1241,16 +1245,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
543    
544     INIT_LIST_HEAD(&b->miscj);
545    
546     - if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
547     - list_add(&b->miscj,
548     - &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
549     - } else {
550     - per_cpu(threshold_banks, cpu)[bank]->blocks = b;
551     - }
552     + if (tb->blocks)
553     + list_add(&b->miscj, &tb->blocks->miscj);
554     + else
555     + tb->blocks = b;
556    
557     - err = kobject_init_and_add(&b->kobj, &threshold_ktype,
558     - per_cpu(threshold_banks, cpu)[bank]->kobj,
559     - get_name(bank, b));
560     + err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
561     if (err)
562     goto out_free;
563     recurse:
564     @@ -1258,7 +1258,7 @@ recurse:
565     if (!address)
566     return 0;
567    
568     - err = allocate_threshold_blocks(cpu, bank, block, address);
569     + err = allocate_threshold_blocks(cpu, tb, bank, block, address);
570     if (err)
571     goto out_free;
572    
573     @@ -1343,8 +1343,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
574     goto out_free;
575     }
576    
577     - per_cpu(threshold_banks, cpu)[bank] = b;
578     -
579     if (is_shared_bank(bank)) {
580     refcount_set(&b->cpus, 1);
581    
582     @@ -1355,9 +1353,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
583     }
584     }
585    
586     - err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
587     - if (!err)
588     - goto out;
589     + err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
590     + if (err)
591     + goto out_free;
592     +
593     + per_cpu(threshold_banks, cpu)[bank] = b;
594     +
595     + return 0;
596    
597     out_free:
598     kfree(b);
599     @@ -1366,8 +1368,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
600     return err;
601     }
602    
603     -static void deallocate_threshold_block(unsigned int cpu,
604     - unsigned int bank)
605     +static void threshold_block_release(struct kobject *kobj)
606     +{
607     + kfree(to_block(kobj));
608     +}
609     +
610     +static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
611     {
612     struct threshold_block *pos = NULL;
613     struct threshold_block *tmp = NULL;
614     @@ -1377,13 +1383,11 @@ static void deallocate_threshold_block(unsigned int cpu,
615     return;
616    
617     list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
618     - kobject_put(&pos->kobj);
619     list_del(&pos->miscj);
620     - kfree(pos);
621     + kobject_put(&pos->kobj);
622     }
623    
624     - kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
625     - per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
626     + kobject_put(&head->blocks->kobj);
627     }
628    
629     static void __threshold_remove_blocks(struct threshold_bank *b)
630     diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
631     index 4d4f5d9faac3..23054909c8dd 100644
632     --- a/arch/x86/kernel/ima_arch.c
633     +++ b/arch/x86/kernel/ima_arch.c
634     @@ -10,8 +10,6 @@ extern struct boot_params boot_params;
635    
636     static enum efi_secureboot_mode get_sb_mode(void)
637     {
638     - efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
639     - efi_char16_t efi_SetupMode_name[] = L"SecureBoot";
640     efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
641     efi_status_t status;
642     unsigned long size;
643     @@ -25,7 +23,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
644     }
645    
646     /* Get variable contents into buffer */
647     - status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
648     + status = efi.get_variable(L"SecureBoot", &efi_variable_guid,
649     NULL, &size, &secboot);
650     if (status == EFI_NOT_FOUND) {
651     pr_info("ima: secureboot mode disabled\n");
652     @@ -38,7 +36,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
653     }
654    
655     size = sizeof(setupmode);
656     - status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
657     + status = efi.get_variable(L"SetupMode", &efi_variable_guid,
658     NULL, &size, &setupmode);
659    
660     if (status != EFI_SUCCESS) /* ignore unknown SetupMode */
661     diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
662     index 8ecd48d31800..5ddcaacef291 100644
663     --- a/arch/x86/kvm/irq_comm.c
664     +++ b/arch/x86/kvm/irq_comm.c
665     @@ -416,7 +416,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
666    
667     kvm_set_msi_irq(vcpu->kvm, entry, &irq);
668    
669     - if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
670     + if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
671     irq.dest_id, irq.dest_mode))
672     __set_bit(irq.vector, ioapic_handled_vectors);
673     }
674     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
675     index 15728971a430..5d2587005d0e 100644
676     --- a/arch/x86/kvm/lapic.c
677     +++ b/arch/x86/kvm/lapic.c
678     @@ -637,9 +637,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
679     static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
680     {
681     u8 val;
682     - if (pv_eoi_get_user(vcpu, &val) < 0)
683     + if (pv_eoi_get_user(vcpu, &val) < 0) {
684     printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
685     (unsigned long long)vcpu->arch.pv_eoi.msr_val);
686     + return false;
687     + }
688     return val & 0x1;
689     }
690    
691     @@ -1056,11 +1058,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
692     apic->regs + APIC_TMR);
693     }
694    
695     - if (vcpu->arch.apicv_active)
696     - kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
697     - else {
698     + if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
699     kvm_lapic_set_irr(vector, apic);
700     -
701     kvm_make_request(KVM_REQ_EVENT, vcpu);
702     kvm_vcpu_kick(vcpu);
703     }
704     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
705     index 8d1be7c61f10..207030db3481 100644
706     --- a/arch/x86/kvm/svm.c
707     +++ b/arch/x86/kvm/svm.c
708     @@ -5141,8 +5141,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
709     return;
710     }
711    
712     -static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
713     +static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
714     {
715     + if (!vcpu->arch.apicv_active)
716     + return -1;
717     +
718     kvm_lapic_set_irr(vec, vcpu->arch.apic);
719     smp_mb__after_atomic();
720    
721     @@ -5154,6 +5157,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
722     put_cpu();
723     } else
724     kvm_vcpu_wake_up(vcpu);
725     +
726     + return 0;
727     }
728    
729     static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
730     diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
731     index 283bdb7071af..f486e2606247 100644
732     --- a/arch/x86/kvm/vmx/capabilities.h
733     +++ b/arch/x86/kvm/vmx/capabilities.h
734     @@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept;
735     extern bool __read_mostly enable_unrestricted_guest;
736     extern bool __read_mostly enable_ept_ad_bits;
737     extern bool __read_mostly enable_pml;
738     +extern bool __read_mostly enable_apicv;
739     extern int __read_mostly pt_mode;
740    
741     #define PT_MODE_SYSTEM 0
742     diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
743     index 931d3b5f3acd..802ef7177d53 100644
744     --- a/arch/x86/kvm/vmx/nested.c
745     +++ b/arch/x86/kvm/vmx/nested.c
746     @@ -5132,24 +5132,17 @@ fail:
747     return 1;
748     }
749    
750     -
751     -static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
752     - struct vmcs12 *vmcs12)
753     +/*
754     + * Return true if an IO instruction with the specified port and size should cause
755     + * a VM-exit into L1.
756     + */
757     +bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
758     + int size)
759     {
760     - unsigned long exit_qualification;
761     + struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
762     gpa_t bitmap, last_bitmap;
763     - unsigned int port;
764     - int size;
765     u8 b;
766    
767     - if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
768     - return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
769     -
770     - exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
771     -
772     - port = exit_qualification >> 16;
773     - size = (exit_qualification & 7) + 1;
774     -
775     last_bitmap = (gpa_t)-1;
776     b = -1;
777    
778     @@ -5176,6 +5169,24 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
779     return false;
780     }
781    
782     +static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
783     + struct vmcs12 *vmcs12)
784     +{
785     + unsigned long exit_qualification;
786     + unsigned short port;
787     + int size;
788     +
789     + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
790     + return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
791     +
792     + exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
793     +
794     + port = exit_qualification >> 16;
795     + size = (exit_qualification & 7) + 1;
796     +
797     + return nested_vmx_check_io_bitmaps(vcpu, port, size);
798     +}
799     +
800     /*
801     * Return 1 if we should exit from L2 to L1 to handle an MSR access access,
802     * rather than handle it ourselves in L0. I.e., check whether L1 expressed
803     @@ -5796,8 +5807,7 @@ void nested_vmx_vcpu_setup(void)
804     * bit in the high half is on if the corresponding bit in the control field
805     * may be on. See also vmx_control_verify().
806     */
807     -void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
808     - bool apicv)
809     +void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
810     {
811     /*
812     * Note that as a general rule, the high half of the MSRs (bits in
813     @@ -5824,7 +5834,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
814     PIN_BASED_EXT_INTR_MASK |
815     PIN_BASED_NMI_EXITING |
816     PIN_BASED_VIRTUAL_NMIS |
817     - (apicv ? PIN_BASED_POSTED_INTR : 0);
818     + (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
819     msrs->pinbased_ctls_high |=
820     PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
821     PIN_BASED_VMX_PREEMPTION_TIMER;
822     diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
823     index 6280f33e5fa6..b8521c451bb0 100644
824     --- a/arch/x86/kvm/vmx/nested.h
825     +++ b/arch/x86/kvm/vmx/nested.h
826     @@ -17,8 +17,7 @@ enum nvmx_vmentry_status {
827     };
828    
829     void vmx_leave_nested(struct kvm_vcpu *vcpu);
830     -void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
831     - bool apicv);
832     +void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
833     void nested_vmx_hardware_unsetup(void);
834     __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
835     void nested_vmx_vcpu_setup(void);
836     @@ -33,6 +32,8 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
837     int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
838     int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
839     u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
840     +bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
841     + int size);
842    
843     static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
844     {
845     diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
846     index 84b57b461ad6..8ebcd9de87a2 100644
847     --- a/arch/x86/kvm/vmx/vmx.c
848     +++ b/arch/x86/kvm/vmx/vmx.c
849     @@ -95,7 +95,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
850     static bool __read_mostly fasteoi = 1;
851     module_param(fasteoi, bool, S_IRUGO);
852    
853     -static bool __read_mostly enable_apicv = 1;
854     +bool __read_mostly enable_apicv = 1;
855     module_param(enable_apicv, bool, S_IRUGO);
856    
857     /*
858     @@ -3853,24 +3853,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
859     * 2. If target vcpu isn't running(root mode), kick it to pick up the
860     * interrupt from PIR in next vmentry.
861     */
862     -static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
863     +static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
864     {
865     struct vcpu_vmx *vmx = to_vmx(vcpu);
866     int r;
867    
868     r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
869     if (!r)
870     - return;
871     + return 0;
872     +
873     + if (!vcpu->arch.apicv_active)
874     + return -1;
875    
876     if (pi_test_and_set_pir(vector, &vmx->pi_desc))
877     - return;
878     + return 0;
879    
880     /* If a previous notification has sent the IPI, nothing to do. */
881     if (pi_test_and_set_on(&vmx->pi_desc))
882     - return;
883     + return 0;
884    
885     if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
886     kvm_vcpu_kick(vcpu);
887     +
888     + return 0;
889     }
890    
891     /*
892     @@ -6802,8 +6807,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
893    
894     if (nested)
895     nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
896     - vmx_capability.ept,
897     - kvm_vcpu_apicv_active(&vmx->vcpu));
898     + vmx_capability.ept);
899     else
900     memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
901    
902     @@ -6885,8 +6889,7 @@ static int __init vmx_check_processor_compat(void)
903     if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
904     return -EIO;
905     if (nested)
906     - nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept,
907     - enable_apicv);
908     + nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
909     if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
910     printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
911     smp_processor_id());
912     @@ -7132,6 +7135,39 @@ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
913     to_vmx(vcpu)->req_immediate_exit = true;
914     }
915    
916     +static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
917     + struct x86_instruction_info *info)
918     +{
919     + struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
920     + unsigned short port;
921     + bool intercept;
922     + int size;
923     +
924     + if (info->intercept == x86_intercept_in ||
925     + info->intercept == x86_intercept_ins) {
926     + port = info->src_val;
927     + size = info->dst_bytes;
928     + } else {
929     + port = info->dst_val;
930     + size = info->src_bytes;
931     + }
932     +
933     + /*
934     + * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
935     + * VM-exits depend on the 'unconditional IO exiting' VM-execution
936     + * control.
937     + *
938     + * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
939     + */
940     + if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
941     + intercept = nested_cpu_has(vmcs12,
942     + CPU_BASED_UNCOND_IO_EXITING);
943     + else
944     + intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
945     +
946     + return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
947     +}
948     +
949     static int vmx_check_intercept(struct kvm_vcpu *vcpu,
950     struct x86_instruction_info *info,
951     enum x86_intercept_stage stage)
952     @@ -7139,19 +7175,31 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
953     struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
954     struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
955    
956     + switch (info->intercept) {
957     /*
958     * RDPID causes #UD if disabled through secondary execution controls.
959     * Because it is marked as EmulateOnUD, we need to intercept it here.
960     */
961     - if (info->intercept == x86_intercept_rdtscp &&
962     - !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
963     - ctxt->exception.vector = UD_VECTOR;
964     - ctxt->exception.error_code_valid = false;
965     - return X86EMUL_PROPAGATE_FAULT;
966     - }
967     + case x86_intercept_rdtscp:
968     + if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
969     + ctxt->exception.vector = UD_VECTOR;
970     + ctxt->exception.error_code_valid = false;
971     + return X86EMUL_PROPAGATE_FAULT;
972     + }
973     + break;
974     +
975     + case x86_intercept_in:
976     + case x86_intercept_ins:
977     + case x86_intercept_out:
978     + case x86_intercept_outs:
979     + return vmx_check_intercept_io(vcpu, info);
980    
981     /* TODO: check more intercepts... */
982     - return X86EMUL_CONTINUE;
983     + default:
984     + break;
985     + }
986     +
987     + return X86EMUL_UNHANDLEABLE;
988     }
989    
990     #ifdef CONFIG_X86_64
991     @@ -7736,7 +7784,7 @@ static __init int hardware_setup(void)
992    
993     if (nested) {
994     nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
995     - vmx_capability.ept, enable_apicv);
996     + vmx_capability.ept);
997    
998     r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
999     if (r)
1000     diff --git a/crypto/hash_info.c b/crypto/hash_info.c
1001     index c754cb75dd1a..a49ff96bde77 100644
1002     --- a/crypto/hash_info.c
1003     +++ b/crypto/hash_info.c
1004     @@ -26,7 +26,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
1005     [HASH_ALGO_TGR_128] = "tgr128",
1006     [HASH_ALGO_TGR_160] = "tgr160",
1007     [HASH_ALGO_TGR_192] = "tgr192",
1008     - [HASH_ALGO_SM3_256] = "sm3-256",
1009     + [HASH_ALGO_SM3_256] = "sm3",
1010     [HASH_ALGO_STREEBOG_256] = "streebog256",
1011     [HASH_ALGO_STREEBOG_512] = "streebog512",
1012     };
1013     diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
1014     index 9e2f5a05c066..bad2257356fe 100644
1015     --- a/drivers/acpi/acpica/evevent.c
1016     +++ b/drivers/acpi/acpica/evevent.c
1017     @@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
1018     handler) (acpi_gbl_fixed_event_handlers[event].context));
1019     }
1020    
1021     +/*******************************************************************************
1022     + *
1023     + * FUNCTION: acpi_any_fixed_event_status_set
1024     + *
1025     + * PARAMETERS: None
1026     + *
1027     + * RETURN: TRUE or FALSE
1028     + *
1029     + * DESCRIPTION: Checks the PM status register for active fixed events
1030     + *
1031     + ******************************************************************************/
1032     +
1033     +u32 acpi_any_fixed_event_status_set(void)
1034     +{
1035     + acpi_status status;
1036     + u32 in_status;
1037     + u32 in_enable;
1038     + u32 i;
1039     +
1040     + status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable);
1041     + if (ACPI_FAILURE(status)) {
1042     + return (FALSE);
1043     + }
1044     +
1045     + status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status);
1046     + if (ACPI_FAILURE(status)) {
1047     + return (FALSE);
1048     + }
1049     +
1050     + /*
1051     + * Check for all possible Fixed Events and dispatch those that are active
1052     + */
1053     + for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
1054     +
1055     + /* Both the status and enable bits must be on for this event */
1056     +
1057     + if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) &&
1058     + (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) {
1059     + return (TRUE);
1060     + }
1061     + }
1062     +
1063     + return (FALSE);
1064     +}
1065     +
1066     #endif /* !ACPI_REDUCED_HARDWARE */
1067     diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
1068     index 62348ec2a807..827530dae682 100644
1069     --- a/drivers/acpi/sleep.c
1070     +++ b/drivers/acpi/sleep.c
1071     @@ -992,6 +992,13 @@ static bool acpi_s2idle_wake(void)
1072     if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
1073     return true;
1074    
1075     + /*
1076     + * If the status bit of any enabled fixed event is set, the
1077     + * wakeup is regarded as valid.
1078     + */
1079     + if (acpi_any_fixed_event_status_set())
1080     + return true;
1081     +
1082     /*
1083     * If there are no EC events to process and at least one of the
1084     * other enabled GPEs is active, the wakeup is regarded as a
1085     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1086     index 05c2b32dcc4d..1787e3ad9c44 100644
1087     --- a/drivers/ata/ahci.c
1088     +++ b/drivers/ata/ahci.c
1089     @@ -80,6 +80,7 @@ enum board_ids {
1090    
1091     static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
1092     static void ahci_remove_one(struct pci_dev *dev);
1093     +static void ahci_shutdown_one(struct pci_dev *dev);
1094     static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1095     unsigned long deadline);
1096     static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
1097     @@ -593,6 +594,7 @@ static struct pci_driver ahci_pci_driver = {
1098     .id_table = ahci_pci_tbl,
1099     .probe = ahci_init_one,
1100     .remove = ahci_remove_one,
1101     + .shutdown = ahci_shutdown_one,
1102     .driver = {
1103     .pm = &ahci_pci_pm_ops,
1104     },
1105     @@ -1864,6 +1866,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1106     return 0;
1107     }
1108    
1109     +static void ahci_shutdown_one(struct pci_dev *pdev)
1110     +{
1111     + ata_pci_shutdown_one(pdev);
1112     +}
1113     +
1114     static void ahci_remove_one(struct pci_dev *pdev)
1115     {
1116     pm_runtime_get_noresume(&pdev->dev);
1117     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1118     index 84b183a6424e..581595b35573 100644
1119     --- a/drivers/ata/libata-core.c
1120     +++ b/drivers/ata/libata-core.c
1121     @@ -6762,6 +6762,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
1122     ata_host_detach(host);
1123     }
1124    
1125     +void ata_pci_shutdown_one(struct pci_dev *pdev)
1126     +{
1127     + struct ata_host *host = pci_get_drvdata(pdev);
1128     + int i;
1129     +
1130     + for (i = 0; i < host->n_ports; i++) {
1131     + struct ata_port *ap = host->ports[i];
1132     +
1133     + ap->pflags |= ATA_PFLAG_FROZEN;
1134     +
1135     + /* Disable port interrupts */
1136     + if (ap->ops->freeze)
1137     + ap->ops->freeze(ap);
1138     +
1139     + /* Stop the port DMA engines */
1140     + if (ap->ops->port_stop)
1141     + ap->ops->port_stop(ap);
1142     + }
1143     +}
1144     +
1145     /* move to PCI subsystem */
1146     int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1147     {
1148     @@ -7382,6 +7402,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
1149    
1150     #ifdef CONFIG_PCI
1151     EXPORT_SYMBOL_GPL(pci_test_config_bits);
1152     +EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
1153     EXPORT_SYMBOL_GPL(ata_pci_remove_one);
1154     #ifdef CONFIG_PM
1155     EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
1156     diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
1157     index 485865fd0412..f19a03b62365 100644
1158     --- a/drivers/block/floppy.c
1159     +++ b/drivers/block/floppy.c
1160     @@ -853,14 +853,17 @@ static void reset_fdc_info(int mode)
1161     /* selects the fdc and drive, and enables the fdc's input/dma. */
1162     static void set_fdc(int drive)
1163     {
1164     + unsigned int new_fdc = fdc;
1165     +
1166     if (drive >= 0 && drive < N_DRIVE) {
1167     - fdc = FDC(drive);
1168     + new_fdc = FDC(drive);
1169     current_drive = drive;
1170     }
1171     - if (fdc != 1 && fdc != 0) {
1172     + if (new_fdc >= N_FDC) {
1173     pr_info("bad fdc value\n");
1174     return;
1175     }
1176     + fdc = new_fdc;
1177     set_dor(fdc, ~0, 8);
1178     #if N_FDC > 1
1179     set_dor(1 - fdc, ~8, 0);
1180     diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
1181     index 5817dfe5c5d2..2f8026b71933 100644
1182     --- a/drivers/char/tpm/tpm2-cmd.c
1183     +++ b/drivers/char/tpm/tpm2-cmd.c
1184     @@ -831,6 +831,8 @@ static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index)
1185     return 0;
1186     }
1187    
1188     + bank->crypto_id = HASH_ALGO__LAST;
1189     +
1190     return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size);
1191     }
1192    
1193     diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
1194     index 66f1b2ac5cde..c27e206a764c 100644
1195     --- a/drivers/dma/imx-sdma.c
1196     +++ b/drivers/dma/imx-sdma.c
1197     @@ -760,8 +760,12 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
1198     return;
1199     }
1200     sdmac->desc = desc = to_sdma_desc(&vd->tx);
1201     -
1202     - list_del(&vd->node);
1203     + /*
1204     + * Do not delete the node in desc_issued list in cyclic mode, otherwise
1205     + * the desc allocated will never be freed in vchan_dma_desc_free_list
1206     + */
1207     + if (!(sdmac->flags & IMX_DMA_SG_LOOP))
1208     + list_del(&vd->node);
1209    
1210     sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
1211     sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
1212     @@ -1067,6 +1071,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
1213    
1214     spin_lock_irqsave(&sdmac->vc.lock, flags);
1215     vchan_get_all_descriptors(&sdmac->vc, &head);
1216     + sdmac->desc = NULL;
1217     spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1218     vchan_dma_desc_free_list(&sdmac->vc, &head);
1219     sdmac->context_loaded = false;
1220     @@ -1075,19 +1080,11 @@ static void sdma_channel_terminate_work(struct work_struct *work)
1221     static int sdma_disable_channel_async(struct dma_chan *chan)
1222     {
1223     struct sdma_channel *sdmac = to_sdma_chan(chan);
1224     - unsigned long flags;
1225     -
1226     - spin_lock_irqsave(&sdmac->vc.lock, flags);
1227    
1228     sdma_disable_channel(chan);
1229    
1230     - if (sdmac->desc) {
1231     - vchan_terminate_vdesc(&sdmac->desc->vd);
1232     - sdmac->desc = NULL;
1233     + if (sdmac->desc)
1234     schedule_work(&sdmac->terminate_worker);
1235     - }
1236     -
1237     - spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1238    
1239     return 0;
1240     }
1241     diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1242     index 596722e79a26..2816d0329738 100644
1243     --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1244     +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1245     @@ -3977,11 +3977,13 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
1246     {
1247     uint64_t clock;
1248    
1249     + amdgpu_gfx_off_ctrl(adev, false);
1250     mutex_lock(&adev->gfx.gpu_clock_mutex);
1251     WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
1252     clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
1253     ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
1254     mutex_unlock(&adev->gfx.gpu_clock_mutex);
1255     + amdgpu_gfx_off_ctrl(adev, true);
1256     return clock;
1257     }
1258    
1259     diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1260     index 0125ea7c4103..d85e1e559c82 100644
1261     --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1262     +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1263     @@ -4080,11 +4080,13 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
1264     {
1265     uint64_t clock;
1266    
1267     + amdgpu_gfx_off_ctrl(adev, false);
1268     mutex_lock(&adev->gfx.gpu_clock_mutex);
1269     WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
1270     clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
1271     ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
1272     mutex_unlock(&adev->gfx.gpu_clock_mutex);
1273     + amdgpu_gfx_off_ctrl(adev, true);
1274     return clock;
1275     }
1276    
1277     diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
1278     index 4ccfcdf8f16a..80934ca17260 100644
1279     --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
1280     +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
1281     @@ -267,7 +267,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
1282    
1283     static u32 soc15_get_xclk(struct amdgpu_device *adev)
1284     {
1285     - return adev->clock.spll.reference_freq;
1286     + u32 reference_clock = adev->clock.spll.reference_freq;
1287     +
1288     + if (adev->asic_type == CHIP_RAVEN)
1289     + return reference_clock / 4;
1290     +
1291     + return reference_clock;
1292     }
1293    
1294    
1295     diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
1296     index 8a8d605021f0..0454675a44cb 100644
1297     --- a/drivers/gpu/drm/bridge/tc358767.c
1298     +++ b/drivers/gpu/drm/bridge/tc358767.c
1299     @@ -294,7 +294,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
1300    
1301     static int tc_aux_wait_busy(struct tc_data *tc)
1302     {
1303     - return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
1304     + return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
1305     }
1306    
1307     static int tc_aux_write_data(struct tc_data *tc, const void *data,
1308     @@ -637,7 +637,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
1309     if (ret)
1310     goto err;
1311    
1312     - ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
1313     + ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
1314     if (ret == -ETIMEDOUT) {
1315     dev_err(tc->dev, "Timeout waiting for PHY to become ready");
1316     return ret;
1317     @@ -861,7 +861,7 @@ static int tc_wait_link_training(struct tc_data *tc)
1318     int ret;
1319    
1320     ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
1321     - LT_LOOPDONE, 1, 1000);
1322     + LT_LOOPDONE, 500, 100000);
1323     if (ret) {
1324     dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
1325     return ret;
1326     @@ -934,7 +934,7 @@ static int tc_main_link_enable(struct tc_data *tc)
1327     dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
1328     ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
1329    
1330     - ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
1331     + ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
1332     if (ret) {
1333     dev_err(dev, "timeout waiting for phy become ready");
1334     return ret;
1335     diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
1336     index 0d21402945ab..3317798945e8 100644
1337     --- a/drivers/gpu/drm/i915/Kconfig
1338     +++ b/drivers/gpu/drm/i915/Kconfig
1339     @@ -75,9 +75,8 @@ config DRM_I915_CAPTURE_ERROR
1340     help
1341     This option enables capturing the GPU state when a hang is detected.
1342     This information is vital for triaging hangs and assists in debugging.
1343     - Please report any hang to
1344     - https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
1345     - for triaging.
1346     + Please report any hang for triaging according to:
1347     + https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
1348    
1349     If in doubt, say "Y".
1350    
1351     diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
1352     index af50f05f4e9d..272503615378 100644
1353     --- a/drivers/gpu/drm/i915/display/intel_display.c
1354     +++ b/drivers/gpu/drm/i915/display/intel_display.c
1355     @@ -10510,7 +10510,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
1356     u32 base;
1357    
1358     if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
1359     - base = obj->phys_handle->busaddr;
1360     + base = sg_dma_address(obj->mm.pages->sgl);
1361     else
1362     base = intel_plane_ggtt_offset(plane_state);
1363    
1364     diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
1365     index 646859fea224..08b35587bc6d 100644
1366     --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
1367     +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
1368     @@ -240,9 +240,6 @@ struct drm_i915_gem_object {
1369    
1370     void *gvt_info;
1371     };
1372     -
1373     - /** for phys allocated objects */
1374     - struct drm_dma_handle *phys_handle;
1375     };
1376    
1377     static inline struct drm_i915_gem_object *
1378     diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
1379     index 768356908160..0cfe9bd76377 100644
1380     --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
1381     +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
1382     @@ -21,88 +21,87 @@
1383     static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
1384     {
1385     struct address_space *mapping = obj->base.filp->f_mapping;
1386     - struct drm_dma_handle *phys;
1387     - struct sg_table *st;
1388     struct scatterlist *sg;
1389     - char *vaddr;
1390     + struct sg_table *st;
1391     + dma_addr_t dma;
1392     + void *vaddr;
1393     + void *dst;
1394     int i;
1395     - int err;
1396    
1397     if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
1398     return -EINVAL;
1399    
1400     - /* Always aligning to the object size, allows a single allocation
1401     + /*
1402     + * Always aligning to the object size, allows a single allocation
1403     * to handle all possible callers, and given typical object sizes,
1404     * the alignment of the buddy allocation will naturally match.
1405     */
1406     - phys = drm_pci_alloc(obj->base.dev,
1407     - roundup_pow_of_two(obj->base.size),
1408     - roundup_pow_of_two(obj->base.size));
1409     - if (!phys)
1410     + vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
1411     + roundup_pow_of_two(obj->base.size),
1412     + &dma, GFP_KERNEL);
1413     + if (!vaddr)
1414     return -ENOMEM;
1415    
1416     - vaddr = phys->vaddr;
1417     + st = kmalloc(sizeof(*st), GFP_KERNEL);
1418     + if (!st)
1419     + goto err_pci;
1420     +
1421     + if (sg_alloc_table(st, 1, GFP_KERNEL))
1422     + goto err_st;
1423     +
1424     + sg = st->sgl;
1425     + sg->offset = 0;
1426     + sg->length = obj->base.size;
1427     +
1428     + sg_assign_page(sg, (struct page *)vaddr);
1429     + sg_dma_address(sg) = dma;
1430     + sg_dma_len(sg) = obj->base.size;
1431     +
1432     + dst = vaddr;
1433     for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
1434     struct page *page;
1435     - char *src;
1436     + void *src;
1437    
1438     page = shmem_read_mapping_page(mapping, i);
1439     - if (IS_ERR(page)) {
1440     - err = PTR_ERR(page);
1441     - goto err_phys;
1442     - }
1443     + if (IS_ERR(page))
1444     + goto err_st;
1445    
1446     src = kmap_atomic(page);
1447     - memcpy(vaddr, src, PAGE_SIZE);
1448     - drm_clflush_virt_range(vaddr, PAGE_SIZE);
1449     + memcpy(dst, src, PAGE_SIZE);
1450     + drm_clflush_virt_range(dst, PAGE_SIZE);
1451     kunmap_atomic(src);
1452    
1453     put_page(page);
1454     - vaddr += PAGE_SIZE;
1455     + dst += PAGE_SIZE;
1456     }
1457    
1458     intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
1459    
1460     - st = kmalloc(sizeof(*st), GFP_KERNEL);
1461     - if (!st) {
1462     - err = -ENOMEM;
1463     - goto err_phys;
1464     - }
1465     -
1466     - if (sg_alloc_table(st, 1, GFP_KERNEL)) {
1467     - kfree(st);
1468     - err = -ENOMEM;
1469     - goto err_phys;
1470     - }
1471     -
1472     - sg = st->sgl;
1473     - sg->offset = 0;
1474     - sg->length = obj->base.size;
1475     -
1476     - sg_dma_address(sg) = phys->busaddr;
1477     - sg_dma_len(sg) = obj->base.size;
1478     -
1479     - obj->phys_handle = phys;
1480     -
1481     __i915_gem_object_set_pages(obj, st, sg->length);
1482    
1483     return 0;
1484    
1485     -err_phys:
1486     - drm_pci_free(obj->base.dev, phys);
1487     -
1488     - return err;
1489     +err_st:
1490     + kfree(st);
1491     +err_pci:
1492     + dma_free_coherent(&obj->base.dev->pdev->dev,
1493     + roundup_pow_of_two(obj->base.size),
1494     + vaddr, dma);
1495     + return -ENOMEM;
1496     }
1497    
1498     static void
1499     i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
1500     struct sg_table *pages)
1501     {
1502     + dma_addr_t dma = sg_dma_address(pages->sgl);
1503     + void *vaddr = sg_page(pages->sgl);
1504     +
1505     __i915_gem_object_release_shmem(obj, pages, false);
1506    
1507     if (obj->mm.dirty) {
1508     struct address_space *mapping = obj->base.filp->f_mapping;
1509     - char *vaddr = obj->phys_handle->vaddr;
1510     + void *src = vaddr;
1511     int i;
1512    
1513     for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
1514     @@ -114,15 +113,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
1515     continue;
1516    
1517     dst = kmap_atomic(page);
1518     - drm_clflush_virt_range(vaddr, PAGE_SIZE);
1519     - memcpy(dst, vaddr, PAGE_SIZE);
1520     + drm_clflush_virt_range(src, PAGE_SIZE);
1521     + memcpy(dst, src, PAGE_SIZE);
1522     kunmap_atomic(dst);
1523    
1524     set_page_dirty(page);
1525     if (obj->mm.madv == I915_MADV_WILLNEED)
1526     mark_page_accessed(page);
1527     put_page(page);
1528     - vaddr += PAGE_SIZE;
1529     +
1530     + src += PAGE_SIZE;
1531     }
1532     obj->mm.dirty = false;
1533     }
1534     @@ -130,7 +130,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
1535     sg_free_table(pages);
1536     kfree(pages);
1537    
1538     - drm_pci_free(obj->base.dev, obj->phys_handle);
1539     + dma_free_coherent(&obj->base.dev->pdev->dev,
1540     + roundup_pow_of_two(obj->base.size),
1541     + vaddr, dma);
1542     }
1543    
1544     static void phys_release(struct drm_i915_gem_object *obj)
1545     diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
1546     index 22aab8593abf..926272b5a0ca 100644
1547     --- a/drivers/gpu/drm/i915/gt/intel_engine.h
1548     +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
1549     @@ -250,6 +250,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
1550     return pos & (ring->size - 1);
1551     }
1552    
1553     +static inline int intel_ring_direction(const struct intel_ring *ring,
1554     + u32 next, u32 prev)
1555     +{
1556     + typecheck(typeof(ring->size), next);
1557     + typecheck(typeof(ring->size), prev);
1558     + return (next - prev) << ring->wrap;
1559     +}
1560     +
1561     static inline bool
1562     intel_ring_offset_valid(const struct intel_ring *ring,
1563     unsigned int pos)
1564     diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
1565     index 798e1b024406..c77c9518c58b 100644
1566     --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
1567     +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
1568     @@ -107,6 +107,7 @@ struct intel_ring {
1569    
1570     u32 space;
1571     u32 size;
1572     + u32 wrap;
1573     u32 effective_size;
1574     };
1575    
1576     diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
1577     index 4949b5ad860f..66f6d1a897f2 100644
1578     --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
1579     +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
1580     @@ -471,12 +471,6 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
1581     return desc;
1582     }
1583    
1584     -static void unwind_wa_tail(struct i915_request *rq)
1585     -{
1586     - rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
1587     - assert_ring_tail_valid(rq->ring, rq->tail);
1588     -}
1589     -
1590     static struct i915_request *
1591     __unwind_incomplete_requests(struct intel_engine_cs *engine)
1592     {
1593     @@ -495,7 +489,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
1594     continue; /* XXX */
1595    
1596     __i915_request_unsubmit(rq);
1597     - unwind_wa_tail(rq);
1598    
1599     /*
1600     * Push the request back into the queue for later resubmission.
1601     @@ -650,13 +643,35 @@ execlists_schedule_out(struct i915_request *rq)
1602     i915_request_put(rq);
1603     }
1604    
1605     -static u64 execlists_update_context(const struct i915_request *rq)
1606     +static u64 execlists_update_context(struct i915_request *rq)
1607     {
1608     struct intel_context *ce = rq->hw_context;
1609     - u64 desc;
1610     + u64 desc = ce->lrc_desc;
1611     + u32 tail, prev;
1612    
1613     - ce->lrc_reg_state[CTX_RING_TAIL + 1] =
1614     - intel_ring_set_tail(rq->ring, rq->tail);
1615     + /*
1616     + * WaIdleLiteRestore:bdw,skl
1617     + *
1618     + * We should never submit the context with the same RING_TAIL twice
1619     + * just in case we submit an empty ring, which confuses the HW.
1620     + *
1621     + * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
1622     + * the normal request to be able to always advance the RING_TAIL on
1623     + * subsequent resubmissions (for lite restore). Should that fail us,
1624     + * and we try and submit the same tail again, force the context
1625     + * reload.
1626     + *
1627     + * If we need to return to a preempted context, we need to skip the
1628     + * lite-restore and force it to reload the RING_TAIL. Otherwise, the
1629     + * HW has a tendency to ignore us rewinding the TAIL to the end of
1630     + * an earlier request.
1631     + */
1632     + tail = intel_ring_set_tail(rq->ring, rq->tail);
1633     + prev = ce->lrc_reg_state[CTX_RING_TAIL + 1];
1634     + if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
1635     + desc |= CTX_DESC_FORCE_RESTORE;
1636     + ce->lrc_reg_state[CTX_RING_TAIL + 1] = tail;
1637     + rq->tail = rq->wa_tail;
1638    
1639     /*
1640     * Make sure the context image is complete before we submit it to HW.
1641     @@ -675,7 +690,6 @@ static u64 execlists_update_context(const struct i915_request *rq)
1642     */
1643     mb();
1644    
1645     - desc = ce->lrc_desc;
1646     ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
1647    
1648     return desc;
1649     @@ -919,6 +933,11 @@ last_active(const struct intel_engine_execlists *execlists)
1650     return *last;
1651     }
1652    
1653     +#define for_each_waiter(p__, rq__) \
1654     + list_for_each_entry_lockless(p__, \
1655     + &(rq__)->sched.waiters_list, \
1656     + wait_link)
1657     +
1658     static void defer_request(struct i915_request *rq, struct list_head * const pl)
1659     {
1660     LIST_HEAD(list);
1661     @@ -936,7 +955,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
1662     GEM_BUG_ON(i915_request_is_active(rq));
1663     list_move_tail(&rq->sched.link, pl);
1664    
1665     - list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
1666     + for_each_waiter(p, rq) {
1667     struct i915_request *w =
1668     container_of(p->waiter, typeof(*w), sched);
1669    
1670     @@ -1102,14 +1121,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
1671     */
1672     __unwind_incomplete_requests(engine);
1673    
1674     - /*
1675     - * If we need to return to the preempted context, we
1676     - * need to skip the lite-restore and force it to
1677     - * reload the RING_TAIL. Otherwise, the HW has a
1678     - * tendency to ignore us rewinding the TAIL to the
1679     - * end of an earlier request.
1680     - */
1681     - last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
1682     last = NULL;
1683     } else if (need_timeslice(engine, last) &&
1684     !timer_pending(&engine->execlists.timer)) {
1685     @@ -1150,16 +1161,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
1686     if (!list_is_last(&last->sched.link,
1687     &engine->active.requests))
1688     return;
1689     -
1690     - /*
1691     - * WaIdleLiteRestore:bdw,skl
1692     - * Apply the wa NOOPs to prevent
1693     - * ring:HEAD == rq:TAIL as we resubmit the
1694     - * request. See gen8_emit_fini_breadcrumb() for
1695     - * where we prepare the padding after the
1696     - * end of the request.
1697     - */
1698     - last->tail = last->wa_tail;
1699     }
1700     }
1701    
1702     diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
1703     index bacaa7bb8c9a..eee9fcbe0434 100644
1704     --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
1705     +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
1706     @@ -1312,6 +1312,8 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1707     kref_init(&ring->ref);
1708    
1709     ring->size = size;
1710     + ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
1711     +
1712     /* Workaround an erratum on the i830 which causes a hang if
1713     * the TAIL pointer points to within the last 2 cachelines
1714     * of the buffer.
1715     diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
1716     index 4b04af569c05..7dc7bb850d0a 100644
1717     --- a/drivers/gpu/drm/i915/gvt/gtt.c
1718     +++ b/drivers/gpu/drm/i915/gvt/gtt.c
1719     @@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
1720    
1721     if (mm->type == INTEL_GVT_MM_PPGTT) {
1722     list_del(&mm->ppgtt_mm.list);
1723     +
1724     + mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1725     list_del(&mm->ppgtt_mm.lru_list);
1726     + mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1727     +
1728     invalidate_ppgtt_mm(mm);
1729     } else {
1730     vfree(mm->ggtt_mm.virtual_ggtt);
1731     diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1732     index 98305d987ac1..4d561da3dcea 100644
1733     --- a/drivers/gpu/drm/i915/i915_gem.c
1734     +++ b/drivers/gpu/drm/i915/i915_gem.c
1735     @@ -136,7 +136,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
1736     struct drm_i915_gem_pwrite *args,
1737     struct drm_file *file)
1738     {
1739     - void *vaddr = obj->phys_handle->vaddr + args->offset;
1740     + void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
1741     char __user *user_data = u64_to_user_ptr(args->data_ptr);
1742    
1743     /*
1744     @@ -802,10 +802,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1745     ret = i915_gem_gtt_pwrite_fast(obj, args);
1746    
1747     if (ret == -EFAULT || ret == -ENOSPC) {
1748     - if (obj->phys_handle)
1749     - ret = i915_gem_phys_pwrite(obj, args, file);
1750     - else
1751     + if (i915_gem_object_has_struct_page(obj))
1752     ret = i915_gem_shmem_pwrite(obj, args);
1753     + else
1754     + ret = i915_gem_phys_pwrite(obj, args, file);
1755     }
1756    
1757     i915_gem_object_unpin_pages(obj);
1758     diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
1759     index e284bd76fa86..fe9edbba997c 100644
1760     --- a/drivers/gpu/drm/i915/i915_gpu_error.c
1761     +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
1762     @@ -1768,7 +1768,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
1763     if (!xchg(&warned, true) &&
1764     ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
1765     pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
1766     - pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1767     + pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
1768     + pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
1769     pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1770     pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
1771     pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
1772     diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
1773     index 3eba8a2b39c2..0ef205fe5e29 100644
1774     --- a/drivers/gpu/drm/i915/i915_scheduler.c
1775     +++ b/drivers/gpu/drm/i915/i915_scheduler.c
1776     @@ -418,8 +418,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
1777    
1778     if (!node_signaled(signal)) {
1779     INIT_LIST_HEAD(&dep->dfs_link);
1780     - list_add(&dep->wait_link, &signal->waiters_list);
1781     - list_add(&dep->signal_link, &node->signalers_list);
1782     dep->signaler = signal;
1783     dep->waiter = node;
1784     dep->flags = flags;
1785     @@ -429,6 +427,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
1786     !node_started(signal))
1787     node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
1788    
1789     + /* All set, now publish. Beware the lockless walkers. */
1790     + list_add(&dep->signal_link, &node->signalers_list);
1791     + list_add_rcu(&dep->wait_link, &signal->waiters_list);
1792     +
1793     /*
1794     * As we do not allow WAIT to preempt inflight requests,
1795     * once we have executed a request, along with triggering
1796     diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
1797     index 16acdf7bdbe6..17cfeef35a24 100644
1798     --- a/drivers/gpu/drm/i915/i915_utils.c
1799     +++ b/drivers/gpu/drm/i915/i915_utils.c
1800     @@ -8,9 +8,8 @@
1801     #include "i915_drv.h"
1802     #include "i915_utils.h"
1803    
1804     -#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
1805     -#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
1806     - "providing the dmesg log by booting with drm.debug=0xf"
1807     +#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
1808     +#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
1809    
1810     void
1811     __i915_printk(struct drm_i915_private *dev_priv, const char *level,
1812     diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
1813     index 24ab6249083a..6f420cc73dbd 100644
1814     --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
1815     +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
1816     @@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
1817    
1818     INTERLEAVED_RGB_FMT(RGB565,
1819     0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
1820     - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
1821     + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
1822     false, 2, 0,
1823     DPU_FETCH_LINEAR, 1),
1824    
1825     INTERLEAVED_RGB_FMT(BGR565,
1826     0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
1827     - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
1828     + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
1829     false, 2, 0,
1830     DPU_FETCH_LINEAR, 1),
1831    
1832     diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
1833     index 5193b6257061..b856e87574fd 100644
1834     --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
1835     +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
1836     @@ -451,6 +451,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
1837     asyw->clr.ntfy = armw->ntfy.handle != 0;
1838     asyw->clr.sema = armw->sema.handle != 0;
1839     asyw->clr.xlut = armw->xlut.handle != 0;
1840     + if (asyw->clr.xlut && asyw->visible)
1841     + asyw->set.xlut = asyw->xlut.handle != 0;
1842     asyw->clr.csc = armw->csc.valid;
1843     if (wndw->func->image_clr)
1844     asyw->clr.image = armw->image.handle[0] != 0;
1845     diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
1846     index 763cfca886a7..3107b0738e40 100644
1847     --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
1848     +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
1849     @@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
1850     as = mmu->as;
1851     if (as >= 0) {
1852     int en = atomic_inc_return(&mmu->as_count);
1853     - WARN_ON(en >= NUM_JOB_SLOTS);
1854     +
1855     + /*
1856     + * AS can be retained by active jobs or a perfcnt context,
1857     + * hence the '+ 1' here.
1858     + */
1859     + WARN_ON(en >= (NUM_JOB_SLOTS + 1));
1860    
1861     list_move(&mmu->list, &pfdev->as_lru_list);
1862     goto out;
1863     diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
1864     index 684820448be3..6913578d5aa7 100644
1865     --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
1866     +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
1867     @@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
1868     struct panfrost_file_priv *user = file_priv->driver_priv;
1869     struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
1870     struct drm_gem_shmem_object *bo;
1871     - u32 cfg;
1872     + u32 cfg, as;
1873     int ret;
1874    
1875     if (user == perfcnt->user)
1876     @@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
1877    
1878     perfcnt->user = user;
1879    
1880     - /*
1881     - * Always use address space 0 for now.
1882     - * FIXME: this needs to be updated when we start using different
1883     - * address space.
1884     - */
1885     - cfg = GPU_PERFCNT_CFG_AS(0) |
1886     + as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
1887     + cfg = GPU_PERFCNT_CFG_AS(as) |
1888     GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
1889    
1890     /*
1891     @@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
1892     drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
1893     perfcnt->buf = NULL;
1894     panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
1895     + panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
1896     panfrost_gem_mapping_put(perfcnt->mapping);
1897     perfcnt->mapping = NULL;
1898     pm_runtime_mark_last_busy(pfdev->dev);
1899     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1900     index b273e421e910..a1a035270cab 100644
1901     --- a/drivers/infiniband/ulp/isert/ib_isert.c
1902     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1903     @@ -2575,6 +2575,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
1904     }
1905     }
1906    
1907     +static void
1908     +isert_wait4cmds(struct iscsi_conn *conn)
1909     +{
1910     + isert_info("iscsi_conn %p\n", conn);
1911     +
1912     + if (conn->sess) {
1913     + target_sess_cmd_list_set_waiting(conn->sess->se_sess);
1914     + target_wait_for_sess_cmds(conn->sess->se_sess);
1915     + }
1916     +}
1917     +
1918     /**
1919     * isert_put_unsol_pending_cmds() - Drop commands waiting for
1920     * unsolicitate dataout
1921     @@ -2622,6 +2633,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
1922    
1923     ib_drain_qp(isert_conn->qp);
1924     isert_put_unsol_pending_cmds(conn);
1925     + isert_wait4cmds(conn);
1926     isert_wait4logout(isert_conn);
1927    
1928     queue_work(isert_release_wq, &isert_conn->release_work);
1929     diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
1930     index c31e7bc4ccbe..e0b3fa2bb7ab 100644
1931     --- a/drivers/iommu/qcom_iommu.c
1932     +++ b/drivers/iommu/qcom_iommu.c
1933     @@ -345,21 +345,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
1934     {
1935     struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
1936    
1937     - if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */
1938     - return;
1939     -
1940     iommu_put_dma_cookie(domain);
1941    
1942     - /* NOTE: unmap can be called after client device is powered off,
1943     - * for example, with GPUs or anything involving dma-buf. So we
1944     - * cannot rely on the device_link. Make sure the IOMMU is on to
1945     - * avoid unclocked accesses in the TLB inv path:
1946     - */
1947     - pm_runtime_get_sync(qcom_domain->iommu->dev);
1948     -
1949     - free_io_pgtable_ops(qcom_domain->pgtbl_ops);
1950     -
1951     - pm_runtime_put_sync(qcom_domain->iommu->dev);
1952     + if (qcom_domain->iommu) {
1953     + /*
1954     + * NOTE: unmap can be called after client device is powered
1955     + * off, for example, with GPUs or anything involving dma-buf.
1956     + * So we cannot rely on the device_link. Make sure the IOMMU
1957     + * is on to avoid unclocked accesses in the TLB inv path:
1958     + */
1959     + pm_runtime_get_sync(qcom_domain->iommu->dev);
1960     + free_io_pgtable_ops(qcom_domain->pgtbl_ops);
1961     + pm_runtime_put_sync(qcom_domain->iommu->dev);
1962     + }
1963    
1964     kfree(qcom_domain);
1965     }
1966     @@ -405,7 +403,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
1967     struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
1968     unsigned i;
1969    
1970     - if (!qcom_domain->iommu)
1971     + if (WARN_ON(!qcom_domain->iommu))
1972     return;
1973    
1974     pm_runtime_get_sync(qcom_iommu->dev);
1975     @@ -418,8 +416,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
1976     ctx->domain = NULL;
1977     }
1978     pm_runtime_put_sync(qcom_iommu->dev);
1979     -
1980     - qcom_domain->iommu = NULL;
1981     }
1982    
1983     static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
1984     diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
1985     index c27ed7363768..8c4507838325 100644
1986     --- a/drivers/net/ethernet/intel/e1000e/netdev.c
1987     +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
1988     @@ -4713,12 +4713,12 @@ int e1000e_close(struct net_device *netdev)
1989    
1990     pm_runtime_get_sync(&pdev->dev);
1991    
1992     - if (!test_bit(__E1000_DOWN, &adapter->state)) {
1993     + if (netif_device_present(netdev)) {
1994     e1000e_down(adapter, true);
1995     e1000_free_irq(adapter);
1996    
1997     /* Link status message must follow this format */
1998     - pr_info("%s NIC Link is Down\n", adapter->netdev->name);
1999     + pr_info("%s NIC Link is Down\n", netdev->name);
2000     }
2001    
2002     napi_disable(&adapter->napi);
2003     @@ -6309,10 +6309,14 @@ static int e1000e_pm_freeze(struct device *dev)
2004     {
2005     struct net_device *netdev = dev_get_drvdata(dev);
2006     struct e1000_adapter *adapter = netdev_priv(netdev);
2007     + bool present;
2008    
2009     + rtnl_lock();
2010     +
2011     + present = netif_device_present(netdev);
2012     netif_device_detach(netdev);
2013    
2014     - if (netif_running(netdev)) {
2015     + if (present && netif_running(netdev)) {
2016     int count = E1000_CHECK_RESET_COUNT;
2017    
2018     while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
2019     @@ -6324,6 +6328,8 @@ static int e1000e_pm_freeze(struct device *dev)
2020     e1000e_down(adapter, false);
2021     e1000_free_irq(adapter);
2022     }
2023     + rtnl_unlock();
2024     +
2025     e1000e_reset_interrupt_capability(adapter);
2026    
2027     /* Allow time for pending master requests to run */
2028     @@ -6571,6 +6577,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
2029     __e1000e_disable_aspm(pdev, state, 1);
2030     }
2031    
2032     +static int e1000e_pm_thaw(struct device *dev)
2033     +{
2034     + struct net_device *netdev = dev_get_drvdata(dev);
2035     + struct e1000_adapter *adapter = netdev_priv(netdev);
2036     + int rc = 0;
2037     +
2038     + e1000e_set_interrupt_capability(adapter);
2039     +
2040     + rtnl_lock();
2041     + if (netif_running(netdev)) {
2042     + rc = e1000_request_irq(adapter);
2043     + if (rc)
2044     + goto err_irq;
2045     +
2046     + e1000e_up(adapter);
2047     + }
2048     +
2049     + netif_device_attach(netdev);
2050     +err_irq:
2051     + rtnl_unlock();
2052     +
2053     + return rc;
2054     +}
2055     +
2056     #ifdef CONFIG_PM
2057     static int __e1000_resume(struct pci_dev *pdev)
2058     {
2059     @@ -6638,26 +6668,6 @@ static int __e1000_resume(struct pci_dev *pdev)
2060     }
2061    
2062     #ifdef CONFIG_PM_SLEEP
2063     -static int e1000e_pm_thaw(struct device *dev)
2064     -{
2065     - struct net_device *netdev = dev_get_drvdata(dev);
2066     - struct e1000_adapter *adapter = netdev_priv(netdev);
2067     -
2068     - e1000e_set_interrupt_capability(adapter);
2069     - if (netif_running(netdev)) {
2070     - u32 err = e1000_request_irq(adapter);
2071     -
2072     - if (err)
2073     - return err;
2074     -
2075     - e1000e_up(adapter);
2076     - }
2077     -
2078     - netif_device_attach(netdev);
2079     -
2080     - return 0;
2081     -}
2082     -
2083     static int e1000e_pm_suspend(struct device *dev)
2084     {
2085     struct pci_dev *pdev = to_pci_dev(dev);
2086     @@ -6829,16 +6839,11 @@ static void e1000_netpoll(struct net_device *netdev)
2087     static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
2088     pci_channel_state_t state)
2089     {
2090     - struct net_device *netdev = pci_get_drvdata(pdev);
2091     - struct e1000_adapter *adapter = netdev_priv(netdev);
2092     -
2093     - netif_device_detach(netdev);
2094     + e1000e_pm_freeze(&pdev->dev);
2095    
2096     if (state == pci_channel_io_perm_failure)
2097     return PCI_ERS_RESULT_DISCONNECT;
2098    
2099     - if (netif_running(netdev))
2100     - e1000e_down(adapter, true);
2101     pci_disable_device(pdev);
2102    
2103     /* Request a slot slot reset. */
2104     @@ -6904,10 +6909,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
2105    
2106     e1000_init_manageability_pt(adapter);
2107    
2108     - if (netif_running(netdev))
2109     - e1000e_up(adapter);
2110     -
2111     - netif_device_attach(netdev);
2112     + e1000e_pm_thaw(&pdev->dev);
2113    
2114     /* If the controller has AMT, do not set DRV_LOAD until the interface
2115     * is up. For all other cases, let the f/w know that the h/w is now
2116     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
2117     index 3a975641f902..20b907dc1e29 100644
2118     --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
2119     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
2120     @@ -200,7 +200,7 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
2121     netdev_err(priv->netdev, err_str);
2122    
2123     if (!reporter)
2124     - return err_ctx->recover(&err_ctx->ctx);
2125     + return err_ctx->recover(err_ctx->ctx);
2126    
2127     return devlink_health_report(reporter, err_str, err_ctx);
2128     }
2129     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2130     index 7c8796d9743f..a226277b0980 100644
2131     --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2132     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2133     @@ -179,6 +179,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
2134     }
2135     }
2136    
2137     +static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
2138     +{
2139     + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
2140     + mlx5_wq_ll_reset(&rq->mpwqe.wq);
2141     + else
2142     + mlx5_wq_cyc_reset(&rq->wqe.wq);
2143     +}
2144     +
2145     /* SW parser related functions */
2146    
2147     struct mlx5e_swp_spec {
2148     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2149     index 29a5a8c894e3..e5e91cbcbc31 100644
2150     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2151     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2152     @@ -723,6 +723,9 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
2153     if (!in)
2154     return -ENOMEM;
2155    
2156     + if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
2157     + mlx5e_rqwq_reset(rq);
2158     +
2159     rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
2160    
2161     MLX5_SET(modify_rq_in, in, rq_state, curr_state);
2162     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2163     index 60fddf8afc99..c6ed4b7f4f97 100644
2164     --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2165     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2166     @@ -2319,25 +2319,17 @@ out:
2167    
2168     int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
2169     {
2170     - int err = 0;
2171     -
2172     if (!esw)
2173     return -EOPNOTSUPP;
2174    
2175     if (!ESW_ALLOWED(esw))
2176     return -EPERM;
2177    
2178     - mutex_lock(&esw->state_lock);
2179     - if (esw->mode != MLX5_ESWITCH_LEGACY) {
2180     - err = -EOPNOTSUPP;
2181     - goto out;
2182     - }
2183     + if (esw->mode != MLX5_ESWITCH_LEGACY)
2184     + return -EOPNOTSUPP;
2185    
2186     *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
2187     -
2188     -out:
2189     - mutex_unlock(&esw->state_lock);
2190     - return err;
2191     + return 0;
2192     }
2193    
2194     int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
2195     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
2196     index dd2315ce4441..41e35b341b70 100644
2197     --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
2198     +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
2199     @@ -96,6 +96,13 @@ err_db_free:
2200     return err;
2201     }
2202    
2203     +void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq)
2204     +{
2205     + wq->wqe_ctr = 0;
2206     + wq->cur_sz = 0;
2207     + mlx5_wq_cyc_update_db_record(wq);
2208     +}
2209     +
2210     int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2211     void *qpc, struct mlx5_wq_qp *wq,
2212     struct mlx5_wq_ctrl *wq_ctrl)
2213     @@ -194,6 +201,19 @@ err_db_free:
2214     return err;
2215     }
2216    
2217     +static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq)
2218     +{
2219     + struct mlx5_wqe_srq_next_seg *next_seg;
2220     + int i;
2221     +
2222     + for (i = 0; i < wq->fbc.sz_m1; i++) {
2223     + next_seg = mlx5_wq_ll_get_wqe(wq, i);
2224     + next_seg->next_wqe_index = cpu_to_be16(i + 1);
2225     + }
2226     + next_seg = mlx5_wq_ll_get_wqe(wq, i);
2227     + wq->tail_next = &next_seg->next_wqe_index;
2228     +}
2229     +
2230     int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2231     void *wqc, struct mlx5_wq_ll *wq,
2232     struct mlx5_wq_ctrl *wq_ctrl)
2233     @@ -201,9 +221,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2234     u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
2235     u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
2236     struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
2237     - struct mlx5_wqe_srq_next_seg *next_seg;
2238     int err;
2239     - int i;
2240    
2241     err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
2242     if (err) {
2243     @@ -222,13 +240,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2244    
2245     mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
2246    
2247     - for (i = 0; i < fbc->sz_m1; i++) {
2248     - next_seg = mlx5_wq_ll_get_wqe(wq, i);
2249     - next_seg->next_wqe_index = cpu_to_be16(i + 1);
2250     - }
2251     - next_seg = mlx5_wq_ll_get_wqe(wq, i);
2252     - wq->tail_next = &next_seg->next_wqe_index;
2253     -
2254     + mlx5_wq_ll_init_list(wq);
2255     wq_ctrl->mdev = mdev;
2256    
2257     return 0;
2258     @@ -239,6 +251,15 @@ err_db_free:
2259     return err;
2260     }
2261    
2262     +void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq)
2263     +{
2264     + wq->head = 0;
2265     + wq->wqe_ctr = 0;
2266     + wq->cur_sz = 0;
2267     + mlx5_wq_ll_init_list(wq);
2268     + mlx5_wq_ll_update_db_record(wq);
2269     +}
2270     +
2271     void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
2272     {
2273     mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
2274     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
2275     index 55791f71a778..5efc038440df 100644
2276     --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
2277     +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
2278     @@ -80,10 +80,12 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2279     void *wqc, struct mlx5_wq_cyc *wq,
2280     struct mlx5_wq_ctrl *wq_ctrl);
2281     u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
2282     +void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);
2283    
2284     int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2285     void *qpc, struct mlx5_wq_qp *wq,
2286     struct mlx5_wq_ctrl *wq_ctrl);
2287     +void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);
2288    
2289     int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2290     void *cqc, struct mlx5_cqwq *wq,
2291     diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
2292     index 132ade51ee87..aed6354cb271 100644
2293     --- a/drivers/nvme/host/multipath.c
2294     +++ b/drivers/nvme/host/multipath.c
2295     @@ -711,6 +711,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2296     }
2297    
2298     INIT_WORK(&ctrl->ana_work, nvme_ana_work);
2299     + kfree(ctrl->ana_log_buf);
2300     ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
2301     if (!ctrl->ana_log_buf) {
2302     error = -ENOMEM;
2303     diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
2304     index 74d497d39c5a..c6695354b123 100644
2305     --- a/drivers/staging/android/ashmem.c
2306     +++ b/drivers/staging/android/ashmem.c
2307     @@ -351,8 +351,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
2308     _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
2309     }
2310    
2311     +static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
2312     +{
2313     + /* do not allow to mmap ashmem backing shmem file directly */
2314     + return -EPERM;
2315     +}
2316     +
2317     +static unsigned long
2318     +ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
2319     + unsigned long len, unsigned long pgoff,
2320     + unsigned long flags)
2321     +{
2322     + return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
2323     +}
2324     +
2325     static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
2326     {
2327     + static struct file_operations vmfile_fops;
2328     struct ashmem_area *asma = file->private_data;
2329     int ret = 0;
2330    
2331     @@ -393,6 +408,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
2332     }
2333     vmfile->f_mode |= FMODE_LSEEK;
2334     asma->file = vmfile;
2335     + /*
2336     + * override mmap operation of the vmfile so that it can't be
2337     + * remapped which would lead to creation of a new vma with no
2338     + * asma permission checks. Have to override get_unmapped_area
2339     + * as well to prevent VM_BUG_ON check for f_ops modification.
2340     + */
2341     + if (!vmfile_fops.mmap) {
2342     + vmfile_fops = *vmfile->f_op;
2343     + vmfile_fops.mmap = ashmem_vmfile_mmap;
2344     + vmfile_fops.get_unmapped_area =
2345     + ashmem_vmfile_get_unmapped_area;
2346     + }
2347     + vmfile->f_op = &vmfile_fops;
2348     }
2349     get_file(asma->file);
2350    
2351     diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
2352     index 9b19ea9d3fa1..9a3f7c034ab4 100644
2353     --- a/drivers/staging/greybus/audio_manager.c
2354     +++ b/drivers/staging/greybus/audio_manager.c
2355     @@ -92,8 +92,8 @@ void gb_audio_manager_remove_all(void)
2356    
2357     list_for_each_entry_safe(module, next, &modules_list, list) {
2358     list_del(&module->list);
2359     - kobject_put(&module->kobj);
2360     ida_simple_remove(&module_id, module->id);
2361     + kobject_put(&module->kobj);
2362     }
2363    
2364     is_empty = list_empty(&modules_list);
2365     diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
2366     index 9f0418ee7528..630e7d933b10 100644
2367     --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
2368     +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
2369     @@ -2025,7 +2025,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
2370     struct ieee_param *param;
2371     uint ret = 0;
2372    
2373     - if (p->length < sizeof(struct ieee_param) || !p->pointer) {
2374     + if (!p->pointer || p->length != sizeof(struct ieee_param)) {
2375     ret = -EINVAL;
2376     goto out;
2377     }
2378     @@ -2812,7 +2812,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
2379     goto out;
2380     }
2381    
2382     - if (!p->pointer) {
2383     + if (!p->pointer || p->length != sizeof(struct ieee_param)) {
2384     ret = -EINVAL;
2385     goto out;
2386     }
2387     diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
2388     index b44e902ed338..b6d56cfb0a19 100644
2389     --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
2390     +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
2391     @@ -476,14 +476,13 @@ int rtl8723bs_xmit_thread(void *context)
2392     s32 ret;
2393     struct adapter *padapter;
2394     struct xmit_priv *pxmitpriv;
2395     - u8 thread_name[20] = "RTWHALXT";
2396     -
2397     + u8 thread_name[20];
2398    
2399     ret = _SUCCESS;
2400     padapter = context;
2401     pxmitpriv = &padapter->xmitpriv;
2402    
2403     - rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
2404     + rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter));
2405     thread_enter(thread_name);
2406    
2407     DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
2408     diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
2409     index d1b199e3e5bd..d8d44fd9a92f 100644
2410     --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
2411     +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
2412     @@ -3379,7 +3379,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
2413    
2414     /* down(&ieee->wx_sem); */
2415    
2416     - if (p->length < sizeof(struct ieee_param) || !p->pointer) {
2417     + if (!p->pointer || p->length != sizeof(struct ieee_param)) {
2418     ret = -EINVAL;
2419     goto out;
2420     }
2421     @@ -4213,7 +4213,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
2422    
2423    
2424     /* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
2425     - if (!p->pointer) {
2426     + if (!p->pointer || p->length != sizeof(*param)) {
2427     ret = -EINVAL;
2428     goto out;
2429     }
2430     diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
2431     index 3b94e80f1d5e..879ceef517fb 100644
2432     --- a/drivers/staging/vt6656/dpc.c
2433     +++ b/drivers/staging/vt6656/dpc.c
2434     @@ -130,7 +130,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
2435    
2436     vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
2437    
2438     - priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
2439     + priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
2440     priv->current_rssi = priv->bb_pre_ed_rssi;
2441    
2442     skb_pull(skb, 8);
2443     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2444     index c070cb2a6a5b..d19e051f2bc2 100644
2445     --- a/drivers/target/iscsi/iscsi_target.c
2446     +++ b/drivers/target/iscsi/iscsi_target.c
2447     @@ -1165,9 +1165,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2448     hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
2449     conn->cid);
2450    
2451     - if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
2452     - return iscsit_add_reject_cmd(cmd,
2453     - ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
2454     + target_get_sess_cmd(&cmd->se_cmd, true);
2455    
2456     cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
2457     scsilun_to_int(&hdr->lun));
2458     @@ -2004,9 +2002,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2459     conn->sess->se_sess, 0, DMA_NONE,
2460     TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
2461    
2462     - if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
2463     - return iscsit_add_reject_cmd(cmd,
2464     - ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
2465     + target_get_sess_cmd(&cmd->se_cmd, true);
2466    
2467     /*
2468     * TASK_REASSIGN for ERL=2 / connection stays inside of
2469     @@ -4151,6 +4147,9 @@ int iscsit_close_connection(
2470     iscsit_stop_nopin_response_timer(conn);
2471     iscsit_stop_nopin_timer(conn);
2472    
2473     + if (conn->conn_transport->iscsit_wait_conn)
2474     + conn->conn_transport->iscsit_wait_conn(conn);
2475     +
2476     /*
2477     * During Connection recovery drop unacknowledged out of order
2478     * commands for this connection, and prepare the other commands
2479     @@ -4233,11 +4232,6 @@ int iscsit_close_connection(
2480     * must wait until they have completed.
2481     */
2482     iscsit_check_conn_usage_count(conn);
2483     - target_sess_cmd_list_set_waiting(sess->se_sess);
2484     - target_wait_for_sess_cmds(sess->se_sess);
2485     -
2486     - if (conn->conn_transport->iscsit_wait_conn)
2487     - conn->conn_transport->iscsit_wait_conn(conn);
2488    
2489     ahash_request_free(conn->conn_tx_hash);
2490     if (conn->conn_rx_hash) {
2491     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2492     index eda8b4736c15..d542e26ca56a 100644
2493     --- a/drivers/target/target_core_transport.c
2494     +++ b/drivers/target/target_core_transport.c
2495     @@ -666,6 +666,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
2496    
2497     target_remove_from_state_list(cmd);
2498    
2499     + /*
2500     + * Clear struct se_cmd->se_lun before the handoff to FE.
2501     + */
2502     + cmd->se_lun = NULL;
2503     +
2504     spin_lock_irqsave(&cmd->t_state_lock, flags);
2505     /*
2506     * Determine if frontend context caller is requesting the stopping of
2507     @@ -693,6 +698,17 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
2508     return cmd->se_tfo->check_stop_free(cmd);
2509     }
2510    
2511     +static void transport_lun_remove_cmd(struct se_cmd *cmd)
2512     +{
2513     + struct se_lun *lun = cmd->se_lun;
2514     +
2515     + if (!lun)
2516     + return;
2517     +
2518     + if (cmpxchg(&cmd->lun_ref_active, true, false))
2519     + percpu_ref_put(&lun->lun_ref);
2520     +}
2521     +
2522     static void target_complete_failure_work(struct work_struct *work)
2523     {
2524     struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2525     @@ -783,6 +799,8 @@ static void target_handle_abort(struct se_cmd *cmd)
2526    
2527     WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
2528    
2529     + transport_lun_remove_cmd(cmd);
2530     +
2531     transport_cmd_check_stop_to_fabric(cmd);
2532     }
2533    
2534     @@ -1695,6 +1713,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
2535     se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
2536     se_cmd->se_tfo->queue_tm_rsp(se_cmd);
2537    
2538     + transport_lun_remove_cmd(se_cmd);
2539     transport_cmd_check_stop_to_fabric(se_cmd);
2540     }
2541    
2542     @@ -1885,6 +1904,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
2543     goto queue_full;
2544    
2545     check_stop:
2546     + transport_lun_remove_cmd(cmd);
2547     transport_cmd_check_stop_to_fabric(cmd);
2548     return;
2549    
2550     @@ -2182,6 +2202,7 @@ queue_status:
2551     transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2552     return;
2553     }
2554     + transport_lun_remove_cmd(cmd);
2555     transport_cmd_check_stop_to_fabric(cmd);
2556     }
2557    
2558     @@ -2276,6 +2297,7 @@ static void target_complete_ok_work(struct work_struct *work)
2559     if (ret)
2560     goto queue_full;
2561    
2562     + transport_lun_remove_cmd(cmd);
2563     transport_cmd_check_stop_to_fabric(cmd);
2564     return;
2565     }
2566     @@ -2301,6 +2323,7 @@ static void target_complete_ok_work(struct work_struct *work)
2567     if (ret)
2568     goto queue_full;
2569    
2570     + transport_lun_remove_cmd(cmd);
2571     transport_cmd_check_stop_to_fabric(cmd);
2572     return;
2573     }
2574     @@ -2336,6 +2359,7 @@ queue_rsp:
2575     if (ret)
2576     goto queue_full;
2577    
2578     + transport_lun_remove_cmd(cmd);
2579     transport_cmd_check_stop_to_fabric(cmd);
2580     return;
2581     }
2582     @@ -2371,6 +2395,7 @@ queue_status:
2583     break;
2584     }
2585    
2586     + transport_lun_remove_cmd(cmd);
2587     transport_cmd_check_stop_to_fabric(cmd);
2588     return;
2589    
2590     @@ -2697,6 +2722,9 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2591     */
2592     if (cmd->state_active)
2593     target_remove_from_state_list(cmd);
2594     +
2595     + if (cmd->se_lun)
2596     + transport_lun_remove_cmd(cmd);
2597     }
2598     if (aborted)
2599     cmd->free_compl = &compl;
2600     @@ -2768,9 +2796,6 @@ static void target_release_cmd_kref(struct kref *kref)
2601     struct completion *abrt_compl = se_cmd->abrt_compl;
2602     unsigned long flags;
2603    
2604     - if (se_cmd->lun_ref_active)
2605     - percpu_ref_put(&se_cmd->se_lun->lun_ref);
2606     -
2607     if (se_sess) {
2608     spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2609     list_del_init(&se_cmd->se_cmd_list);
2610     diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
2611     index c5974c9af841..e53932d27ac5 100644
2612     --- a/drivers/thunderbolt/switch.c
2613     +++ b/drivers/thunderbolt/switch.c
2614     @@ -274,6 +274,12 @@ out:
2615     return ret;
2616     }
2617    
2618     +static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
2619     + size_t bytes)
2620     +{
2621     + return -EPERM;
2622     +}
2623     +
2624     static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
2625     size_t bytes)
2626     {
2627     @@ -319,6 +325,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
2628     config.read_only = true;
2629     } else {
2630     config.name = "nvm_non_active";
2631     + config.reg_read = tb_switch_nvm_no_read;
2632     config.reg_write = tb_switch_nvm_write;
2633     config.root_only = true;
2634     }
2635     diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
2636     index d1cdd2ab8b4c..d367803e2044 100644
2637     --- a/drivers/tty/serdev/serdev-ttyport.c
2638     +++ b/drivers/tty/serdev/serdev-ttyport.c
2639     @@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
2640     struct device *parent,
2641     struct tty_driver *drv, int idx)
2642     {
2643     - const struct tty_port_client_operations *old_ops;
2644     struct serdev_controller *ctrl;
2645     struct serport *serport;
2646     int ret;
2647     @@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
2648    
2649     ctrl->ops = &ctrl_ops;
2650    
2651     - old_ops = port->client_ops;
2652     port->client_ops = &client_ops;
2653     port->client_data = ctrl;
2654    
2655     @@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
2656    
2657     err_reset_data:
2658     port->client_data = NULL;
2659     - port->client_ops = old_ops;
2660     + port->client_ops = &tty_port_default_client_ops;
2661     serdev_controller_put(ctrl);
2662    
2663     return ERR_PTR(ret);
2664     @@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port)
2665     return -ENODEV;
2666    
2667     serdev_controller_remove(ctrl);
2668     - port->client_ops = NULL;
2669     port->client_data = NULL;
2670     + port->client_ops = &tty_port_default_client_ops;
2671     serdev_controller_put(ctrl);
2672    
2673     return 0;
2674     diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
2675     index 0438d9a905ce..6ba2efde7252 100644
2676     --- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
2677     +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
2678     @@ -379,7 +379,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
2679     port.port.line = rc;
2680    
2681     port.port.irq = irq_of_parse_and_map(np, 0);
2682     - port.port.irqflags = IRQF_SHARED;
2683     port.port.handle_irq = aspeed_vuart_handle_irq;
2684     port.port.iotype = UPIO_MEM;
2685     port.port.type = PORT_16550A;
2686     diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
2687     index e682390ce0de..28bdbd7b4ab2 100644
2688     --- a/drivers/tty/serial/8250/8250_core.c
2689     +++ b/drivers/tty/serial/8250/8250_core.c
2690     @@ -174,7 +174,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
2691     struct hlist_head *h;
2692     struct hlist_node *n;
2693     struct irq_info *i;
2694     - int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
2695     + int ret;
2696    
2697     mutex_lock(&hash_mutex);
2698    
2699     @@ -209,9 +209,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
2700     INIT_LIST_HEAD(&up->list);
2701     i->head = &up->list;
2702     spin_unlock_irq(&i->lock);
2703     - irq_flags |= up->port.irqflags;
2704     ret = request_irq(up->port.irq, serial8250_interrupt,
2705     - irq_flags, up->port.name, i);
2706     + up->port.irqflags, up->port.name, i);
2707     if (ret < 0)
2708     serial_do_unlink(i, up);
2709     }
2710     diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
2711     index 0826cfdbd406..9ba31701a372 100644
2712     --- a/drivers/tty/serial/8250/8250_of.c
2713     +++ b/drivers/tty/serial/8250/8250_of.c
2714     @@ -172,7 +172,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
2715    
2716     port->type = type;
2717     port->uartclk = clk;
2718     - port->irqflags |= IRQF_SHARED;
2719    
2720     if (of_property_read_bool(np, "no-loopback-test"))
2721     port->flags |= UPF_SKIP_TEST;
2722     diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
2723     index 8407166610ce..2c65c775bf5a 100644
2724     --- a/drivers/tty/serial/8250/8250_port.c
2725     +++ b/drivers/tty/serial/8250/8250_port.c
2726     @@ -2192,6 +2192,10 @@ int serial8250_do_startup(struct uart_port *port)
2727     }
2728     }
2729    
2730     + /* Check if we need to have shared IRQs */
2731     + if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
2732     + up->port.irqflags |= IRQF_SHARED;
2733     +
2734     if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
2735     unsigned char iir1;
2736     /*
2737     diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2738     index 1ba9bc667e13..8a909d556185 100644
2739     --- a/drivers/tty/serial/atmel_serial.c
2740     +++ b/drivers/tty/serial/atmel_serial.c
2741     @@ -574,7 +574,8 @@ static void atmel_stop_tx(struct uart_port *port)
2742     atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
2743    
2744     if (atmel_uart_is_half_duplex(port))
2745     - atmel_start_rx(port);
2746     + if (!atomic_read(&atmel_port->tasklet_shutdown))
2747     + atmel_start_rx(port);
2748    
2749     }
2750    
2751     diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
2752     index 9d8c660dc289..22d8705cd5cd 100644
2753     --- a/drivers/tty/serial/imx.c
2754     +++ b/drivers/tty/serial/imx.c
2755     @@ -603,7 +603,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
2756    
2757     sport->tx_bytes = uart_circ_chars_pending(xmit);
2758    
2759     - if (xmit->tail < xmit->head) {
2760     + if (xmit->tail < xmit->head || xmit->head == 0) {
2761     sport->dma_tx_nents = 1;
2762     sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
2763     } else {
2764     diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
2765     index 14c6306bc462..f98a79172ad2 100644
2766     --- a/drivers/tty/serial/qcom_geni_serial.c
2767     +++ b/drivers/tty/serial/qcom_geni_serial.c
2768     @@ -125,6 +125,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
2769     static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
2770     static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
2771     static void qcom_geni_serial_stop_rx(struct uart_port *uport);
2772     +static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
2773    
2774     static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
2775     32000000, 48000000, 64000000, 80000000,
2776     @@ -615,7 +616,7 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
2777     u32 irq_en;
2778     u32 status;
2779     struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
2780     - u32 irq_clear = S_CMD_DONE_EN;
2781     + u32 s_irq_status;
2782    
2783     irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
2784     irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
2785     @@ -631,10 +632,19 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
2786     return;
2787    
2788     geni_se_cancel_s_cmd(&port->se);
2789     - qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
2790     - S_GENI_CMD_CANCEL, false);
2791     + qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
2792     + S_CMD_CANCEL_EN, true);
2793     + /*
2794     + * If timeout occurs secondary engine remains active
2795     + * and Abort sequence is executed.
2796     + */
2797     + s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
2798     + /* Flush the Rx buffer */
2799     + if (s_irq_status & S_RX_FIFO_LAST_EN)
2800     + qcom_geni_serial_handle_rx(uport, true);
2801     + writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
2802     +
2803     status = readl(uport->membase + SE_GENI_STATUS);
2804     - writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
2805     if (status & S_GENI_CMD_ACTIVE)
2806     qcom_geni_serial_abort_rx(uport);
2807     }
2808     diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
2809     index 044c3cbdcfa4..ea80bf872f54 100644
2810     --- a/drivers/tty/tty_port.c
2811     +++ b/drivers/tty/tty_port.c
2812     @@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port)
2813     }
2814     }
2815    
2816     -static const struct tty_port_client_operations default_client_ops = {
2817     +const struct tty_port_client_operations tty_port_default_client_ops = {
2818     .receive_buf = tty_port_default_receive_buf,
2819     .write_wakeup = tty_port_default_wakeup,
2820     };
2821     +EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
2822    
2823     void tty_port_init(struct tty_port *port)
2824     {
2825     @@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port)
2826     spin_lock_init(&port->lock);
2827     port->close_delay = (50 * HZ) / 100;
2828     port->closing_wait = (3000 * HZ) / 100;
2829     - port->client_ops = &default_client_ops;
2830     + port->client_ops = &tty_port_default_client_ops;
2831     kref_init(&port->kref);
2832     }
2833     EXPORT_SYMBOL(tty_port_init);
2834     diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
2835     index 78732feaf65b..44d974d4159f 100644
2836     --- a/drivers/tty/vt/selection.c
2837     +++ b/drivers/tty/vt/selection.c
2838     @@ -29,6 +29,8 @@
2839     #include <linux/console.h>
2840     #include <linux/tty_flip.h>
2841    
2842     +#include <linux/sched/signal.h>
2843     +
2844     /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
2845     #define isspace(c) ((c) == ' ')
2846    
2847     @@ -350,6 +352,7 @@ int paste_selection(struct tty_struct *tty)
2848     unsigned int count;
2849     struct tty_ldisc *ld;
2850     DECLARE_WAITQUEUE(wait, current);
2851     + int ret = 0;
2852    
2853     console_lock();
2854     poke_blanked_console();
2855     @@ -363,6 +366,10 @@ int paste_selection(struct tty_struct *tty)
2856     add_wait_queue(&vc->paste_wait, &wait);
2857     while (sel_buffer && sel_buffer_lth > pasted) {
2858     set_current_state(TASK_INTERRUPTIBLE);
2859     + if (signal_pending(current)) {
2860     + ret = -EINTR;
2861     + break;
2862     + }
2863     if (tty_throttled(tty)) {
2864     schedule();
2865     continue;
2866     @@ -378,6 +385,6 @@ int paste_selection(struct tty_struct *tty)
2867    
2868     tty_buffer_unlock_exclusive(&vc->port);
2869     tty_ldisc_deref(ld);
2870     - return 0;
2871     + return ret;
2872     }
2873     EXPORT_SYMBOL_GPL(paste_selection);
2874     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2875     index 34aa39d1aed9..3b4ccc2a30c1 100644
2876     --- a/drivers/tty/vt/vt.c
2877     +++ b/drivers/tty/vt/vt.c
2878     @@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc)
2879     WARN_CONSOLE_UNLOCKED();
2880    
2881     set_origin(vc);
2882     - if (vc->vc_sw->con_flush_scrollback)
2883     + if (vc->vc_sw->con_flush_scrollback) {
2884     vc->vc_sw->con_flush_scrollback(vc);
2885     - else
2886     + } else if (con_is_visible(vc)) {
2887     + /*
2888     + * When no con_flush_scrollback method is provided then the
2889     + * legacy way for flushing the scrollback buffer is to use
2890     + * a side effect of the con_switch method. We do it only on
2891     + * the foreground console as background consoles have no
2892     + * scrollback buffers in that case and we obviously don't
2893     + * want to switch to them.
2894     + */
2895     + hide_cursor(vc);
2896     vc->vc_sw->con_switch(vc);
2897     + set_cursor(vc);
2898     + }
2899     }
2900    
2901     /*
2902     diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
2903     index 8b0ed139592f..ee6c91ef1f6c 100644
2904     --- a/drivers/tty/vt/vt_ioctl.c
2905     +++ b/drivers/tty/vt/vt_ioctl.c
2906     @@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty,
2907     return -EINVAL;
2908    
2909     for (i = 0; i < MAX_NR_CONSOLES; i++) {
2910     + struct vc_data *vcp;
2911     +
2912     if (!vc_cons[i].d)
2913     continue;
2914     console_lock();
2915     - if (v.v_vlin)
2916     - vc_cons[i].d->vc_scan_lines = v.v_vlin;
2917     - if (v.v_clin)
2918     - vc_cons[i].d->vc_font.height = v.v_clin;
2919     - vc_cons[i].d->vc_resize_user = 1;
2920     - vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
2921     + vcp = vc_cons[i].d;
2922     + if (vcp) {
2923     + if (v.v_vlin)
2924     + vcp->vc_scan_lines = v.v_vlin;
2925     + if (v.v_clin)
2926     + vcp->vc_font.height = v.v_clin;
2927     + vcp->vc_resize_user = 1;
2928     + vc_resize(vcp, v.v_cols, v.v_rows);
2929     + }
2930     console_unlock();
2931     }
2932     break;
2933     diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2934     index 3e94259406d7..c68217b7dace 100644
2935     --- a/drivers/usb/core/config.c
2936     +++ b/drivers/usb/core/config.c
2937     @@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
2938     struct usb_host_interface *ifp, int num_ep,
2939     unsigned char *buffer, int size)
2940     {
2941     + struct usb_device *udev = to_usb_device(ddev);
2942     unsigned char *buffer0 = buffer;
2943     struct usb_endpoint_descriptor *d;
2944     struct usb_host_endpoint *endpoint;
2945     @@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
2946     goto skip_to_next_endpoint_or_interface_descriptor;
2947     }
2948    
2949     + /* Ignore blacklisted endpoints */
2950     + if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) {
2951     + if (usb_endpoint_is_blacklisted(udev, ifp, d)) {
2952     + dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n",
2953     + cfgno, inum, asnum,
2954     + d->bEndpointAddress);
2955     + goto skip_to_next_endpoint_or_interface_descriptor;
2956     + }
2957     + }
2958     +
2959     endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
2960     ++ifp->desc.bNumEndpoints;
2961    
2962     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2963     index 4ac74b354801..f381faa10f15 100644
2964     --- a/drivers/usb/core/hub.c
2965     +++ b/drivers/usb/core/hub.c
2966     @@ -37,7 +37,9 @@
2967     #include "otg_whitelist.h"
2968    
2969     #define USB_VENDOR_GENESYS_LOGIC 0x05e3
2970     +#define USB_VENDOR_SMSC 0x0424
2971     #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
2972     +#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
2973    
2974     #define USB_TP_TRANSMISSION_DELAY 40 /* ns */
2975     #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
2976     @@ -1216,11 +1218,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2977     #ifdef CONFIG_PM
2978     udev->reset_resume = 1;
2979     #endif
2980     - /* Don't set the change_bits when the device
2981     - * was powered off.
2982     - */
2983     - if (test_bit(port1, hub->power_bits))
2984     - set_bit(port1, hub->change_bits);
2985    
2986     } else {
2987     /* The power session is gone; tell hub_wq */
2988     @@ -1730,6 +1727,10 @@ static void hub_disconnect(struct usb_interface *intf)
2989     kfree(hub->buffer);
2990    
2991     pm_suspend_ignore_children(&intf->dev, false);
2992     +
2993     + if (hub->quirk_disable_autosuspend)
2994     + usb_autopm_put_interface(intf);
2995     +
2996     kref_put(&hub->kref, hub_release);
2997     }
2998    
2999     @@ -1862,6 +1863,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
3000     if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
3001     hub->quirk_check_port_auto_suspend = 1;
3002    
3003     + if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
3004     + hub->quirk_disable_autosuspend = 1;
3005     + usb_autopm_get_interface(intf);
3006     + }
3007     +
3008     if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
3009     return 0;
3010    
3011     @@ -5484,6 +5490,10 @@ out_hdev_lock:
3012     }
3013    
3014     static const struct usb_device_id hub_id_table[] = {
3015     + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
3016     + .idVendor = USB_VENDOR_SMSC,
3017     + .bInterfaceClass = USB_CLASS_HUB,
3018     + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
3019     { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
3020     | USB_DEVICE_ID_MATCH_INT_CLASS,
3021     .idVendor = USB_VENDOR_GENESYS_LOGIC,
3022     diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
3023     index a9e24e4b8df1..a97dd1ba964e 100644
3024     --- a/drivers/usb/core/hub.h
3025     +++ b/drivers/usb/core/hub.h
3026     @@ -61,6 +61,7 @@ struct usb_hub {
3027     unsigned quiescing:1;
3028     unsigned disconnected:1;
3029     unsigned in_reset:1;
3030     + unsigned quirk_disable_autosuspend:1;
3031    
3032     unsigned quirk_check_port_auto_suspend:1;
3033    
3034     diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
3035     index 6b6413073584..2b24336a72e5 100644
3036     --- a/drivers/usb/core/quirks.c
3037     +++ b/drivers/usb/core/quirks.c
3038     @@ -354,6 +354,10 @@ static const struct usb_device_id usb_quirk_list[] = {
3039     { USB_DEVICE(0x0904, 0x6103), .driver_info =
3040     USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
3041    
3042     + /* Sound Devices USBPre2 */
3043     + { USB_DEVICE(0x0926, 0x0202), .driver_info =
3044     + USB_QUIRK_ENDPOINT_BLACKLIST },
3045     +
3046     /* Keytouch QWERTY Panel keyboard */
3047     { USB_DEVICE(0x0926, 0x3333), .driver_info =
3048     USB_QUIRK_CONFIG_INTF_STRINGS },
3049     @@ -445,6 +449,9 @@ static const struct usb_device_id usb_quirk_list[] = {
3050     /* INTEL VALUE SSD */
3051     { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
3052    
3053     + /* novation SoundControl XL */
3054     + { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
3055     +
3056     { } /* terminating entry must be last */
3057     };
3058    
3059     @@ -472,6 +479,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
3060     { } /* terminating entry must be last */
3061     };
3062    
3063     +/*
3064     + * Entries for blacklisted endpoints that should be ignored when parsing
3065     + * configuration descriptors.
3066     + *
3067     + * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
3068     + */
3069     +static const struct usb_device_id usb_endpoint_blacklist[] = {
3070     + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
3071     + { }
3072     +};
3073     +
3074     +bool usb_endpoint_is_blacklisted(struct usb_device *udev,
3075     + struct usb_host_interface *intf,
3076     + struct usb_endpoint_descriptor *epd)
3077     +{
3078     + const struct usb_device_id *id;
3079     + unsigned int address;
3080     +
3081     + for (id = usb_endpoint_blacklist; id->match_flags; ++id) {
3082     + if (!usb_match_device(udev, id))
3083     + continue;
3084     +
3085     + if (!usb_match_one_id_intf(udev, intf, id))
3086     + continue;
3087     +
3088     + address = id->driver_info;
3089     + if (address == epd->bEndpointAddress)
3090     + return true;
3091     + }
3092     +
3093     + return false;
3094     +}
3095     +
3096     static bool usb_match_any_interface(struct usb_device *udev,
3097     const struct usb_device_id *id)
3098     {
3099     diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
3100     index cf4783cf661a..3ad0ee57e859 100644
3101     --- a/drivers/usb/core/usb.h
3102     +++ b/drivers/usb/core/usb.h
3103     @@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *);
3104     extern void usb_detect_quirks(struct usb_device *udev);
3105     extern void usb_detect_interface_quirks(struct usb_device *udev);
3106     extern void usb_release_quirk_list(void);
3107     +extern bool usb_endpoint_is_blacklisted(struct usb_device *udev,
3108     + struct usb_host_interface *intf,
3109     + struct usb_endpoint_descriptor *epd);
3110     extern int usb_remove_device(struct usb_device *udev);
3111    
3112     extern int usb_get_device_descriptor(struct usb_device *dev,
3113     diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
3114     index a9133773b89e..7fd0900a9cb0 100644
3115     --- a/drivers/usb/dwc2/gadget.c
3116     +++ b/drivers/usb/dwc2/gadget.c
3117     @@ -1083,11 +1083,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
3118     else
3119     packets = 1; /* send one packet if length is zero. */
3120    
3121     - if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
3122     - dev_err(hsotg->dev, "req length > maxpacket*mc\n");
3123     - return;
3124     - }
3125     -
3126     if (dir_in && index != 0)
3127     if (hs_ep->isochronous)
3128     epsize = DXEPTSIZ_MC(packets);
3129     @@ -1391,6 +1386,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
3130     req->actual = 0;
3131     req->status = -EINPROGRESS;
3132    
3133     + /* Don't queue ISOC request if length greater than mps*mc */
3134     + if (hs_ep->isochronous &&
3135     + req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
3136     + dev_err(hs->dev, "req length > maxpacket*mc\n");
3137     + return -EINVAL;
3138     + }
3139     +
3140     /* In DDMA mode for ISOC's don't queue request if length greater
3141     * than descriptor limits.
3142     */
3143     @@ -1632,6 +1634,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
3144     struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
3145     struct dwc2_hsotg_ep *ep;
3146     __le16 reply;
3147     + u16 status;
3148     int ret;
3149    
3150     dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
3151     @@ -1643,11 +1646,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
3152    
3153     switch (ctrl->bRequestType & USB_RECIP_MASK) {
3154     case USB_RECIP_DEVICE:
3155     - /*
3156     - * bit 0 => self powered
3157     - * bit 1 => remote wakeup
3158     - */
3159     - reply = cpu_to_le16(0);
3160     + status = 1 << USB_DEVICE_SELF_POWERED;
3161     + status |= hsotg->remote_wakeup_allowed <<
3162     + USB_DEVICE_REMOTE_WAKEUP;
3163     + reply = cpu_to_le16(status);
3164     break;
3165    
3166     case USB_RECIP_INTERFACE:
3167     @@ -1758,7 +1760,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
3168     case USB_RECIP_DEVICE:
3169     switch (wValue) {
3170     case USB_DEVICE_REMOTE_WAKEUP:
3171     - hsotg->remote_wakeup_allowed = 1;
3172     + if (set)
3173     + hsotg->remote_wakeup_allowed = 1;
3174     + else
3175     + hsotg->remote_wakeup_allowed = 0;
3176     break;
3177    
3178     case USB_DEVICE_TEST_MODE:
3179     @@ -1768,16 +1773,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
3180     return -EINVAL;
3181    
3182     hsotg->test_mode = wIndex >> 8;
3183     - ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
3184     - if (ret) {
3185     - dev_err(hsotg->dev,
3186     - "%s: failed to send reply\n", __func__);
3187     - return ret;
3188     - }
3189     break;
3190     default:
3191     return -ENOENT;
3192     }
3193     +
3194     + ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
3195     + if (ret) {
3196     + dev_err(hsotg->dev,
3197     + "%s: failed to send reply\n", __func__);
3198     + return ret;
3199     + }
3200     break;
3201    
3202     case USB_RECIP_ENDPOINT:
3203     diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
3204     index 9baabed87d61..f2c97058a00b 100644
3205     --- a/drivers/usb/dwc3/debug.h
3206     +++ b/drivers/usb/dwc3/debug.h
3207     @@ -256,86 +256,77 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
3208     u8 epnum = event->endpoint_number;
3209     size_t len;
3210     int status;
3211     - int ret;
3212    
3213     - ret = snprintf(str, size, "ep%d%s: ", epnum >> 1,
3214     + len = scnprintf(str, size, "ep%d%s: ", epnum >> 1,
3215     (epnum & 1) ? "in" : "out");
3216     - if (ret < 0)
3217     - return "UNKNOWN";
3218    
3219     status = event->status;
3220    
3221     switch (event->endpoint_event) {
3222     case DWC3_DEPEVT_XFERCOMPLETE:
3223     - len = strlen(str);
3224     - snprintf(str + len, size - len, "Transfer Complete (%c%c%c)",
3225     + len += scnprintf(str + len, size - len,
3226     + "Transfer Complete (%c%c%c)",
3227     status & DEPEVT_STATUS_SHORT ? 'S' : 's',
3228     status & DEPEVT_STATUS_IOC ? 'I' : 'i',
3229     status & DEPEVT_STATUS_LST ? 'L' : 'l');
3230    
3231     - len = strlen(str);
3232     -
3233     if (epnum <= 1)
3234     - snprintf(str + len, size - len, " [%s]",
3235     + scnprintf(str + len, size - len, " [%s]",
3236     dwc3_ep0_state_string(ep0state));
3237     break;
3238     case DWC3_DEPEVT_XFERINPROGRESS:
3239     - len = strlen(str);
3240     -
3241     - snprintf(str + len, size - len, "Transfer In Progress [%d] (%c%c%c)",
3242     + scnprintf(str + len, size - len,
3243     + "Transfer In Progress [%d] (%c%c%c)",
3244     event->parameters,
3245     status & DEPEVT_STATUS_SHORT ? 'S' : 's',
3246     status & DEPEVT_STATUS_IOC ? 'I' : 'i',
3247     status & DEPEVT_STATUS_LST ? 'M' : 'm');
3248     break;
3249     case DWC3_DEPEVT_XFERNOTREADY:
3250     - len = strlen(str);
3251     -
3252     - snprintf(str + len, size - len, "Transfer Not Ready [%d]%s",
3253     + len += scnprintf(str + len, size - len,
3254     + "Transfer Not Ready [%d]%s",
3255     event->parameters,
3256     status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
3257     " (Active)" : " (Not Active)");
3258    
3259     - len = strlen(str);
3260     -
3261     /* Control Endpoints */
3262     if (epnum <= 1) {
3263     int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
3264    
3265     switch (phase) {
3266     case DEPEVT_STATUS_CONTROL_DATA:
3267     - snprintf(str + ret, size - ret,
3268     + scnprintf(str + len, size - len,
3269     " [Data Phase]");
3270     break;
3271     case DEPEVT_STATUS_CONTROL_STATUS:
3272     - snprintf(str + ret, size - ret,
3273     + scnprintf(str + len, size - len,
3274     " [Status Phase]");
3275     }
3276     }
3277     break;
3278     case DWC3_DEPEVT_RXTXFIFOEVT:
3279     - snprintf(str + ret, size - ret, "FIFO");
3280     + scnprintf(str + len, size - len, "FIFO");
3281     break;
3282     case DWC3_DEPEVT_STREAMEVT:
3283     status = event->status;
3284    
3285     switch (status) {
3286     case DEPEVT_STREAMEVT_FOUND:
3287     - snprintf(str + ret, size - ret, " Stream %d Found",
3288     + scnprintf(str + len, size - len, " Stream %d Found",
3289     event->parameters);
3290     break;
3291     case DEPEVT_STREAMEVT_NOTFOUND:
3292     default:
3293     - snprintf(str + ret, size - ret, " Stream Not Found");
3294     + scnprintf(str + len, size - len, " Stream Not Found");
3295     break;
3296     }
3297    
3298     break;
3299     case DWC3_DEPEVT_EPCMDCMPLT:
3300     - snprintf(str + ret, size - ret, "Endpoint Command Complete");
3301     + scnprintf(str + len, size - len, "Endpoint Command Complete");
3302     break;
3303     default:
3304     - snprintf(str, size, "UNKNOWN");
3305     + scnprintf(str + len, size - len, "UNKNOWN");
3306     }
3307    
3308     return str;
3309     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3310     index 8b95be897078..e0cb1c2d5675 100644
3311     --- a/drivers/usb/dwc3/gadget.c
3312     +++ b/drivers/usb/dwc3/gadget.c
3313     @@ -2426,7 +2426,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
3314     if (event->status & DEPEVT_STATUS_SHORT && !chain)
3315     return 1;
3316    
3317     - if (event->status & DEPEVT_STATUS_IOC)
3318     + if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
3319     + (trb->ctrl & DWC3_TRB_CTRL_LST))
3320     return 1;
3321    
3322     return 0;
3323     diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
3324     index 5ec54b69c29c..0d45d7a4f949 100644
3325     --- a/drivers/usb/gadget/composite.c
3326     +++ b/drivers/usb/gadget/composite.c
3327     @@ -437,12 +437,10 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
3328     val = CONFIG_USB_GADGET_VBUS_DRAW;
3329     if (!val)
3330     return 0;
3331     - switch (speed) {
3332     - case USB_SPEED_SUPER:
3333     - return DIV_ROUND_UP(val, 8);
3334     - default:
3335     + if (speed < USB_SPEED_SUPER)
3336     return DIV_ROUND_UP(val, 2);
3337     - }
3338     + else
3339     + return DIV_ROUND_UP(val, 8);
3340     }
3341    
3342     static int config_buf(struct usb_configuration *config,
3343     diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
3344     index 7a3a29e5e9d2..af92b2576fe9 100644
3345     --- a/drivers/usb/host/xhci-hub.c
3346     +++ b/drivers/usb/host/xhci-hub.c
3347     @@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = {
3348     static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3349     u16 wLength)
3350     {
3351     + struct xhci_port_cap *port_cap = NULL;
3352     int i, ssa_count;
3353     u32 temp;
3354     u16 desc_size, ssp_cap_size, ssa_size = 0;
3355     @@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3356     ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
3357    
3358     /* does xhci support USB 3.1 Enhanced SuperSpeed */
3359     - if (xhci->usb3_rhub.min_rev >= 0x01) {
3360     + for (i = 0; i < xhci->num_port_caps; i++) {
3361     + if (xhci->port_caps[i].maj_rev == 0x03 &&
3362     + xhci->port_caps[i].min_rev >= 0x01) {
3363     + usb3_1 = true;
3364     + port_cap = &xhci->port_caps[i];
3365     + break;
3366     + }
3367     + }
3368     +
3369     + if (usb3_1) {
3370     /* does xhci provide a PSI table for SSA speed attributes? */
3371     - if (xhci->usb3_rhub.psi_count) {
3372     + if (port_cap->psi_count) {
3373     /* two SSA entries for each unique PSI ID, RX and TX */
3374     - ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
3375     + ssa_count = port_cap->psi_uid_count * 2;
3376     ssa_size = ssa_count * sizeof(u32);
3377     ssp_cap_size -= 16; /* skip copying the default SSA */
3378     }
3379     desc_size += ssp_cap_size;
3380     - usb3_1 = true;
3381     }
3382     memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
3383    
3384     @@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3385     }
3386    
3387     /* If PSI table exists, add the custom speed attributes from it */
3388     - if (usb3_1 && xhci->usb3_rhub.psi_count) {
3389     + if (usb3_1 && port_cap->psi_count) {
3390     u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
3391     int offset;
3392    
3393     @@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3394    
3395     /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
3396     bm_attrib = (ssa_count - 1) & 0x1f;
3397     - bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
3398     + bm_attrib |= (port_cap->psi_uid_count - 1) << 5;
3399     put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
3400    
3401     if (wLength < desc_size + ssa_size)
3402     @@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3403     * USB 3.1 requires two SSA entries (RX and TX) for every link
3404     */
3405     offset = desc_size;
3406     - for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
3407     - psi = xhci->usb3_rhub.psi[i];
3408     + for (i = 0; i < port_cap->psi_count; i++) {
3409     + psi = port_cap->psi[i];
3410     psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
3411     psi_exp = XHCI_EXT_PORT_PSIE(psi);
3412     psi_mant = XHCI_EXT_PORT_PSIM(psi);
3413     diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
3414     index 3b1388fa2f36..884c601bfa15 100644
3415     --- a/drivers/usb/host/xhci-mem.c
3416     +++ b/drivers/usb/host/xhci-mem.c
3417     @@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
3418     /* Allow 3 retries for everything but isoc, set CErr = 3 */
3419     if (!usb_endpoint_xfer_isoc(&ep->desc))
3420     err_count = 3;
3421     - /* Some devices get this wrong */
3422     - if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
3423     - max_packet = 512;
3424     + /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
3425     + if (usb_endpoint_xfer_bulk(&ep->desc)) {
3426     + if (udev->speed == USB_SPEED_HIGH)
3427     + max_packet = 512;
3428     + if (udev->speed == USB_SPEED_FULL) {
3429     + max_packet = rounddown_pow_of_two(max_packet);
3430     + max_packet = clamp_val(max_packet, 8, 64);
3431     + }
3432     + }
3433     /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
3434     if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
3435     avg_trb_len = 8;
3436     @@ -1909,17 +1915,17 @@ no_bw:
3437     xhci->usb3_rhub.num_ports = 0;
3438     xhci->num_active_eps = 0;
3439     kfree(xhci->usb2_rhub.ports);
3440     - kfree(xhci->usb2_rhub.psi);
3441     kfree(xhci->usb3_rhub.ports);
3442     - kfree(xhci->usb3_rhub.psi);
3443     kfree(xhci->hw_ports);
3444     kfree(xhci->rh_bw);
3445     kfree(xhci->ext_caps);
3446     + for (i = 0; i < xhci->num_port_caps; i++)
3447     + kfree(xhci->port_caps[i].psi);
3448     + kfree(xhci->port_caps);
3449     + xhci->num_port_caps = 0;
3450    
3451     xhci->usb2_rhub.ports = NULL;
3452     - xhci->usb2_rhub.psi = NULL;
3453     xhci->usb3_rhub.ports = NULL;
3454     - xhci->usb3_rhub.psi = NULL;
3455     xhci->hw_ports = NULL;
3456     xhci->rh_bw = NULL;
3457     xhci->ext_caps = NULL;
3458     @@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
3459     u8 major_revision, minor_revision;
3460     struct xhci_hub *rhub;
3461     struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
3462     + struct xhci_port_cap *port_cap;
3463    
3464     temp = readl(addr);
3465     major_revision = XHCI_EXT_PORT_MAJOR(temp);
3466     @@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
3467     /* WTF? "Valid values are ‘1’ to MaxPorts" */
3468     return;
3469    
3470     - rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
3471     - if (rhub->psi_count) {
3472     - rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
3473     - GFP_KERNEL, dev_to_node(dev));
3474     - if (!rhub->psi)
3475     - rhub->psi_count = 0;
3476     + port_cap = &xhci->port_caps[xhci->num_port_caps++];
3477     + if (xhci->num_port_caps > max_caps)
3478     + return;
3479     +
3480     + port_cap->maj_rev = major_revision;
3481     + port_cap->min_rev = minor_revision;
3482     + port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
3483    
3484     - rhub->psi_uid_count++;
3485     - for (i = 0; i < rhub->psi_count; i++) {
3486     - rhub->psi[i] = readl(addr + 4 + i);
3487     + if (port_cap->psi_count) {
3488     + port_cap->psi = kcalloc_node(port_cap->psi_count,
3489     + sizeof(*port_cap->psi),
3490     + GFP_KERNEL, dev_to_node(dev));
3491     + if (!port_cap->psi)
3492     + port_cap->psi_count = 0;
3493     +
3494     + port_cap->psi_uid_count++;
3495     + for (i = 0; i < port_cap->psi_count; i++) {
3496     + port_cap->psi[i] = readl(addr + 4 + i);
3497    
3498     /* count unique ID values, two consecutive entries can
3499     * have the same ID if link is assymetric
3500     */
3501     - if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
3502     - XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
3503     - rhub->psi_uid_count++;
3504     + if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
3505     + XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
3506     + port_cap->psi_uid_count++;
3507    
3508     xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
3509     - XHCI_EXT_PORT_PSIV(rhub->psi[i]),
3510     - XHCI_EXT_PORT_PSIE(rhub->psi[i]),
3511     - XHCI_EXT_PORT_PLT(rhub->psi[i]),
3512     - XHCI_EXT_PORT_PFD(rhub->psi[i]),
3513     - XHCI_EXT_PORT_LP(rhub->psi[i]),
3514     - XHCI_EXT_PORT_PSIM(rhub->psi[i]));
3515     + XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
3516     + XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
3517     + XHCI_EXT_PORT_PLT(port_cap->psi[i]),
3518     + XHCI_EXT_PORT_PFD(port_cap->psi[i]),
3519     + XHCI_EXT_PORT_LP(port_cap->psi[i]),
3520     + XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
3521     }
3522     }
3523     /* cache usb2 port capabilities */
3524     @@ -2213,6 +2228,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
3525     continue;
3526     }
3527     hw_port->rhub = rhub;
3528     + hw_port->port_cap = port_cap;
3529     rhub->num_ports++;
3530     }
3531     /* FIXME: Should we disable ports not in the Extended Capabilities? */
3532     @@ -2303,6 +2319,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
3533     if (!xhci->ext_caps)
3534     return -ENOMEM;
3535    
3536     + xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
3537     + flags, dev_to_node(dev));
3538     + if (!xhci->port_caps)
3539     + return -ENOMEM;
3540     +
3541     offset = cap_start;
3542    
3543     while (offset) {
3544     diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3545     index 4917c5b033fa..5e9b537df631 100644
3546     --- a/drivers/usb/host/xhci-pci.c
3547     +++ b/drivers/usb/host/xhci-pci.c
3548     @@ -49,6 +49,7 @@
3549     #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
3550     #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
3551     #define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
3552     +#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
3553    
3554     #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
3555     #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
3556     @@ -187,7 +188,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3557     pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
3558     pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
3559     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
3560     - pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
3561     + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
3562     + pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
3563     xhci->quirks |= XHCI_PME_STUCK_QUIRK;
3564     }
3565     if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3566     @@ -302,6 +304,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
3567     if (!usb_hcd_is_primary_hcd(hcd))
3568     return 0;
3569    
3570     + if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
3571     + xhci_pme_acpi_rtd3_enable(pdev);
3572     +
3573     xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
3574    
3575     /* Find any debug ports */
3576     @@ -359,9 +364,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
3577     HCC_MAX_PSA(xhci->hcc_params) >= 4)
3578     xhci->shared_hcd->can_do_streams = 1;
3579    
3580     - if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
3581     - xhci_pme_acpi_rtd3_enable(dev);
3582     -
3583     /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
3584     pm_runtime_put_noidle(&dev->dev);
3585    
3586     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3587     index 4a2fe56940bd..f7a190fb2353 100644
3588     --- a/drivers/usb/host/xhci-ring.c
3589     +++ b/drivers/usb/host/xhci-ring.c
3590     @@ -2740,6 +2740,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
3591     return 1;
3592     }
3593    
3594     +/*
3595     + * Update Event Ring Dequeue Pointer:
3596     + * - When all events have finished
3597     + * - To avoid "Event Ring Full Error" condition
3598     + */
3599     +static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
3600     + union xhci_trb *event_ring_deq)
3601     +{
3602     + u64 temp_64;
3603     + dma_addr_t deq;
3604     +
3605     + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
3606     + /* If necessary, update the HW's version of the event ring deq ptr. */
3607     + if (event_ring_deq != xhci->event_ring->dequeue) {
3608     + deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
3609     + xhci->event_ring->dequeue);
3610     + if (deq == 0)
3611     + xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
3612     + /*
3613     + * Per 4.9.4, Software writes to the ERDP register shall
3614     + * always advance the Event Ring Dequeue Pointer value.
3615     + */
3616     + if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
3617     + ((u64) deq & (u64) ~ERST_PTR_MASK))
3618     + return;
3619     +
3620     + /* Update HC event ring dequeue pointer */
3621     + temp_64 &= ERST_PTR_MASK;
3622     + temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
3623     + }
3624     +
3625     + /* Clear the event handler busy flag (RW1C) */
3626     + temp_64 |= ERST_EHB;
3627     + xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
3628     +}
3629     +
3630     /*
3631     * xHCI spec says we can get an interrupt, and if the HC has an error condition,
3632     * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
3633     @@ -2751,9 +2787,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
3634     union xhci_trb *event_ring_deq;
3635     irqreturn_t ret = IRQ_NONE;
3636     unsigned long flags;
3637     - dma_addr_t deq;
3638     u64 temp_64;
3639     u32 status;
3640     + int event_loop = 0;
3641    
3642     spin_lock_irqsave(&xhci->lock, flags);
3643     /* Check if the xHC generated the interrupt, or the irq is shared */
3644     @@ -2807,24 +2843,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
3645     /* FIXME this should be a delayed service routine
3646     * that clears the EHB.
3647     */
3648     - while (xhci_handle_event(xhci) > 0) {}
3649     -
3650     - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
3651     - /* If necessary, update the HW's version of the event ring deq ptr. */
3652     - if (event_ring_deq != xhci->event_ring->dequeue) {
3653     - deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
3654     - xhci->event_ring->dequeue);
3655     - if (deq == 0)
3656     - xhci_warn(xhci, "WARN something wrong with SW event "
3657     - "ring dequeue ptr.\n");
3658     - /* Update HC event ring dequeue pointer */
3659     - temp_64 &= ERST_PTR_MASK;
3660     - temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
3661     + while (xhci_handle_event(xhci) > 0) {
3662     + if (event_loop++ < TRBS_PER_SEGMENT / 2)
3663     + continue;
3664     + xhci_update_erst_dequeue(xhci, event_ring_deq);
3665     + event_loop = 0;
3666     }
3667    
3668     - /* Clear the event handler busy flag (RW1C); event ring is empty. */
3669     - temp_64 |= ERST_EHB;
3670     - xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
3671     + xhci_update_erst_dequeue(xhci, event_ring_deq);
3672     ret = IRQ_HANDLED;
3673    
3674     out:
3675     diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3676     index 973d665052a2..98b98a0cd2a8 100644
3677     --- a/drivers/usb/host/xhci.h
3678     +++ b/drivers/usb/host/xhci.h
3679     @@ -1702,12 +1702,20 @@ struct xhci_bus_state {
3680     * Intel Lynx Point LP xHCI host.
3681     */
3682     #define XHCI_MAX_REXIT_TIMEOUT_MS 20
3683     +struct xhci_port_cap {
3684     + u32 *psi; /* array of protocol speed ID entries */
3685     + u8 psi_count;
3686     + u8 psi_uid_count;
3687     + u8 maj_rev;
3688     + u8 min_rev;
3689     +};
3690    
3691     struct xhci_port {
3692     __le32 __iomem *addr;
3693     int hw_portnum;
3694     int hcd_portnum;
3695     struct xhci_hub *rhub;
3696     + struct xhci_port_cap *port_cap;
3697     };
3698    
3699     struct xhci_hub {
3700     @@ -1719,9 +1727,6 @@ struct xhci_hub {
3701     /* supported prococol extended capabiliy values */
3702     u8 maj_rev;
3703     u8 min_rev;
3704     - u32 *psi; /* array of protocol speed ID entries */
3705     - u8 psi_count;
3706     - u8 psi_uid_count;
3707     };
3708    
3709     /* There is one xhci_hcd structure per controller */
3710     @@ -1880,6 +1885,9 @@ struct xhci_hcd {
3711     /* cached usb2 extened protocol capabilites */
3712     u32 *ext_caps;
3713     unsigned int num_ext_caps;
3714     + /* cached extended protocol port capabilities */
3715     + struct xhci_port_cap *port_caps;
3716     + unsigned int num_port_caps;
3717     /* Compliance Mode Recovery Data */
3718     struct timer_list comp_mode_recovery_timer;
3719     u32 port_status_u0;
3720     diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
3721     index dce44fbf031f..dce20301e367 100644
3722     --- a/drivers/usb/misc/iowarrior.c
3723     +++ b/drivers/usb/misc/iowarrior.c
3724     @@ -33,6 +33,14 @@
3725     #define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512
3726     /* full speed iowarrior */
3727     #define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503
3728     +/* fuller speed iowarrior */
3729     +#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504
3730     +#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505
3731     +#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506
3732     +
3733     +/* OEMed devices */
3734     +#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a
3735     +#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b
3736    
3737     /* Get a minor range for your devices from the usb maintainer */
3738     #ifdef CONFIG_USB_DYNAMIC_MINORS
3739     @@ -133,6 +141,11 @@ static const struct usb_device_id iowarrior_ids[] = {
3740     {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
3741     {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
3742     {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
3743     + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
3744     + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
3745     + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
3746     + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
3747     + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
3748     {} /* Terminating entry */
3749     };
3750     MODULE_DEVICE_TABLE(usb, iowarrior_ids);
3751     @@ -357,6 +370,7 @@ static ssize_t iowarrior_write(struct file *file,
3752     }
3753     switch (dev->product_id) {
3754     case USB_DEVICE_ID_CODEMERCS_IOW24:
3755     + case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
3756     case USB_DEVICE_ID_CODEMERCS_IOWPV1:
3757     case USB_DEVICE_ID_CODEMERCS_IOWPV2:
3758     case USB_DEVICE_ID_CODEMERCS_IOW40:
3759     @@ -371,6 +385,10 @@ static ssize_t iowarrior_write(struct file *file,
3760     goto exit;
3761     break;
3762     case USB_DEVICE_ID_CODEMERCS_IOW56:
3763     + case USB_DEVICE_ID_CODEMERCS_IOW56AM:
3764     + case USB_DEVICE_ID_CODEMERCS_IOW28:
3765     + case USB_DEVICE_ID_CODEMERCS_IOW28L:
3766     + case USB_DEVICE_ID_CODEMERCS_IOW100:
3767     /* The IOW56 uses asynchronous IO and more urbs */
3768     if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
3769     /* Wait until we are below the limit for submitted urbs */
3770     @@ -493,6 +511,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
3771     switch (cmd) {
3772     case IOW_WRITE:
3773     if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
3774     + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
3775     dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
3776     dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
3777     dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
3778     @@ -767,7 +786,11 @@ static int iowarrior_probe(struct usb_interface *interface,
3779     goto error;
3780     }
3781    
3782     - if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
3783     + if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
3784     + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
3785     + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
3786     + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
3787     + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
3788     res = usb_find_last_int_out_endpoint(iface_desc,
3789     &dev->int_out_endpoint);
3790     if (res) {
3791     @@ -780,7 +803,11 @@ static int iowarrior_probe(struct usb_interface *interface,
3792     /* we have to check the report_size often, so remember it in the endianness suitable for our machine */
3793     dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
3794     if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
3795     - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
3796     + ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
3797     + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
3798     + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
3799     + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
3800     + (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
3801     /* IOWarrior56 has wMaxPacketSize different from report size */
3802     dev->report_size = 7;
3803    
3804     diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
3805     index 475b9c692827..bb2198496f42 100644
3806     --- a/drivers/usb/storage/uas.c
3807     +++ b/drivers/usb/storage/uas.c
3808     @@ -45,6 +45,7 @@ struct uas_dev_info {
3809     struct scsi_cmnd *cmnd[MAX_CMNDS];
3810     spinlock_t lock;
3811     struct work_struct work;
3812     + struct work_struct scan_work; /* for async scanning */
3813     };
3814    
3815     enum {
3816     @@ -114,6 +115,17 @@ out:
3817     spin_unlock_irqrestore(&devinfo->lock, flags);
3818     }
3819    
3820     +static void uas_scan_work(struct work_struct *work)
3821     +{
3822     + struct uas_dev_info *devinfo =
3823     + container_of(work, struct uas_dev_info, scan_work);
3824     + struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
3825     +
3826     + dev_dbg(&devinfo->intf->dev, "starting scan\n");
3827     + scsi_scan_host(shost);
3828     + dev_dbg(&devinfo->intf->dev, "scan complete\n");
3829     +}
3830     +
3831     static void uas_add_work(struct uas_cmd_info *cmdinfo)
3832     {
3833     struct scsi_pointer *scp = (void *)cmdinfo;
3834     @@ -983,6 +995,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
3835     init_usb_anchor(&devinfo->data_urbs);
3836     spin_lock_init(&devinfo->lock);
3837     INIT_WORK(&devinfo->work, uas_do_work);
3838     + INIT_WORK(&devinfo->scan_work, uas_scan_work);
3839    
3840     result = uas_configure_endpoints(devinfo);
3841     if (result)
3842     @@ -999,7 +1012,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
3843     if (result)
3844     goto free_streams;
3845    
3846     - scsi_scan_host(shost);
3847     + /* Submit the delayed_work for SCSI-device scanning */
3848     + schedule_work(&devinfo->scan_work);
3849     +
3850     return result;
3851    
3852     free_streams:
3853     @@ -1167,6 +1182,12 @@ static void uas_disconnect(struct usb_interface *intf)
3854     usb_kill_anchored_urbs(&devinfo->data_urbs);
3855     uas_zap_pending(devinfo, DID_NO_CONNECT);
3856    
3857     + /*
3858     + * Prevent SCSI scanning (if it hasn't started yet)
3859     + * or wait for the SCSI-scanning routine to stop.
3860     + */
3861     + cancel_work_sync(&devinfo->scan_work);
3862     +
3863     scsi_remove_host(shost);
3864     uas_free_streams(devinfo);
3865     scsi_host_put(shost);
3866     diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
3867     index 8b9919c26095..456a164364a2 100644
3868     --- a/drivers/xen/preempt.c
3869     +++ b/drivers/xen/preempt.c
3870     @@ -33,7 +33,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
3871     * cpu.
3872     */
3873     __this_cpu_write(xen_in_preemptible_hcall, false);
3874     - _cond_resched();
3875     + local_irq_enable();
3876     + cond_resched();
3877     + local_irq_disable();
3878     __this_cpu_write(xen_in_preemptible_hcall, true);
3879     }
3880     }
3881     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3882     index b0ccca5d08b5..5cdd1b51285b 100644
3883     --- a/fs/btrfs/disk-io.c
3884     +++ b/fs/btrfs/disk-io.c
3885     @@ -3203,6 +3203,7 @@ retry_root_backup:
3886     if (IS_ERR(fs_info->fs_root)) {
3887     err = PTR_ERR(fs_info->fs_root);
3888     btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3889     + fs_info->fs_root = NULL;
3890     goto fail_qgroup;
3891     }
3892    
3893     @@ -4293,6 +4294,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3894     cond_resched();
3895     spin_lock(&delayed_refs->lock);
3896     }
3897     + btrfs_qgroup_destroy_extent_records(trans);
3898    
3899     spin_unlock(&delayed_refs->lock);
3900    
3901     @@ -4518,7 +4520,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3902     wake_up(&fs_info->transaction_wait);
3903    
3904     btrfs_destroy_delayed_inodes(fs_info);
3905     - btrfs_assert_delayed_root_empty(fs_info);
3906    
3907     btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
3908     EXTENT_DIRTY);
3909     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3910     index dc50605ecbda..47ecf7216b3e 100644
3911     --- a/fs/btrfs/extent-tree.c
3912     +++ b/fs/btrfs/extent-tree.c
3913     @@ -4411,6 +4411,8 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
3914    
3915     ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
3916     offset, ins, 1);
3917     + if (ret)
3918     + btrfs_pin_extent(fs_info, ins->objectid, ins->offset, 1);
3919     btrfs_put_block_group(block_group);
3920     return ret;
3921     }
3922     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3923     index b83eef445db3..50feb01f27f3 100644
3924     --- a/fs/btrfs/inode.c
3925     +++ b/fs/btrfs/inode.c
3926     @@ -4734,6 +4734,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3927     u64 bytes_deleted = 0;
3928     bool be_nice = false;
3929     bool should_throttle = false;
3930     + const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
3931     + struct extent_state *cached_state = NULL;
3932    
3933     BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3934    
3935     @@ -4750,6 +4752,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3936     return -ENOMEM;
3937     path->reada = READA_BACK;
3938    
3939     + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
3940     + lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
3941     + &cached_state);
3942     +
3943     /*
3944     * We want to drop from the next block forward in case this new size is
3945     * not block aligned since we will be keeping the last block of the
3946     @@ -4786,7 +4792,6 @@ search_again:
3947     goto out;
3948     }
3949    
3950     - path->leave_spinning = 1;
3951     ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3952     if (ret < 0)
3953     goto out;
3954     @@ -4938,7 +4943,6 @@ delete:
3955     root == fs_info->tree_root)) {
3956     struct btrfs_ref ref = { 0 };
3957    
3958     - btrfs_set_path_blocking(path);
3959     bytes_deleted += extent_num_bytes;
3960    
3961     btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
3962     @@ -5014,6 +5018,8 @@ out:
3963     if (!ret && last_size > new_size)
3964     last_size = new_size;
3965     btrfs_ordered_update_i_size(inode, last_size, NULL);
3966     + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
3967     + (u64)-1, &cached_state);
3968     }
3969    
3970     btrfs_free_path(path);
3971     @@ -10464,6 +10470,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
3972     struct btrfs_root *root = BTRFS_I(inode)->root;
3973     struct btrfs_key ins;
3974     u64 cur_offset = start;
3975     + u64 clear_offset = start;
3976     u64 i_size;
3977     u64 cur_bytes;
3978     u64 last_alloc = (u64)-1;
3979     @@ -10498,6 +10505,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
3980     btrfs_end_transaction(trans);
3981     break;
3982     }
3983     +
3984     + /*
3985     + * We've reserved this space, and thus converted it from
3986     + * ->bytes_may_use to ->bytes_reserved. Any error that happens
3987     + * from here on out we will only need to clear our reservation
3988     + * for the remaining unreserved area, so advance our
3989     + * clear_offset by our extent size.
3990     + */
3991     + clear_offset += ins.offset;
3992     btrfs_dec_block_group_reservations(fs_info, ins.objectid);
3993    
3994     last_alloc = ins.offset;
3995     @@ -10578,9 +10594,9 @@ next:
3996     if (own_trans)
3997     btrfs_end_transaction(trans);
3998     }
3999     - if (cur_offset < end)
4000     - btrfs_free_reserved_data_space(inode, NULL, cur_offset,
4001     - end - cur_offset + 1);
4002     + if (clear_offset < end)
4003     + btrfs_free_reserved_data_space(inode, NULL, clear_offset,
4004     + end - clear_offset + 1);
4005     return ret;
4006     }
4007    
4008     diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
4009     index 6240a5a1f2c0..00e1ef4f7979 100644
4010     --- a/fs/btrfs/ordered-data.c
4011     +++ b/fs/btrfs/ordered-data.c
4012     @@ -690,10 +690,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
4013     }
4014     btrfs_start_ordered_extent(inode, ordered, 1);
4015     end = ordered->file_offset;
4016     + /*
4017     + * If the ordered extent had an error save the error but don't
4018     + * exit without waiting first for all other ordered extents in
4019     + * the range to complete.
4020     + */
4021     if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
4022     ret = -EIO;
4023     btrfs_put_ordered_extent(ordered);
4024     - if (ret || end == 0 || end == start)
4025     + if (end == 0 || end == start)
4026     break;
4027     end--;
4028     }
4029     diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
4030     index 50517221638a..286c8c11c8d3 100644
4031     --- a/fs/btrfs/qgroup.c
4032     +++ b/fs/btrfs/qgroup.c
4033     @@ -4018,3 +4018,16 @@ out:
4034     }
4035     return ret;
4036     }
4037     +
4038     +void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4039     +{
4040     + struct btrfs_qgroup_extent_record *entry;
4041     + struct btrfs_qgroup_extent_record *next;
4042     + struct rb_root *root;
4043     +
4044     + root = &trans->delayed_refs.dirty_extent_root;
4045     + rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
4046     + ulist_free(entry->old_roots);
4047     + kfree(entry);
4048     + }
4049     +}
4050     diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
4051     index 46ba7bd2961c..17e8ac992c50 100644
4052     --- a/fs/btrfs/qgroup.h
4053     +++ b/fs/btrfs/qgroup.h
4054     @@ -414,5 +414,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4055     u64 last_snapshot);
4056     int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4057     struct btrfs_root *root, struct extent_buffer *eb);
4058     +void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
4059    
4060     #endif
4061     diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
4062     index ceffec752234..98b6903e3938 100644
4063     --- a/fs/btrfs/transaction.c
4064     +++ b/fs/btrfs/transaction.c
4065     @@ -51,6 +51,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
4066     BUG_ON(!list_empty(&transaction->list));
4067     WARN_ON(!RB_EMPTY_ROOT(
4068     &transaction->delayed_refs.href_root.rb_root));
4069     + WARN_ON(!RB_EMPTY_ROOT(
4070     + &transaction->delayed_refs.dirty_extent_root));
4071     if (transaction->delayed_refs.pending_csums)
4072     btrfs_err(transaction->fs_info,
4073     "pending csums is %llu",
4074     diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
4075     index f91db24bbf3b..a064b408d841 100644
4076     --- a/fs/ecryptfs/crypto.c
4077     +++ b/fs/ecryptfs/crypto.c
4078     @@ -311,8 +311,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
4079     struct extent_crypt_result ecr;
4080     int rc = 0;
4081    
4082     - BUG_ON(!crypt_stat || !crypt_stat->tfm
4083     - || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
4084     + if (!crypt_stat || !crypt_stat->tfm
4085     + || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
4086     + return -EINVAL;
4087     +
4088     if (unlikely(ecryptfs_verbosity > 0)) {
4089     ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
4090     crypt_stat->key_size);
4091     diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
4092     index 216fbe6a4837..4dc09638de8f 100644
4093     --- a/fs/ecryptfs/keystore.c
4094     +++ b/fs/ecryptfs/keystore.c
4095     @@ -1304,7 +1304,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
4096     printk(KERN_WARNING "Tag 1 packet contains key larger "
4097     "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
4098     rc = -EINVAL;
4099     - goto out;
4100     + goto out_free;
4101     }
4102     memcpy((*new_auth_tok)->session_key.encrypted_key,
4103     &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
4104     diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
4105     index d668e60b85b5..c05ca39aa449 100644
4106     --- a/fs/ecryptfs/messaging.c
4107     +++ b/fs/ecryptfs/messaging.c
4108     @@ -379,6 +379,7 @@ int __init ecryptfs_init_messaging(void)
4109     * ecryptfs_message_buf_len),
4110     GFP_KERNEL);
4111     if (!ecryptfs_msg_ctx_arr) {
4112     + kfree(ecryptfs_daemon_hash);
4113     rc = -ENOMEM;
4114     goto out;
4115     }
4116     diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
4117     index 0b202e00d93f..5aba67a504cf 100644
4118     --- a/fs/ext4/balloc.c
4119     +++ b/fs/ext4/balloc.c
4120     @@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
4121     ext4_group_t ngroups = ext4_get_groups_count(sb);
4122     struct ext4_group_desc *desc;
4123     struct ext4_sb_info *sbi = EXT4_SB(sb);
4124     + struct buffer_head *bh_p;
4125    
4126     if (block_group >= ngroups) {
4127     ext4_error(sb, "block_group >= groups_count - block_group = %u,"
4128     @@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
4129    
4130     group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
4131     offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
4132     - if (!sbi->s_group_desc[group_desc]) {
4133     + bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
4134     + /*
4135     + * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
4136     + * the pointer being dereferenced won't be dereferenced again. By
4137     + * looking at the usage in add_new_gdb() the value isn't modified,
4138     + * just the pointer, and so it remains valid.
4139     + */
4140     + if (!bh_p) {
4141     ext4_error(sb, "Group descriptor not loaded - "
4142     "block_group = %u, group_desc = %u, desc = %u",
4143     block_group, group_desc, offset);
4144     @@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
4145     }
4146    
4147     desc = (struct ext4_group_desc *)(
4148     - (__u8 *)sbi->s_group_desc[group_desc]->b_data +
4149     + (__u8 *)bh_p->b_data +
4150     offset * EXT4_DESC_SIZE(sb));
4151     if (bh)
4152     - *bh = sbi->s_group_desc[group_desc];
4153     + *bh = bh_p;
4154     return desc;
4155     }
4156    
4157     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
4158     index e2f65b565c1f..d576addfdd03 100644
4159     --- a/fs/ext4/ext4.h
4160     +++ b/fs/ext4/ext4.h
4161     @@ -1396,7 +1396,7 @@ struct ext4_sb_info {
4162     loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
4163     struct buffer_head * s_sbh; /* Buffer containing the super block */
4164     struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
4165     - struct buffer_head **s_group_desc;
4166     + struct buffer_head * __rcu *s_group_desc;
4167     unsigned int s_mount_opt;
4168     unsigned int s_mount_opt2;
4169     unsigned int s_mount_flags;
4170     @@ -1458,7 +1458,7 @@ struct ext4_sb_info {
4171     #endif
4172    
4173     /* for buddy allocator */
4174     - struct ext4_group_info ***s_group_info;
4175     + struct ext4_group_info ** __rcu *s_group_info;
4176     struct inode *s_buddy_cache;
4177     spinlock_t s_md_lock;
4178     unsigned short *s_mb_offsets;
4179     @@ -1508,7 +1508,7 @@ struct ext4_sb_info {
4180     unsigned int s_extent_max_zeroout_kb;
4181    
4182     unsigned int s_log_groups_per_flex;
4183     - struct flex_groups *s_flex_groups;
4184     + struct flex_groups * __rcu *s_flex_groups;
4185     ext4_group_t s_flex_groups_allocated;
4186    
4187     /* workqueue for reserved extent conversions (buffered io) */
4188     @@ -1548,8 +1548,11 @@ struct ext4_sb_info {
4189     struct ratelimit_state s_warning_ratelimit_state;
4190     struct ratelimit_state s_msg_ratelimit_state;
4191    
4192     - /* Barrier between changing inodes' journal flags and writepages ops. */
4193     - struct percpu_rw_semaphore s_journal_flag_rwsem;
4194     + /*
4195     + * Barrier between writepages ops and changing any inode's JOURNAL_DATA
4196     + * or EXTENTS flag.
4197     + */
4198     + struct percpu_rw_semaphore s_writepages_rwsem;
4199     struct dax_device *s_daxdev;
4200     };
4201    
4202     @@ -1569,6 +1572,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
4203     ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
4204     }
4205    
4206     +/*
4207     + * Returns: sbi->field[index]
4208     + * Used to access an array element from the following sbi fields which require
4209     + * rcu protection to avoid dereferencing an invalid pointer due to reassignment
4210     + * - s_group_desc
4211     + * - s_group_info
4212     + * - s_flex_group
4213     + */
4214     +#define sbi_array_rcu_deref(sbi, field, index) \
4215     +({ \
4216     + typeof(*((sbi)->field)) _v; \
4217     + rcu_read_lock(); \
4218     + _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
4219     + rcu_read_unlock(); \
4220     + _v; \
4221     +})
4222     +
4223     /*
4224     * Inode dynamic state flags
4225     */
4226     @@ -2666,6 +2686,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
4227     extern bool ext4_empty_dir(struct inode *inode);
4228    
4229     /* resize.c */
4230     +extern void ext4_kvfree_array_rcu(void *to_free);
4231     extern int ext4_group_add(struct super_block *sb,
4232     struct ext4_new_group_data *input);
4233     extern int ext4_group_extend(struct super_block *sb,
4234     @@ -2913,13 +2934,13 @@ static inline
4235     struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
4236     ext4_group_t group)
4237     {
4238     - struct ext4_group_info ***grp_info;
4239     + struct ext4_group_info **grp_info;
4240     long indexv, indexh;
4241     BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
4242     - grp_info = EXT4_SB(sb)->s_group_info;
4243     indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
4244     indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
4245     - return grp_info[indexv][indexh];
4246     + grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
4247     + return grp_info[indexh];
4248     }
4249    
4250     /*
4251     @@ -2969,7 +2990,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
4252     !inode_is_locked(inode));
4253     down_write(&EXT4_I(inode)->i_data_sem);
4254     if (newsize > EXT4_I(inode)->i_disksize)
4255     - EXT4_I(inode)->i_disksize = newsize;
4256     + WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
4257     up_write(&EXT4_I(inode)->i_data_sem);
4258     }
4259    
4260     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
4261     index 564e2ceb8417..a6288730210e 100644
4262     --- a/fs/ext4/ialloc.c
4263     +++ b/fs/ext4/ialloc.c
4264     @@ -325,11 +325,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
4265    
4266     percpu_counter_inc(&sbi->s_freeinodes_counter);
4267     if (sbi->s_log_groups_per_flex) {
4268     - ext4_group_t f = ext4_flex_group(sbi, block_group);
4269     + struct flex_groups *fg;
4270    
4271     - atomic_inc(&sbi->s_flex_groups[f].free_inodes);
4272     + fg = sbi_array_rcu_deref(sbi, s_flex_groups,
4273     + ext4_flex_group(sbi, block_group));
4274     + atomic_inc(&fg->free_inodes);
4275     if (is_directory)
4276     - atomic_dec(&sbi->s_flex_groups[f].used_dirs);
4277     + atomic_dec(&fg->used_dirs);
4278     }
4279     BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
4280     fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
4281     @@ -365,12 +367,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
4282     int flex_size, struct orlov_stats *stats)
4283     {
4284     struct ext4_group_desc *desc;
4285     - struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
4286    
4287     if (flex_size > 1) {
4288     - stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
4289     - stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
4290     - stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
4291     + struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
4292     + s_flex_groups, g);
4293     + stats->free_inodes = atomic_read(&fg->free_inodes);
4294     + stats->free_clusters = atomic64_read(&fg->free_clusters);
4295     + stats->used_dirs = atomic_read(&fg->used_dirs);
4296     return;
4297     }
4298    
4299     @@ -1051,7 +1054,8 @@ got:
4300     if (sbi->s_log_groups_per_flex) {
4301     ext4_group_t f = ext4_flex_group(sbi, group);
4302    
4303     - atomic_inc(&sbi->s_flex_groups[f].used_dirs);
4304     + atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
4305     + f)->used_dirs);
4306     }
4307     }
4308     if (ext4_has_group_desc_csum(sb)) {
4309     @@ -1074,7 +1078,8 @@ got:
4310    
4311     if (sbi->s_log_groups_per_flex) {
4312     flex_group = ext4_flex_group(sbi, group);
4313     - atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
4314     + atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
4315     + flex_group)->free_inodes);
4316     }
4317    
4318     inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
4319     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4320     index 76a38ef5f226..70ef4a714b33 100644
4321     --- a/fs/ext4/inode.c
4322     +++ b/fs/ext4/inode.c
4323     @@ -2573,7 +2573,7 @@ update_disksize:
4324     * truncate are avoided by checking i_size under i_data_sem.
4325     */
4326     disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
4327     - if (disksize > EXT4_I(inode)->i_disksize) {
4328     + if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
4329     int err2;
4330     loff_t i_size;
4331    
4332     @@ -2734,7 +2734,7 @@ static int ext4_writepages(struct address_space *mapping,
4333     if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
4334     return -EIO;
4335    
4336     - percpu_down_read(&sbi->s_journal_flag_rwsem);
4337     + percpu_down_read(&sbi->s_writepages_rwsem);
4338     trace_ext4_writepages(inode, wbc);
4339    
4340     /*
4341     @@ -2955,7 +2955,7 @@ unplug:
4342     out_writepages:
4343     trace_ext4_writepages_result(inode, wbc, ret,
4344     nr_to_write - wbc->nr_to_write);
4345     - percpu_up_read(&sbi->s_journal_flag_rwsem);
4346     + percpu_up_read(&sbi->s_writepages_rwsem);
4347     return ret;
4348     }
4349    
4350     @@ -2970,13 +2970,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
4351     if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
4352     return -EIO;
4353    
4354     - percpu_down_read(&sbi->s_journal_flag_rwsem);
4355     + percpu_down_read(&sbi->s_writepages_rwsem);
4356     trace_ext4_writepages(inode, wbc);
4357    
4358     ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
4359     trace_ext4_writepages_result(inode, wbc, ret,
4360     nr_to_write - wbc->nr_to_write);
4361     - percpu_up_read(&sbi->s_journal_flag_rwsem);
4362     + percpu_up_read(&sbi->s_writepages_rwsem);
4363     return ret;
4364     }
4365    
4366     @@ -6185,7 +6185,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
4367     }
4368     }
4369    
4370     - percpu_down_write(&sbi->s_journal_flag_rwsem);
4371     + percpu_down_write(&sbi->s_writepages_rwsem);
4372     jbd2_journal_lock_updates(journal);
4373    
4374     /*
4375     @@ -6202,7 +6202,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
4376     err = jbd2_journal_flush(journal);
4377     if (err < 0) {
4378     jbd2_journal_unlock_updates(journal);
4379     - percpu_up_write(&sbi->s_journal_flag_rwsem);
4380     + percpu_up_write(&sbi->s_writepages_rwsem);
4381     return err;
4382     }
4383     ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4384     @@ -6210,7 +6210,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
4385     ext4_set_aops(inode);
4386    
4387     jbd2_journal_unlock_updates(journal);
4388     - percpu_up_write(&sbi->s_journal_flag_rwsem);
4389     + percpu_up_write(&sbi->s_writepages_rwsem);
4390    
4391     if (val)
4392     up_write(&EXT4_I(inode)->i_mmap_sem);
4393     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
4394     index a3e2767bdf2f..c76ffc259d19 100644
4395     --- a/fs/ext4/mballoc.c
4396     +++ b/fs/ext4/mballoc.c
4397     @@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
4398     {
4399     struct ext4_sb_info *sbi = EXT4_SB(sb);
4400     unsigned size;
4401     - struct ext4_group_info ***new_groupinfo;
4402     + struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
4403    
4404     size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
4405     EXT4_DESC_PER_BLOCK_BITS(sb);
4406     @@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
4407     ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
4408     return -ENOMEM;
4409     }
4410     - if (sbi->s_group_info) {
4411     - memcpy(new_groupinfo, sbi->s_group_info,
4412     + rcu_read_lock();
4413     + old_groupinfo = rcu_dereference(sbi->s_group_info);
4414     + if (old_groupinfo)
4415     + memcpy(new_groupinfo, old_groupinfo,
4416     sbi->s_group_info_size * sizeof(*sbi->s_group_info));
4417     - kvfree(sbi->s_group_info);
4418     - }
4419     - sbi->s_group_info = new_groupinfo;
4420     + rcu_read_unlock();
4421     + rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
4422     sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
4423     + if (old_groupinfo)
4424     + ext4_kvfree_array_rcu(old_groupinfo);
4425     ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
4426     sbi->s_group_info_size);
4427     return 0;
4428     @@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
4429     {
4430     int i;
4431     int metalen = 0;
4432     + int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
4433     struct ext4_sb_info *sbi = EXT4_SB(sb);
4434     struct ext4_group_info **meta_group_info;
4435     struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
4436     @@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
4437     "for a buddy group");
4438     goto exit_meta_group_info;
4439     }
4440     - sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
4441     - meta_group_info;
4442     + rcu_read_lock();
4443     + rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
4444     + rcu_read_unlock();
4445     }
4446    
4447     - meta_group_info =
4448     - sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
4449     + meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
4450     i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
4451    
4452     meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
4453     @@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
4454     exit_group_info:
4455     /* If a meta_group_info table has been allocated, release it now */
4456     if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
4457     - kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
4458     - sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
4459     + struct ext4_group_info ***group_info;
4460     +
4461     + rcu_read_lock();
4462     + group_info = rcu_dereference(sbi->s_group_info);
4463     + kfree(group_info[idx]);
4464     + group_info[idx] = NULL;
4465     + rcu_read_unlock();
4466     }
4467     exit_meta_group_info:
4468     return -ENOMEM;
4469     @@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
4470     struct ext4_sb_info *sbi = EXT4_SB(sb);
4471     int err;
4472     struct ext4_group_desc *desc;
4473     + struct ext4_group_info ***group_info;
4474     struct kmem_cache *cachep;
4475    
4476     err = ext4_mb_alloc_groupinfo(sb, ngroups);
4477     @@ -2507,11 +2517,16 @@ err_freebuddy:
4478     while (i-- > 0)
4479     kmem_cache_free(cachep, ext4_get_group_info(sb, i));
4480     i = sbi->s_group_info_size;
4481     + rcu_read_lock();
4482     + group_info = rcu_dereference(sbi->s_group_info);
4483     while (i-- > 0)
4484     - kfree(sbi->s_group_info[i]);
4485     + kfree(group_info[i]);
4486     + rcu_read_unlock();
4487     iput(sbi->s_buddy_cache);
4488     err_freesgi:
4489     - kvfree(sbi->s_group_info);
4490     + rcu_read_lock();
4491     + kvfree(rcu_dereference(sbi->s_group_info));
4492     + rcu_read_unlock();
4493     return -ENOMEM;
4494     }
4495    
4496     @@ -2700,7 +2715,7 @@ int ext4_mb_release(struct super_block *sb)
4497     ext4_group_t ngroups = ext4_get_groups_count(sb);
4498     ext4_group_t i;
4499     int num_meta_group_infos;
4500     - struct ext4_group_info *grinfo;
4501     + struct ext4_group_info *grinfo, ***group_info;
4502     struct ext4_sb_info *sbi = EXT4_SB(sb);
4503     struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
4504    
4505     @@ -2719,9 +2734,12 @@ int ext4_mb_release(struct super_block *sb)
4506     num_meta_group_infos = (ngroups +
4507     EXT4_DESC_PER_BLOCK(sb) - 1) >>
4508     EXT4_DESC_PER_BLOCK_BITS(sb);
4509     + rcu_read_lock();
4510     + group_info = rcu_dereference(sbi->s_group_info);
4511     for (i = 0; i < num_meta_group_infos; i++)
4512     - kfree(sbi->s_group_info[i]);
4513     - kvfree(sbi->s_group_info);
4514     + kfree(group_info[i]);
4515     + kvfree(group_info);
4516     + rcu_read_unlock();
4517     }
4518     kfree(sbi->s_mb_offsets);
4519     kfree(sbi->s_mb_maxs);
4520     @@ -3020,7 +3038,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
4521     ext4_group_t flex_group = ext4_flex_group(sbi,
4522     ac->ac_b_ex.fe_group);
4523     atomic64_sub(ac->ac_b_ex.fe_len,
4524     - &sbi->s_flex_groups[flex_group].free_clusters);
4525     + &sbi_array_rcu_deref(sbi, s_flex_groups,
4526     + flex_group)->free_clusters);
4527     }
4528    
4529     err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4530     @@ -4914,7 +4933,8 @@ do_more:
4531     if (sbi->s_log_groups_per_flex) {
4532     ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4533     atomic64_add(count_clusters,
4534     - &sbi->s_flex_groups[flex_group].free_clusters);
4535     + &sbi_array_rcu_deref(sbi, s_flex_groups,
4536     + flex_group)->free_clusters);
4537     }
4538    
4539     /*
4540     @@ -5071,7 +5091,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4541     if (sbi->s_log_groups_per_flex) {
4542     ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4543     atomic64_add(clusters_freed,
4544     - &sbi->s_flex_groups[flex_group].free_clusters);
4545     + &sbi_array_rcu_deref(sbi, s_flex_groups,
4546     + flex_group)->free_clusters);
4547     }
4548    
4549     ext4_mb_unload_buddy(&e4b);
4550     diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
4551     index b1e4d359f73b..be4ee3dcc5cf 100644
4552     --- a/fs/ext4/migrate.c
4553     +++ b/fs/ext4/migrate.c
4554     @@ -427,6 +427,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
4555    
4556     int ext4_ext_migrate(struct inode *inode)
4557     {
4558     + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4559     handle_t *handle;
4560     int retval = 0, i;
4561     __le32 *i_data;
4562     @@ -451,6 +452,8 @@ int ext4_ext_migrate(struct inode *inode)
4563     */
4564     return retval;
4565    
4566     + percpu_down_write(&sbi->s_writepages_rwsem);
4567     +
4568     /*
4569     * Worst case we can touch the allocation bitmaps, a bgd
4570     * block, and a block to link in the orphan list. We do need
4571     @@ -461,7 +464,7 @@ int ext4_ext_migrate(struct inode *inode)
4572    
4573     if (IS_ERR(handle)) {
4574     retval = PTR_ERR(handle);
4575     - return retval;
4576     + goto out_unlock;
4577     }
4578     goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
4579     EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
4580     @@ -472,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
4581     if (IS_ERR(tmp_inode)) {
4582     retval = PTR_ERR(tmp_inode);
4583     ext4_journal_stop(handle);
4584     - return retval;
4585     + goto out_unlock;
4586     }
4587     i_size_write(tmp_inode, i_size_read(inode));
4588     /*
4589     @@ -514,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode)
4590     */
4591     ext4_orphan_del(NULL, tmp_inode);
4592     retval = PTR_ERR(handle);
4593     - goto out;
4594     + goto out_tmp_inode;
4595     }
4596    
4597     ei = EXT4_I(inode);
4598     @@ -595,10 +598,11 @@ err_out:
4599     /* Reset the extent details */
4600     ext4_ext_tree_init(handle, tmp_inode);
4601     ext4_journal_stop(handle);
4602     -out:
4603     +out_tmp_inode:
4604     unlock_new_inode(tmp_inode);
4605     iput(tmp_inode);
4606     -
4607     +out_unlock:
4608     + percpu_up_write(&sbi->s_writepages_rwsem);
4609     return retval;
4610     }
4611    
4612     @@ -608,7 +612,8 @@ out:
4613     int ext4_ind_migrate(struct inode *inode)
4614     {
4615     struct ext4_extent_header *eh;
4616     - struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
4617     + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4618     + struct ext4_super_block *es = sbi->s_es;
4619     struct ext4_inode_info *ei = EXT4_I(inode);
4620     struct ext4_extent *ex;
4621     unsigned int i, len;
4622     @@ -632,9 +637,13 @@ int ext4_ind_migrate(struct inode *inode)
4623     if (test_opt(inode->i_sb, DELALLOC))
4624     ext4_alloc_da_blocks(inode);
4625    
4626     + percpu_down_write(&sbi->s_writepages_rwsem);
4627     +
4628     handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
4629     - if (IS_ERR(handle))
4630     - return PTR_ERR(handle);
4631     + if (IS_ERR(handle)) {
4632     + ret = PTR_ERR(handle);
4633     + goto out_unlock;
4634     + }
4635    
4636     down_write(&EXT4_I(inode)->i_data_sem);
4637     ret = ext4_ext_check_inode(inode);
4638     @@ -669,5 +678,7 @@ int ext4_ind_migrate(struct inode *inode)
4639     errout:
4640     ext4_journal_stop(handle);
4641     up_write(&EXT4_I(inode)->i_data_sem);
4642     +out_unlock:
4643     + percpu_up_write(&sbi->s_writepages_rwsem);
4644     return ret;
4645     }
4646     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4647     index 94d84910dc1e..a564d0289a70 100644
4648     --- a/fs/ext4/namei.c
4649     +++ b/fs/ext4/namei.c
4650     @@ -1507,6 +1507,7 @@ restart:
4651     /*
4652     * We deal with the read-ahead logic here.
4653     */
4654     + cond_resched();
4655     if (ra_ptr >= ra_max) {
4656     /* Refill the readahead buffer */
4657     ra_ptr = 0;
4658     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
4659     index c0e9aef376a7..080e25f6ef56 100644
4660     --- a/fs/ext4/resize.c
4661     +++ b/fs/ext4/resize.c
4662     @@ -17,6 +17,33 @@
4663    
4664     #include "ext4_jbd2.h"
4665    
4666     +struct ext4_rcu_ptr {
4667     + struct rcu_head rcu;
4668     + void *ptr;
4669     +};
4670     +
4671     +static void ext4_rcu_ptr_callback(struct rcu_head *head)
4672     +{
4673     + struct ext4_rcu_ptr *ptr;
4674     +
4675     + ptr = container_of(head, struct ext4_rcu_ptr, rcu);
4676     + kvfree(ptr->ptr);
4677     + kfree(ptr);
4678     +}
4679     +
4680     +void ext4_kvfree_array_rcu(void *to_free)
4681     +{
4682     + struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
4683     +
4684     + if (ptr) {
4685     + ptr->ptr = to_free;
4686     + call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
4687     + return;
4688     + }
4689     + synchronize_rcu();
4690     + kvfree(to_free);
4691     +}
4692     +
4693     int ext4_resize_begin(struct super_block *sb)
4694     {
4695     struct ext4_sb_info *sbi = EXT4_SB(sb);
4696     @@ -560,8 +587,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
4697     brelse(gdb);
4698     goto out;
4699     }
4700     - memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
4701     - gdb->b_size);
4702     + memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
4703     + s_group_desc, j)->b_data, gdb->b_size);
4704     set_buffer_uptodate(gdb);
4705    
4706     err = ext4_handle_dirty_metadata(handle, NULL, gdb);
4707     @@ -879,13 +906,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4708     }
4709     brelse(dind);
4710    
4711     - o_group_desc = EXT4_SB(sb)->s_group_desc;
4712     + rcu_read_lock();
4713     + o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
4714     memcpy(n_group_desc, o_group_desc,
4715     EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
4716     + rcu_read_unlock();
4717     n_group_desc[gdb_num] = gdb_bh;
4718     - EXT4_SB(sb)->s_group_desc = n_group_desc;
4719     + rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
4720     EXT4_SB(sb)->s_gdb_count++;
4721     - kvfree(o_group_desc);
4722     + ext4_kvfree_array_rcu(o_group_desc);
4723    
4724     le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
4725     err = ext4_handle_dirty_super(handle, sb);
4726     @@ -929,9 +958,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
4727     return err;
4728     }
4729    
4730     - o_group_desc = EXT4_SB(sb)->s_group_desc;
4731     + rcu_read_lock();
4732     + o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
4733     memcpy(n_group_desc, o_group_desc,
4734     EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
4735     + rcu_read_unlock();
4736     n_group_desc[gdb_num] = gdb_bh;
4737    
4738     BUFFER_TRACE(gdb_bh, "get_write_access");
4739     @@ -942,9 +973,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
4740     return err;
4741     }
4742    
4743     - EXT4_SB(sb)->s_group_desc = n_group_desc;
4744     + rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
4745     EXT4_SB(sb)->s_gdb_count++;
4746     - kvfree(o_group_desc);
4747     + ext4_kvfree_array_rcu(o_group_desc);
4748     return err;
4749     }
4750    
4751     @@ -1210,7 +1241,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
4752     * use non-sparse filesystems anymore. This is already checked above.
4753     */
4754     if (gdb_off) {
4755     - gdb_bh = sbi->s_group_desc[gdb_num];
4756     + gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
4757     + gdb_num);
4758     BUFFER_TRACE(gdb_bh, "get_write_access");
4759     err = ext4_journal_get_write_access(handle, gdb_bh);
4760    
4761     @@ -1292,7 +1324,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
4762     /*
4763     * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
4764     */
4765     - gdb_bh = sbi->s_group_desc[gdb_num];
4766     + gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
4767     /* Update group descriptor block for new group */
4768     gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
4769     gdb_off * EXT4_DESC_SIZE(sb));
4770     @@ -1420,11 +1452,14 @@ static void ext4_update_super(struct super_block *sb,
4771     percpu_counter_read(&sbi->s_freeclusters_counter));
4772     if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
4773     ext4_group_t flex_group;
4774     + struct flex_groups *fg;
4775     +
4776     flex_group = ext4_flex_group(sbi, group_data[0].group);
4777     + fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
4778     atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
4779     - &sbi->s_flex_groups[flex_group].free_clusters);
4780     + &fg->free_clusters);
4781     atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
4782     - &sbi->s_flex_groups[flex_group].free_inodes);
4783     + &fg->free_inodes);
4784     }
4785    
4786     /*
4787     @@ -1519,7 +1554,8 @@ exit_journal:
4788     for (; gdb_num <= gdb_num_end; gdb_num++) {
4789     struct buffer_head *gdb_bh;
4790    
4791     - gdb_bh = sbi->s_group_desc[gdb_num];
4792     + gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
4793     + gdb_num);
4794     if (old_gdb == gdb_bh->b_blocknr)
4795     continue;
4796     update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
4797     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4798     index 914230e63054..3ca604807839 100644
4799     --- a/fs/ext4/super.c
4800     +++ b/fs/ext4/super.c
4801     @@ -970,6 +970,8 @@ static void ext4_put_super(struct super_block *sb)
4802     {
4803     struct ext4_sb_info *sbi = EXT4_SB(sb);
4804     struct ext4_super_block *es = sbi->s_es;
4805     + struct buffer_head **group_desc;
4806     + struct flex_groups **flex_groups;
4807     int aborted = 0;
4808     int i, err;
4809    
4810     @@ -1000,15 +1002,23 @@ static void ext4_put_super(struct super_block *sb)
4811     if (!sb_rdonly(sb))
4812     ext4_commit_super(sb, 1);
4813    
4814     + rcu_read_lock();
4815     + group_desc = rcu_dereference(sbi->s_group_desc);
4816     for (i = 0; i < sbi->s_gdb_count; i++)
4817     - brelse(sbi->s_group_desc[i]);
4818     - kvfree(sbi->s_group_desc);
4819     - kvfree(sbi->s_flex_groups);
4820     + brelse(group_desc[i]);
4821     + kvfree(group_desc);
4822     + flex_groups = rcu_dereference(sbi->s_flex_groups);
4823     + if (flex_groups) {
4824     + for (i = 0; i < sbi->s_flex_groups_allocated; i++)
4825     + kvfree(flex_groups[i]);
4826     + kvfree(flex_groups);
4827     + }
4828     + rcu_read_unlock();
4829     percpu_counter_destroy(&sbi->s_freeclusters_counter);
4830     percpu_counter_destroy(&sbi->s_freeinodes_counter);
4831     percpu_counter_destroy(&sbi->s_dirs_counter);
4832     percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
4833     - percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
4834     + percpu_free_rwsem(&sbi->s_writepages_rwsem);
4835     #ifdef CONFIG_QUOTA
4836     for (i = 0; i < EXT4_MAXQUOTAS; i++)
4837     kfree(get_qf_name(sb, sbi, i));
4838     @@ -2332,8 +2342,8 @@ done:
4839     int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
4840     {
4841     struct ext4_sb_info *sbi = EXT4_SB(sb);
4842     - struct flex_groups *new_groups;
4843     - int size;
4844     + struct flex_groups **old_groups, **new_groups;
4845     + int size, i;
4846    
4847     if (!sbi->s_log_groups_per_flex)
4848     return 0;
4849     @@ -2342,22 +2352,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
4850     if (size <= sbi->s_flex_groups_allocated)
4851     return 0;
4852    
4853     - size = roundup_pow_of_two(size * sizeof(struct flex_groups));
4854     - new_groups = kvzalloc(size, GFP_KERNEL);
4855     + new_groups = kvzalloc(roundup_pow_of_two(size *
4856     + sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
4857     if (!new_groups) {
4858     - ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
4859     - size / (int) sizeof(struct flex_groups));
4860     + ext4_msg(sb, KERN_ERR,
4861     + "not enough memory for %d flex group pointers", size);
4862     return -ENOMEM;
4863     }
4864     -
4865     - if (sbi->s_flex_groups) {
4866     - memcpy(new_groups, sbi->s_flex_groups,
4867     - (sbi->s_flex_groups_allocated *
4868     - sizeof(struct flex_groups)));
4869     - kvfree(sbi->s_flex_groups);
4870     + for (i = sbi->s_flex_groups_allocated; i < size; i++) {
4871     + new_groups[i] = kvzalloc(roundup_pow_of_two(
4872     + sizeof(struct flex_groups)),
4873     + GFP_KERNEL);
4874     + if (!new_groups[i]) {
4875     + for (i--; i >= sbi->s_flex_groups_allocated; i--)
4876     + kvfree(new_groups[i]);
4877     + kvfree(new_groups);
4878     + ext4_msg(sb, KERN_ERR,
4879     + "not enough memory for %d flex groups", size);
4880     + return -ENOMEM;
4881     + }
4882     }
4883     - sbi->s_flex_groups = new_groups;
4884     - sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
4885     + rcu_read_lock();
4886     + old_groups = rcu_dereference(sbi->s_flex_groups);
4887     + if (old_groups)
4888     + memcpy(new_groups, old_groups,
4889     + (sbi->s_flex_groups_allocated *
4890     + sizeof(struct flex_groups *)));
4891     + rcu_read_unlock();
4892     + rcu_assign_pointer(sbi->s_flex_groups, new_groups);
4893     + sbi->s_flex_groups_allocated = size;
4894     + if (old_groups)
4895     + ext4_kvfree_array_rcu(old_groups);
4896     return 0;
4897     }
4898    
4899     @@ -2365,6 +2390,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
4900     {
4901     struct ext4_sb_info *sbi = EXT4_SB(sb);
4902     struct ext4_group_desc *gdp = NULL;
4903     + struct flex_groups *fg;
4904     ext4_group_t flex_group;
4905     int i, err;
4906    
4907     @@ -2382,12 +2408,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
4908     gdp = ext4_get_group_desc(sb, i, NULL);
4909    
4910     flex_group = ext4_flex_group(sbi, i);
4911     - atomic_add(ext4_free_inodes_count(sb, gdp),
4912     - &sbi->s_flex_groups[flex_group].free_inodes);
4913     + fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
4914     + atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
4915     atomic64_add(ext4_free_group_clusters(sb, gdp),
4916     - &sbi->s_flex_groups[flex_group].free_clusters);
4917     - atomic_add(ext4_used_dirs_count(sb, gdp),
4918     - &sbi->s_flex_groups[flex_group].used_dirs);
4919     + &fg->free_clusters);
4920     + atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
4921     }
4922    
4923     return 1;
4924     @@ -2961,7 +2986,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
4925     return 0;
4926     }
4927    
4928     -#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
4929     +#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
4930     if (!readonly && (ext4_has_feature_quota(sb) ||
4931     ext4_has_feature_project(sb))) {
4932     ext4_msg(sb, KERN_ERR,
4933     @@ -3586,9 +3611,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4934     {
4935     struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
4936     char *orig_data = kstrdup(data, GFP_KERNEL);
4937     - struct buffer_head *bh;
4938     + struct buffer_head *bh, **group_desc;
4939     struct ext4_super_block *es = NULL;
4940     struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
4941     + struct flex_groups **flex_groups;
4942     ext4_fsblk_t block;
4943     ext4_fsblk_t sb_block = get_sb_block(&data);
4944     ext4_fsblk_t logical_sb_block;
4945     @@ -4242,9 +4268,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4946     goto failed_mount;
4947     }
4948     }
4949     - sbi->s_group_desc = kvmalloc_array(db_count,
4950     - sizeof(struct buffer_head *),
4951     - GFP_KERNEL);
4952     + rcu_assign_pointer(sbi->s_group_desc,
4953     + kvmalloc_array(db_count,
4954     + sizeof(struct buffer_head *),
4955     + GFP_KERNEL));
4956     if (sbi->s_group_desc == NULL) {
4957     ext4_msg(sb, KERN_ERR, "not enough memory");
4958     ret = -ENOMEM;
4959     @@ -4260,14 +4287,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4960     }
4961    
4962     for (i = 0; i < db_count; i++) {
4963     + struct buffer_head *bh;
4964     +
4965     block = descriptor_loc(sb, logical_sb_block, i);
4966     - sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
4967     - if (!sbi->s_group_desc[i]) {
4968     + bh = sb_bread_unmovable(sb, block);
4969     + if (!bh) {
4970     ext4_msg(sb, KERN_ERR,
4971     "can't read group descriptor %d", i);
4972     db_count = i;
4973     goto failed_mount2;
4974     }
4975     + rcu_read_lock();
4976     + rcu_dereference(sbi->s_group_desc)[i] = bh;
4977     + rcu_read_unlock();
4978     }
4979     sbi->s_gdb_count = db_count;
4980     if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
4981     @@ -4553,7 +4585,7 @@ no_journal:
4982     err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
4983     GFP_KERNEL);
4984     if (!err)
4985     - err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
4986     + err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
4987    
4988     if (err) {
4989     ext4_msg(sb, KERN_ERR, "insufficient memory");
4990     @@ -4641,13 +4673,19 @@ failed_mount7:
4991     ext4_unregister_li_request(sb);
4992     failed_mount6:
4993     ext4_mb_release(sb);
4994     - if (sbi->s_flex_groups)
4995     - kvfree(sbi->s_flex_groups);
4996     + rcu_read_lock();
4997     + flex_groups = rcu_dereference(sbi->s_flex_groups);
4998     + if (flex_groups) {
4999     + for (i = 0; i < sbi->s_flex_groups_allocated; i++)
5000     + kvfree(flex_groups[i]);
5001     + kvfree(flex_groups);
5002     + }
5003     + rcu_read_unlock();
5004     percpu_counter_destroy(&sbi->s_freeclusters_counter);
5005     percpu_counter_destroy(&sbi->s_freeinodes_counter);
5006     percpu_counter_destroy(&sbi->s_dirs_counter);
5007     percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
5008     - percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
5009     + percpu_free_rwsem(&sbi->s_writepages_rwsem);
5010     failed_mount5:
5011     ext4_ext_release(sb);
5012     ext4_release_system_zone(sb);
5013     @@ -4676,9 +4714,12 @@ failed_mount3:
5014     if (sbi->s_mmp_tsk)
5015     kthread_stop(sbi->s_mmp_tsk);
5016     failed_mount2:
5017     + rcu_read_lock();
5018     + group_desc = rcu_dereference(sbi->s_group_desc);
5019     for (i = 0; i < db_count; i++)
5020     - brelse(sbi->s_group_desc[i]);
5021     - kvfree(sbi->s_group_desc);
5022     + brelse(group_desc[i]);
5023     + kvfree(group_desc);
5024     + rcu_read_unlock();
5025     failed_mount:
5026     if (sbi->s_chksum_driver)
5027     crypto_free_shash(sbi->s_chksum_driver);
5028     diff --git a/fs/io_uring.c b/fs/io_uring.c
5029     index 709671faaed6..ed9a551882cf 100644
5030     --- a/fs/io_uring.c
5031     +++ b/fs/io_uring.c
5032     @@ -882,11 +882,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
5033     mutex_unlock(&ctx->uring_lock);
5034     }
5035    
5036     -static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5037     - long min)
5038     +static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5039     + long min)
5040     {
5041     int iters = 0, ret = 0;
5042    
5043     + /*
5044     + * We disallow the app entering submit/complete with polling, but we
5045     + * still need to lock the ring to prevent racing with polled issue
5046     + * that got punted to a workqueue.
5047     + */
5048     + mutex_lock(&ctx->uring_lock);
5049     do {
5050     int tmin = 0;
5051    
5052     @@ -922,21 +928,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5053     ret = 0;
5054     } while (min && !*nr_events && !need_resched());
5055    
5056     - return ret;
5057     -}
5058     -
5059     -static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5060     - long min)
5061     -{
5062     - int ret;
5063     -
5064     - /*
5065     - * We disallow the app entering submit/complete with polling, but we
5066     - * still need to lock the ring to prevent racing with polled issue
5067     - * that got punted to a workqueue.
5068     - */
5069     - mutex_lock(&ctx->uring_lock);
5070     - ret = __io_iopoll_check(ctx, nr_events, min);
5071     mutex_unlock(&ctx->uring_lock);
5072     return ret;
5073     }
5074     @@ -2721,7 +2712,7 @@ static int io_sq_thread(void *data)
5075     */
5076     mutex_lock(&ctx->uring_lock);
5077     if (!list_empty(&ctx->poll_list))
5078     - __io_iopoll_check(ctx, &nr_events, 0);
5079     + io_iopoll_getevents(ctx, &nr_events, 0);
5080     else
5081     inflight = 0;
5082     mutex_unlock(&ctx->uring_lock);
5083     @@ -2740,16 +2731,6 @@ static int io_sq_thread(void *data)
5084    
5085     to_submit = io_sqring_entries(ctx);
5086     if (!to_submit) {
5087     - /*
5088     - * We're polling. If we're within the defined idle
5089     - * period, then let us spin without work before going
5090     - * to sleep.
5091     - */
5092     - if (inflight || !time_after(jiffies, timeout)) {
5093     - cond_resched();
5094     - continue;
5095     - }
5096     -
5097     /*
5098     * Drop cur_mm before scheduling, we can't hold it for
5099     * long periods (or over schedule()). Do this before
5100     @@ -2762,6 +2743,16 @@ static int io_sq_thread(void *data)
5101     cur_mm = NULL;
5102     }
5103    
5104     + /*
5105     + * We're polling. If we're within the defined idle
5106     + * period, then let us spin without work before going
5107     + * to sleep.
5108     + */
5109     + if (inflight || !time_after(jiffies, timeout)) {
5110     + cond_resched();
5111     + continue;
5112     + }
5113     +
5114     prepare_to_wait(&ctx->sqo_wait, &wait,
5115     TASK_INTERRUPTIBLE);
5116    
5117     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
5118     index 3930c68a9c20..b17f05ae6011 100644
5119     --- a/fs/jbd2/transaction.c
5120     +++ b/fs/jbd2/transaction.c
5121     @@ -865,8 +865,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
5122     char *frozen_buffer = NULL;
5123     unsigned long start_lock, time_lock;
5124    
5125     - if (is_handle_aborted(handle))
5126     - return -EROFS;
5127     journal = transaction->t_journal;
5128    
5129     jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
5130     @@ -1118,6 +1116,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
5131     struct journal_head *jh;
5132     int rc;
5133    
5134     + if (is_handle_aborted(handle))
5135     + return -EROFS;
5136     +
5137     if (jbd2_write_access_granted(handle, bh, false))
5138     return 0;
5139    
5140     @@ -1255,6 +1256,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
5141     struct journal_head *jh;
5142     char *committed_data = NULL;
5143    
5144     + if (is_handle_aborted(handle))
5145     + return -EROFS;
5146     +
5147     if (jbd2_write_access_granted(handle, bh, true))
5148     return 0;
5149    
5150     diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
5151     index d1fdf26ccb33..4010c42e40bd 100644
5152     --- a/include/acpi/acpixf.h
5153     +++ b/include/acpi/acpixf.h
5154     @@ -749,6 +749,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
5155     ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
5156     ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
5157     ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
5158     +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
5159    
5160     ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
5161     acpi_get_gpe_device(u32 gpe_index,
5162     diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
5163     index 94f047a8a845..d7c403d0dd27 100644
5164     --- a/include/linux/intel-svm.h
5165     +++ b/include/linux/intel-svm.h
5166     @@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
5167     BUG();
5168     }
5169    
5170     -static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
5171     +static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
5172     {
5173     return -EINVAL;
5174     }
5175     diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
5176     index aba5ada373d6..e85f714a623e 100644
5177     --- a/include/linux/irqdomain.h
5178     +++ b/include/linux/irqdomain.h
5179     @@ -191,7 +191,7 @@ enum {
5180     IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
5181    
5182     /* Irq domain name was allocated in __irq_domain_add() */
5183     - IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
5184     + IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
5185    
5186     /* Irq domain is an IPI domain with virq per cpu */
5187     IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
5188     diff --git a/include/linux/libata.h b/include/linux/libata.h
5189     index fa0c3dae2094..c44e4cfbcb16 100644
5190     --- a/include/linux/libata.h
5191     +++ b/include/linux/libata.h
5192     @@ -1220,6 +1220,7 @@ struct pci_bits {
5193     };
5194    
5195     extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
5196     +extern void ata_pci_shutdown_one(struct pci_dev *pdev);
5197     extern void ata_pci_remove_one(struct pci_dev *pdev);
5198    
5199     #ifdef CONFIG_PM
5200     diff --git a/include/linux/tty.h b/include/linux/tty.h
5201     index bfa4e2ee94a9..bd5fe0e907e8 100644
5202     --- a/include/linux/tty.h
5203     +++ b/include/linux/tty.h
5204     @@ -225,6 +225,8 @@ struct tty_port_client_operations {
5205     void (*write_wakeup)(struct tty_port *port);
5206     };
5207    
5208     +extern const struct tty_port_client_operations tty_port_default_client_ops;
5209     +
5210     struct tty_port {
5211     struct tty_bufhead buf; /* Locked internally */
5212     struct tty_struct *tty; /* Back pointer */
5213     diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
5214     index a1be64c9940f..22c1f579afe3 100644
5215     --- a/include/linux/usb/quirks.h
5216     +++ b/include/linux/usb/quirks.h
5217     @@ -69,4 +69,7 @@
5218     /* Hub needs extra delay after resetting its port. */
5219     #define USB_QUIRK_HUB_SLOW_RESET BIT(14)
5220    
5221     +/* device has blacklisted endpoints */
5222     +#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
5223     +
5224     #endif /* __LINUX_USB_QUIRKS_H */
5225     diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
5226     index 533f56733ba8..b71b5c4f418c 100644
5227     --- a/include/scsi/iscsi_proto.h
5228     +++ b/include/scsi/iscsi_proto.h
5229     @@ -627,7 +627,6 @@ struct iscsi_reject {
5230     #define ISCSI_REASON_BOOKMARK_INVALID 9
5231     #define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
5232     #define ISCSI_REASON_NEGOTIATION_RESET 11
5233     -#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
5234    
5235     /* Max. number of Key=Value pairs in a text message */
5236     #define MAX_KEY_VALUE_PAIRS 8192
5237     diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
5238     index 40ab20439fee..a36b7227a15a 100644
5239     --- a/include/sound/rawmidi.h
5240     +++ b/include/sound/rawmidi.h
5241     @@ -77,9 +77,9 @@ struct snd_rawmidi_substream {
5242     struct list_head list; /* list of all substream for given stream */
5243     int stream; /* direction */
5244     int number; /* substream number */
5245     - unsigned int opened: 1, /* open flag */
5246     - append: 1, /* append flag (merge more streams) */
5247     - active_sensing: 1; /* send active sensing when close */
5248     + bool opened; /* open flag */
5249     + bool append; /* append flag (merge more streams) */
5250     + bool active_sensing; /* send active sensing when close */
5251     int use_count; /* use counter (for output) */
5252     size_t bytes;
5253     struct snd_rawmidi *rmidi;
5254     diff --git a/ipc/sem.c b/ipc/sem.c
5255     index ec97a7072413..fe12ea8dd2b3 100644
5256     --- a/ipc/sem.c
5257     +++ b/ipc/sem.c
5258     @@ -2368,11 +2368,9 @@ void exit_sem(struct task_struct *tsk)
5259     ipc_assert_locked_object(&sma->sem_perm);
5260     list_del(&un->list_id);
5261    
5262     - /* we are the last process using this ulp, acquiring ulp->lock
5263     - * isn't required. Besides that, we are also protected against
5264     - * IPC_RMID as we hold sma->sem_perm lock now
5265     - */
5266     + spin_lock(&ulp->lock);
5267     list_del_rcu(&un->list_proc);
5268     + spin_unlock(&ulp->lock);
5269    
5270     /* perform adjustments registered in un */
5271     for (i = 0; i < sma->sem_nsems; i++) {
5272     diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
5273     index 5b9da0954a27..3668a0bc18ec 100644
5274     --- a/kernel/bpf/offload.c
5275     +++ b/kernel/bpf/offload.c
5276     @@ -321,7 +321,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
5277    
5278     ulen = info->jited_prog_len;
5279     info->jited_prog_len = aux->offload->jited_len;
5280     - if (info->jited_prog_len & ulen) {
5281     + if (info->jited_prog_len && ulen) {
5282     uinsns = u64_to_user_ptr(info->jited_prog_insns);
5283     ulen = min_t(u32, info->jited_prog_len, ulen);
5284     if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
5285     diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
5286     index 3924fbe829d4..c9d8eb7f5c02 100644
5287     --- a/kernel/irq/internals.h
5288     +++ b/kernel/irq/internals.h
5289     @@ -128,8 +128,6 @@ static inline void unregister_handler_proc(unsigned int irq,
5290    
5291     extern bool irq_can_set_affinity_usr(unsigned int irq);
5292    
5293     -extern int irq_select_affinity_usr(unsigned int irq);
5294     -
5295     extern void irq_set_thread_affinity(struct irq_desc *desc);
5296    
5297     extern int irq_do_set_affinity(struct irq_data *data,
5298     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
5299     index 1753486b440c..55b080101a20 100644
5300     --- a/kernel/irq/manage.c
5301     +++ b/kernel/irq/manage.c
5302     @@ -442,23 +442,9 @@ int irq_setup_affinity(struct irq_desc *desc)
5303     {
5304     return irq_select_affinity(irq_desc_get_irq(desc));
5305     }
5306     -#endif
5307     +#endif /* CONFIG_AUTO_IRQ_AFFINITY */
5308     +#endif /* CONFIG_SMP */
5309    
5310     -/*
5311     - * Called when a bogus affinity is set via /proc/irq
5312     - */
5313     -int irq_select_affinity_usr(unsigned int irq)
5314     -{
5315     - struct irq_desc *desc = irq_to_desc(irq);
5316     - unsigned long flags;
5317     - int ret;
5318     -
5319     - raw_spin_lock_irqsave(&desc->lock, flags);
5320     - ret = irq_setup_affinity(desc);
5321     - raw_spin_unlock_irqrestore(&desc->lock, flags);
5322     - return ret;
5323     -}
5324     -#endif
5325    
5326     /**
5327     * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
5328     diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
5329     index cfc4f088a0e7..f5958c55406f 100644
5330     --- a/kernel/irq/proc.c
5331     +++ b/kernel/irq/proc.c
5332     @@ -111,6 +111,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
5333     return show_irq_affinity(AFFINITY_LIST, m);
5334     }
5335    
5336     +#ifndef CONFIG_AUTO_IRQ_AFFINITY
5337     +static inline int irq_select_affinity_usr(unsigned int irq)
5338     +{
5339     + /*
5340     + * If the interrupt is started up already then this fails. The
5341     + * interrupt is assigned to an online CPU already. There is no
5342     + * point to move it around randomly. Tell user space that the
5343     + * selected mask is bogus.
5344     + *
5345     + * If not then any change to the affinity is pointless because the
5346     + * startup code invokes irq_setup_affinity() which will select
5347     + * a online CPU anyway.
5348     + */
5349     + return -EINVAL;
5350     +}
5351     +#else
5352     +/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
5353     +static inline int irq_select_affinity_usr(unsigned int irq)
5354     +{
5355     + return irq_select_affinity(irq);
5356     +}
5357     +#endif
5358    
5359     static ssize_t write_irq_affinity(int type, struct file *file,
5360     const char __user *buffer, size_t count, loff_t *pos)
5361     diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
5362     index ce8f6748678a..9154e745f097 100644
5363     --- a/kernel/sched/psi.c
5364     +++ b/kernel/sched/psi.c
5365     @@ -1199,6 +1199,9 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
5366     if (static_branch_likely(&psi_disabled))
5367     return -EOPNOTSUPP;
5368    
5369     + if (!nbytes)
5370     + return -EINVAL;
5371     +
5372     buf_size = min(nbytes, sizeof(buf));
5373     if (copy_from_user(buf, user_buf, buf_size))
5374     return -EFAULT;
5375     diff --git a/lib/stackdepot.c b/lib/stackdepot.c
5376     index ed717dd08ff3..81c69c08d1d1 100644
5377     --- a/lib/stackdepot.c
5378     +++ b/lib/stackdepot.c
5379     @@ -83,15 +83,19 @@ static bool init_stack_slab(void **prealloc)
5380     return true;
5381     if (stack_slabs[depot_index] == NULL) {
5382     stack_slabs[depot_index] = *prealloc;
5383     + *prealloc = NULL;
5384     } else {
5385     - stack_slabs[depot_index + 1] = *prealloc;
5386     + /* If this is the last depot slab, do not touch the next one. */
5387     + if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
5388     + stack_slabs[depot_index + 1] = *prealloc;
5389     + *prealloc = NULL;
5390     + }
5391     /*
5392     * This smp_store_release pairs with smp_load_acquire() from
5393     * |next_slab_inited| above and in stack_depot_save().
5394     */
5395     smp_store_release(&next_slab_inited, 1);
5396     }
5397     - *prealloc = NULL;
5398     return true;
5399     }
5400    
5401     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
5402     index b5b4e310fe70..ae9044bc9f80 100644
5403     --- a/mm/memcontrol.c
5404     +++ b/mm/memcontrol.c
5405     @@ -418,8 +418,10 @@ int memcg_expand_shrinker_maps(int new_id)
5406     if (mem_cgroup_is_root(memcg))
5407     continue;
5408     ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
5409     - if (ret)
5410     + if (ret) {
5411     + mem_cgroup_iter_break(NULL, memcg);
5412     goto unlock;
5413     + }
5414     }
5415     unlock:
5416     if (!ret)
5417     diff --git a/mm/mmap.c b/mm/mmap.c
5418     index 4390dbea4aa5..514cc19c5916 100644
5419     --- a/mm/mmap.c
5420     +++ b/mm/mmap.c
5421     @@ -195,8 +195,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
5422     bool downgraded = false;
5423     LIST_HEAD(uf);
5424    
5425     - brk = untagged_addr(brk);
5426     -
5427     if (down_write_killable(&mm->mmap_sem))
5428     return -EINTR;
5429    
5430     @@ -1583,8 +1581,6 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
5431     struct file *file = NULL;
5432     unsigned long retval;
5433    
5434     - addr = untagged_addr(addr);
5435     -
5436     if (!(flags & MAP_ANONYMOUS)) {
5437     audit_mmap_fd(fd, flags);
5438     file = fget(fd);
5439     diff --git a/mm/mremap.c b/mm/mremap.c
5440     index 1fc8a29fbe3f..1d98281f7204 100644
5441     --- a/mm/mremap.c
5442     +++ b/mm/mremap.c
5443     @@ -607,7 +607,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
5444     LIST_HEAD(uf_unmap);
5445    
5446     addr = untagged_addr(addr);
5447     - new_addr = untagged_addr(new_addr);
5448    
5449     if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
5450     return ret;
5451     diff --git a/mm/sparse.c b/mm/sparse.c
5452     index 69b41b6046a5..a5e5c1c3a2a8 100644
5453     --- a/mm/sparse.c
5454     +++ b/mm/sparse.c
5455     @@ -884,7 +884,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
5456     * Poison uninitialized struct pages in order to catch invalid flags
5457     * combinations.
5458     */
5459     - page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
5460     + page_init_poison(memmap, sizeof(struct page) * nr_pages);
5461    
5462     ms = __nr_to_section(section_nr);
5463     set_section_nid(section_nr, nid);
5464     diff --git a/mm/vmscan.c b/mm/vmscan.c
5465     index e7f10c4b40f0..7fde5f904c8d 100644
5466     --- a/mm/vmscan.c
5467     +++ b/mm/vmscan.c
5468     @@ -2530,10 +2530,13 @@ out:
5469     /*
5470     * Scan types proportional to swappiness and
5471     * their relative recent reclaim efficiency.
5472     - * Make sure we don't miss the last page
5473     - * because of a round-off error.
5474     + * Make sure we don't miss the last page on
5475     + * the offlined memory cgroups because of a
5476     + * round-off error.
5477     */
5478     - scan = DIV64_U64_ROUND_UP(scan * fraction[file],
5479     + scan = mem_cgroup_online(memcg) ?
5480     + div64_u64(scan * fraction[file], denominator) :
5481     + DIV64_U64_ROUND_UP(scan * fraction[file],
5482     denominator);
5483     break;
5484     case SCAN_FILE:
5485     diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
5486     index ced3fc8fad7c..6520d9ec1297 100644
5487     --- a/net/netfilter/xt_hashlimit.c
5488     +++ b/net/netfilter/xt_hashlimit.c
5489     @@ -851,6 +851,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
5490     return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
5491     }
5492    
5493     +#define HASHLIMIT_MAX_SIZE 1048576
5494     +
5495     static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
5496     struct xt_hashlimit_htable **hinfo,
5497     struct hashlimit_cfg3 *cfg,
5498     @@ -861,6 +863,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
5499    
5500     if (cfg->gc_interval == 0 || cfg->expire == 0)
5501     return -EINVAL;
5502     + if (cfg->size > HASHLIMIT_MAX_SIZE) {
5503     + cfg->size = HASHLIMIT_MAX_SIZE;
5504     + pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
5505     + }
5506     + if (cfg->max > HASHLIMIT_MAX_SIZE) {
5507     + cfg->max = HASHLIMIT_MAX_SIZE;
5508     + pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
5509     + }
5510     if (par->family == NFPROTO_IPV4) {
5511     if (cfg->srcmask > 32 || cfg->dstmask > 32)
5512     return -EINVAL;
5513     diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
5514     index dbdbc4f18b5e..c9f34b0a11df 100644
5515     --- a/net/rxrpc/call_object.c
5516     +++ b/net/rxrpc/call_object.c
5517     @@ -562,11 +562,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
5518     }
5519    
5520     /*
5521     - * Final call destruction under RCU.
5522     + * Final call destruction - but must be done in process context.
5523     */
5524     -static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
5525     +static void rxrpc_destroy_call(struct work_struct *work)
5526     {
5527     - struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
5528     + struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
5529     struct rxrpc_net *rxnet = call->rxnet;
5530    
5531     rxrpc_put_connection(call->conn);
5532     @@ -578,6 +578,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
5533     wake_up_var(&rxnet->nr_calls);
5534     }
5535    
5536     +/*
5537     + * Final call destruction under RCU.
5538     + */
5539     +static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
5540     +{
5541     + struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
5542     +
5543     + if (in_softirq()) {
5544     + INIT_WORK(&call->processor, rxrpc_destroy_call);
5545     + if (!rxrpc_queue_work(&call->processor))
5546     + BUG();
5547     + } else {
5548     + rxrpc_destroy_call(&call->processor);
5549     + }
5550     +}
5551     +
5552     /*
5553     * clean up a call
5554     */
5555     diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
5556     index 6d9592f0ae1d..cc93157fa950 100644
5557     --- a/sound/core/seq/seq_clientmgr.c
5558     +++ b/sound/core/seq/seq_clientmgr.c
5559     @@ -580,7 +580,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
5560     event->queue = queue;
5561     event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
5562     if (real_time) {
5563     - event->time.time = snd_seq_timer_get_cur_time(q->timer);
5564     + event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
5565     event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
5566     } else {
5567     event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
5568     @@ -1659,7 +1659,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
5569     tmr = queue->timer;
5570     status->events = queue->tickq->cells + queue->timeq->cells;
5571    
5572     - status->time = snd_seq_timer_get_cur_time(tmr);
5573     + status->time = snd_seq_timer_get_cur_time(tmr, true);
5574     status->tick = snd_seq_timer_get_cur_tick(tmr);
5575    
5576     status->running = tmr->running;
5577     diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
5578     index caf68bf42f13..71a6ea62c3be 100644
5579     --- a/sound/core/seq/seq_queue.c
5580     +++ b/sound/core/seq/seq_queue.c
5581     @@ -238,6 +238,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
5582     {
5583     unsigned long flags;
5584     struct snd_seq_event_cell *cell;
5585     + snd_seq_tick_time_t cur_tick;
5586     + snd_seq_real_time_t cur_time;
5587    
5588     if (q == NULL)
5589     return;
5590     @@ -254,17 +256,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
5591    
5592     __again:
5593     /* Process tick queue... */
5594     + cur_tick = snd_seq_timer_get_cur_tick(q->timer);
5595     for (;;) {
5596     - cell = snd_seq_prioq_cell_out(q->tickq,
5597     - &q->timer->tick.cur_tick);
5598     + cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
5599     if (!cell)
5600     break;
5601     snd_seq_dispatch_event(cell, atomic, hop);
5602     }
5603    
5604     /* Process time queue... */
5605     + cur_time = snd_seq_timer_get_cur_time(q->timer, false);
5606     for (;;) {
5607     - cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
5608     + cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
5609     if (!cell)
5610     break;
5611     snd_seq_dispatch_event(cell, atomic, hop);
5612     @@ -392,6 +395,7 @@ int snd_seq_queue_check_access(int queueid, int client)
5613     int snd_seq_queue_set_owner(int queueid, int client, int locked)
5614     {
5615     struct snd_seq_queue *q = queueptr(queueid);
5616     + unsigned long flags;
5617    
5618     if (q == NULL)
5619     return -EINVAL;
5620     @@ -401,8 +405,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
5621     return -EPERM;
5622     }
5623    
5624     + spin_lock_irqsave(&q->owner_lock, flags);
5625     q->locked = locked ? 1 : 0;
5626     q->owner = client;
5627     + spin_unlock_irqrestore(&q->owner_lock, flags);
5628     queue_access_unlock(q);
5629     queuefree(q);
5630    
5631     @@ -539,15 +545,17 @@ void snd_seq_queue_client_termination(int client)
5632     unsigned long flags;
5633     int i;
5634     struct snd_seq_queue *q;
5635     + bool matched;
5636    
5637     for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
5638     if ((q = queueptr(i)) == NULL)
5639     continue;
5640     spin_lock_irqsave(&q->owner_lock, flags);
5641     - if (q->owner == client)
5642     + matched = (q->owner == client);
5643     + if (matched)
5644     q->klocked = 1;
5645     spin_unlock_irqrestore(&q->owner_lock, flags);
5646     - if (q->owner == client) {
5647     + if (matched) {
5648     if (q->timer->running)
5649     snd_seq_timer_stop(q->timer);
5650     snd_seq_timer_reset(q->timer);
5651     @@ -739,6 +747,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
5652     int i, bpm;
5653     struct snd_seq_queue *q;
5654     struct snd_seq_timer *tmr;
5655     + bool locked;
5656     + int owner;
5657    
5658     for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
5659     if ((q = queueptr(i)) == NULL)
5660     @@ -750,9 +760,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
5661     else
5662     bpm = 0;
5663    
5664     + spin_lock_irq(&q->owner_lock);
5665     + locked = q->locked;
5666     + owner = q->owner;
5667     + spin_unlock_irq(&q->owner_lock);
5668     +
5669     snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
5670     - snd_iprintf(buffer, "owned by client : %d\n", q->owner);
5671     - snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free");
5672     + snd_iprintf(buffer, "owned by client : %d\n", owner);
5673     + snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");
5674     snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
5675     snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
5676     snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
5677     diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
5678     index 3bc6095df44d..0b43fc5fe349 100644
5679     --- a/sound/core/seq/seq_timer.c
5680     +++ b/sound/core/seq/seq_timer.c
5681     @@ -422,14 +422,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
5682     }
5683    
5684     /* return current 'real' time. use timeofday() to get better granularity. */
5685     -snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
5686     +snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
5687     + bool adjust_ktime)
5688     {
5689     snd_seq_real_time_t cur_time;
5690     unsigned long flags;
5691    
5692     spin_lock_irqsave(&tmr->lock, flags);
5693     cur_time = tmr->cur_time;
5694     - if (tmr->running) {
5695     + if (adjust_ktime && tmr->running) {
5696     struct timespec64 tm;
5697    
5698     ktime_get_ts64(&tm);
5699     @@ -446,7 +447,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
5700     high PPQ values) */
5701     snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
5702     {
5703     - return tmr->tick.cur_tick;
5704     + snd_seq_tick_time_t cur_tick;
5705     + unsigned long flags;
5706     +
5707     + spin_lock_irqsave(&tmr->lock, flags);
5708     + cur_tick = tmr->tick.cur_tick;
5709     + spin_unlock_irqrestore(&tmr->lock, flags);
5710     + return cur_tick;
5711     }
5712    
5713    
5714     diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
5715     index 66c3e344eae3..4bec57df8158 100644
5716     --- a/sound/core/seq/seq_timer.h
5717     +++ b/sound/core/seq/seq_timer.h
5718     @@ -120,7 +120,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
5719     int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
5720     int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
5721     int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
5722     -snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
5723     +snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
5724     + bool adjust_ktime);
5725     snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
5726    
5727     extern int seq_default_timer_class;
5728     diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
5729     index 886cb7811bd6..2efee794cac6 100644
5730     --- a/sound/hda/hdmi_chmap.c
5731     +++ b/sound/hda/hdmi_chmap.c
5732     @@ -250,7 +250,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
5733    
5734     for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
5735     if (spk_alloc & (1 << i))
5736     - j += snprintf(buf + j, buflen - j, " %s",
5737     + j += scnprintf(buf + j, buflen - j, " %s",
5738     cea_speaker_allocation_names[i]);
5739     }
5740     buf[j] = '\0'; /* necessary when j == 0 */
5741     diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
5742     index a2fb19129219..6cb72336433a 100644
5743     --- a/sound/pci/hda/hda_codec.c
5744     +++ b/sound/pci/hda/hda_codec.c
5745     @@ -4019,7 +4019,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
5746    
5747     for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
5748     if (pcm & (AC_SUPPCM_BITS_8 << i))
5749     - j += snprintf(buf + j, buflen - j, " %d", bits[i]);
5750     + j += scnprintf(buf + j, buflen - j, " %d", bits[i]);
5751    
5752     buf[j] = '\0'; /* necessary when j == 0 */
5753     }
5754     diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
5755     index d081fb2880a0..82cf1da2ff12 100644
5756     --- a/sound/pci/hda/hda_eld.c
5757     +++ b/sound/pci/hda/hda_eld.c
5758     @@ -360,7 +360,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
5759    
5760     for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
5761     if (pcm & (1 << i))
5762     - j += snprintf(buf + j, buflen - j, " %d",
5763     + j += scnprintf(buf + j, buflen - j, " %d",
5764     alsa_rates[i]);
5765    
5766     buf[j] = '\0'; /* necessary when j == 0 */
5767     diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
5768     index fcc34417cbce..6dbe99131bc4 100644
5769     --- a/sound/pci/hda/hda_sysfs.c
5770     +++ b/sound/pci/hda/hda_sysfs.c
5771     @@ -222,7 +222,7 @@ static ssize_t init_verbs_show(struct device *dev,
5772     int i, len = 0;
5773     mutex_lock(&codec->user_mutex);
5774     snd_array_for_each(&codec->init_verbs, i, v) {
5775     - len += snprintf(buf + len, PAGE_SIZE - len,
5776     + len += scnprintf(buf + len, PAGE_SIZE - len,
5777     "0x%02x 0x%03x 0x%04x\n",
5778     v->nid, v->verb, v->param);
5779     }
5780     @@ -272,7 +272,7 @@ static ssize_t hints_show(struct device *dev,
5781     int i, len = 0;
5782     mutex_lock(&codec->user_mutex);
5783     snd_array_for_each(&codec->hints, i, hint) {
5784     - len += snprintf(buf + len, PAGE_SIZE - len,
5785     + len += scnprintf(buf + len, PAGE_SIZE - len,
5786     "%s = %s\n", hint->key, hint->val);
5787     }
5788     mutex_unlock(&codec->user_mutex);
5789     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5790     index f162e607fc6c..4f78b40831d8 100644
5791     --- a/sound/pci/hda/patch_realtek.c
5792     +++ b/sound/pci/hda/patch_realtek.c
5793     @@ -2447,7 +2447,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5794     SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
5795     SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
5796     SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
5797     + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
5798     SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
5799     + SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
5800     SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
5801     SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
5802     SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
5803     diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
5804     index d1dc8e6366dc..71f2d42188c4 100644
5805     --- a/sound/soc/atmel/Kconfig
5806     +++ b/sound/soc/atmel/Kconfig
5807     @@ -10,11 +10,11 @@ config SND_ATMEL_SOC
5808     if SND_ATMEL_SOC
5809    
5810     config SND_ATMEL_SOC_PDC
5811     - tristate
5812     + bool
5813     depends on HAS_DMA
5814    
5815     config SND_ATMEL_SOC_DMA
5816     - tristate
5817     + bool
5818     select SND_SOC_GENERIC_DMAENGINE_PCM
5819    
5820     config SND_ATMEL_SOC_SSC
5821     diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
5822     index 1f6890ed3738..c7d2989791be 100644
5823     --- a/sound/soc/atmel/Makefile
5824     +++ b/sound/soc/atmel/Makefile
5825     @@ -6,8 +6,14 @@ snd-soc-atmel_ssc_dai-objs := atmel_ssc_dai.o
5826     snd-soc-atmel-i2s-objs := atmel-i2s.o
5827     snd-soc-mchp-i2s-mcc-objs := mchp-i2s-mcc.o
5828    
5829     -obj-$(CONFIG_SND_ATMEL_SOC_PDC) += snd-soc-atmel-pcm-pdc.o
5830     -obj-$(CONFIG_SND_ATMEL_SOC_DMA) += snd-soc-atmel-pcm-dma.o
5831     +# pdc and dma need to both be built-in if any user of
5832     +# ssc is built-in.
5833     +ifdef CONFIG_SND_ATMEL_SOC_PDC
5834     +obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-pdc.o
5835     +endif
5836     +ifdef CONFIG_SND_ATMEL_SOC_DMA
5837     +obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-dma.o
5838     +endif
5839     obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
5840     obj-$(CONFIG_SND_ATMEL_SOC_I2S) += snd-soc-atmel-i2s.o
5841     obj-$(CONFIG_SND_MCHP_SOC_I2S_MCC) += snd-soc-mchp-i2s-mcc.o
5842     diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
5843     index b517e4bc1b87..41b83ecaf008 100644
5844     --- a/sound/soc/fsl/fsl_sai.c
5845     +++ b/sound/soc/fsl/fsl_sai.c
5846     @@ -1019,12 +1019,24 @@ static int fsl_sai_probe(struct platform_device *pdev)
5847     ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
5848     &fsl_sai_dai, 1);
5849     if (ret)
5850     - return ret;
5851     + goto err_pm_disable;
5852    
5853     - if (sai->soc_data->use_imx_pcm)
5854     - return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
5855     - else
5856     - return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
5857     + if (sai->soc_data->use_imx_pcm) {
5858     + ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
5859     + if (ret)
5860     + goto err_pm_disable;
5861     + } else {
5862     + ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
5863     + if (ret)
5864     + goto err_pm_disable;
5865     + }
5866     +
5867     + return ret;
5868     +
5869     +err_pm_disable:
5870     + pm_runtime_disable(&pdev->dev);
5871     +
5872     + return ret;
5873     }
5874    
5875     static int fsl_sai_remove(struct platform_device *pdev)
5876     diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
5877     index b6378f025836..935b5375ecc5 100644
5878     --- a/sound/soc/soc-dapm.c
5879     +++ b/sound/soc/soc-dapm.c
5880     @@ -3888,9 +3888,6 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w,
5881     runtime->rate = params_rate(params);
5882    
5883     out:
5884     - if (ret < 0)
5885     - kfree(runtime);
5886     -
5887     kfree(params);
5888     return ret;
5889     }
5890     diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
5891     index 1923b0c36bce..3f645200d3a5 100644
5892     --- a/sound/soc/sof/intel/hda-dai.c
5893     +++ b/sound/soc/sof/intel/hda-dai.c
5894     @@ -443,6 +443,10 @@ struct snd_soc_dai_driver skl_dai[] = {
5895     .name = "iDisp3 Pin",
5896     .ops = &hda_link_dai_ops,
5897     },
5898     +{
5899     + .name = "iDisp4 Pin",
5900     + .ops = &hda_link_dai_ops,
5901     +},
5902     {
5903     .name = "Analog CPU DAI",
5904     .ops = &hda_link_dai_ops,
5905     diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
5906     index 55798bc8eae2..686561df8e13 100644
5907     --- a/sound/soc/sunxi/sun8i-codec.c
5908     +++ b/sound/soc/sunxi/sun8i-codec.c
5909     @@ -80,6 +80,7 @@
5910    
5911     #define SUN8I_SYS_SR_CTRL_AIF1_FS_MASK GENMASK(15, 12)
5912     #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8)
5913     +#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK GENMASK(3, 2)
5914     #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4)
5915     #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6)
5916     #define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
5917     @@ -241,7 +242,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
5918     return -EINVAL;
5919     }
5920     regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
5921     - BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT),
5922     + SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK,
5923     value << SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT);
5924    
5925     return 0;
5926     diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
5927     index 07f5b462c2ef..aa43e0bd210c 100644
5928     --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
5929     +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
5930     @@ -3,6 +3,11 @@
5931    
5932     #include "test_progs.h"
5933    
5934     +#define TCP_REPAIR 19 /* TCP sock is under repair right now */
5935     +
5936     +#define TCP_REPAIR_ON 1
5937     +#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
5938     +
5939     static int connected_socket_v4(void)
5940     {
5941     struct sockaddr_in addr = {