Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.20/0100-4.20.1-all-fixes.patch

Revision 3277
Mon Mar 4 10:34:37 2019 UTC by niro
File size: 214378 byte(s)
-linux-4.20.1
1 niro 3277 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2     index aefd358a5ca3..bb5c9dc4d270 100644
3     --- a/Documentation/admin-guide/kernel-parameters.txt
4     +++ b/Documentation/admin-guide/kernel-parameters.txt
5     @@ -2096,6 +2096,9 @@
6     off
7     Disables hypervisor mitigations and doesn't
8     emit any warnings.
9     + It also drops the swap size and available
10     + RAM limit restriction on both hypervisor and
11     + bare metal.
12    
13     Default is 'flush'.
14    
15     diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
16     index b85dd80510b0..9af977384168 100644
17     --- a/Documentation/admin-guide/l1tf.rst
18     +++ b/Documentation/admin-guide/l1tf.rst
19     @@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:
20    
21     off Disables hypervisor mitigations and doesn't emit any
22     warnings.
23     + It also drops the swap size and available RAM limit restrictions
24     + on both hypervisor and bare metal.
25     +
26     ============ =============================================================
27    
28     The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
29     @@ -576,7 +579,8 @@ Default mitigations
30     The kernel default mitigations for vulnerable processors are:
31    
32     - PTE inversion to protect against malicious user space. This is done
33     - unconditionally and cannot be controlled.
34     + unconditionally and cannot be controlled. The swap storage is limited
35     + to ~16TB.
36    
37     - L1D conditional flushing on VMENTER when EPT is enabled for
38     a guest.
39     diff --git a/Makefile b/Makefile
40     index 7a2a9a175756..84d2f8deea30 100644
41     --- a/Makefile
42     +++ b/Makefile
43     @@ -1,7 +1,7 @@
44     # SPDX-License-Identifier: GPL-2.0
45     VERSION = 4
46     PATCHLEVEL = 20
47     -SUBLEVEL = 0
48     +SUBLEVEL = 1
49     EXTRAVERSION =
50     NAME = Shy Crocodile
51    
52     diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
53     index 6dd783557330..dadb494d83fd 100644
54     --- a/arch/arc/Kconfig
55     +++ b/arch/arc/Kconfig
56     @@ -26,6 +26,7 @@ config ARC
57     select GENERIC_IRQ_SHOW
58     select GENERIC_PCI_IOMAP
59     select GENERIC_PENDING_IRQ if SMP
60     + select GENERIC_SCHED_CLOCK
61     select GENERIC_SMP_IDLE_THREAD
62     select HAVE_ARCH_KGDB
63     select HAVE_ARCH_TRACEHOOK
64     diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
65     index 03611d50c5a9..e84544b220b9 100644
66     --- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
67     +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
68     @@ -26,8 +26,7 @@
69     "Speakers", "SPKL",
70     "Speakers", "SPKR";
71    
72     - assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
73     - <&clock CLK_MOUT_EPLL>,
74     + assigned-clocks = <&clock CLK_MOUT_EPLL>,
75     <&clock CLK_MOUT_MAU_EPLL>,
76     <&clock CLK_MOUT_USER_MAU_EPLL>,
77     <&clock_audss EXYNOS_MOUT_AUDSS>,
78     @@ -36,15 +35,13 @@
79     <&clock_audss EXYNOS_DOUT_AUD_BUS>,
80     <&clock_audss EXYNOS_DOUT_I2S>;
81    
82     - assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
83     - <&clock CLK_FOUT_EPLL>,
84     + assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
85     <&clock CLK_MOUT_EPLL>,
86     <&clock CLK_MOUT_MAU_EPLL>,
87     <&clock CLK_MAU_EPLL>,
88     <&clock_audss EXYNOS_MOUT_AUDSS>;
89    
90     assigned-clock-rates = <0>,
91     - <0>,
92     <0>,
93     <0>,
94     <0>,
95     @@ -84,4 +81,6 @@
96    
97     &i2s0 {
98     status = "okay";
99     + assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
100     + assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
101     };
102     diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
103     index 4a30cc849b00..122174ea9e0a 100644
104     --- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
105     +++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
106     @@ -33,8 +33,7 @@
107     compatible = "samsung,odroid-xu3-audio";
108     model = "Odroid-XU4";
109    
110     - assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
111     - <&clock CLK_MOUT_EPLL>,
112     + assigned-clocks = <&clock CLK_MOUT_EPLL>,
113     <&clock CLK_MOUT_MAU_EPLL>,
114     <&clock CLK_MOUT_USER_MAU_EPLL>,
115     <&clock_audss EXYNOS_MOUT_AUDSS>,
116     @@ -43,15 +42,13 @@
117     <&clock_audss EXYNOS_DOUT_AUD_BUS>,
118     <&clock_audss EXYNOS_DOUT_I2S>;
119    
120     - assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
121     - <&clock CLK_FOUT_EPLL>,
122     + assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
123     <&clock CLK_MOUT_EPLL>,
124     <&clock CLK_MOUT_MAU_EPLL>,
125     <&clock CLK_MAU_EPLL>,
126     <&clock_audss EXYNOS_MOUT_AUDSS>;
127    
128     assigned-clock-rates = <0>,
129     - <0>,
130     <0>,
131     <0>,
132     <0>,
133     @@ -79,6 +76,8 @@
134    
135     &i2s0 {
136     status = "okay";
137     + assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
138     + assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
139     };
140    
141     &pwm {
142     diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
143     index 6f602af5263c..2dafd936d84d 100644
144     --- a/arch/arm64/include/asm/kvm_arm.h
145     +++ b/arch/arm64/include/asm/kvm_arm.h
146     @@ -104,7 +104,7 @@
147     TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
148    
149     /* VTCR_EL2 Registers bits */
150     -#define VTCR_EL2_RES1 (1 << 31)
151     +#define VTCR_EL2_RES1 (1U << 31)
152     #define VTCR_EL2_HD (1 << 22)
153     #define VTCR_EL2_HA (1 << 21)
154     #define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
155     diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
156     index b13ca091f833..85d5c1026204 100644
157     --- a/arch/arm64/include/asm/unistd.h
158     +++ b/arch/arm64/include/asm/unistd.h
159     @@ -40,8 +40,9 @@
160     * The following SVCs are ARM private.
161     */
162     #define __ARM_NR_COMPAT_BASE 0x0f0000
163     -#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
164     -#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
165     +#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2)
166     +#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
167     +#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
168    
169     #define __NR_compat_syscalls 399
170     #endif
171     diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
172     index 32653d156747..bc348ab3dd6b 100644
173     --- a/arch/arm64/kernel/sys_compat.c
174     +++ b/arch/arm64/kernel/sys_compat.c
175     @@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
176     /*
177     * Handle all unrecognised system calls.
178     */
179     -long compat_arm_syscall(struct pt_regs *regs)
180     +long compat_arm_syscall(struct pt_regs *regs, int scno)
181     {
182     - unsigned int no = regs->regs[7];
183     void __user *addr;
184    
185     - switch (no) {
186     + switch (scno) {
187     /*
188     * Flush a region from virtual address 'r0' to virtual address 'r1'
189     * _exclusive_. There is no alignment requirement on either address;
190     @@ -102,12 +101,12 @@ long compat_arm_syscall(struct pt_regs *regs)
191    
192     default:
193     /*
194     - * Calls 9f00xx..9f07ff are defined to return -ENOSYS
195     + * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
196     * if not implemented, rather than raising SIGILL. This
197     * way the calling program can gracefully determine whether
198     * a feature is supported.
199     */
200     - if ((no & 0xffff) <= 0x7ff)
201     + if (scno < __ARM_NR_COMPAT_END)
202     return -ENOSYS;
203     break;
204     }
205     @@ -116,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *regs)
206     (compat_thumb_mode(regs) ? 2 : 4);
207    
208     arm64_notify_die("Oops - bad compat syscall(2)", regs,
209     - SIGILL, ILL_ILLTRP, addr, no);
210     + SIGILL, ILL_ILLTRP, addr, scno);
211     return 0;
212     }
213     diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
214     index 032d22312881..5610ac01c1ec 100644
215     --- a/arch/arm64/kernel/syscall.c
216     +++ b/arch/arm64/kernel/syscall.c
217     @@ -13,16 +13,15 @@
218     #include <asm/thread_info.h>
219     #include <asm/unistd.h>
220    
221     -long compat_arm_syscall(struct pt_regs *regs);
222     -
223     +long compat_arm_syscall(struct pt_regs *regs, int scno);
224     long sys_ni_syscall(void);
225    
226     -asmlinkage long do_ni_syscall(struct pt_regs *regs)
227     +static long do_ni_syscall(struct pt_regs *regs, int scno)
228     {
229     #ifdef CONFIG_COMPAT
230     long ret;
231     if (is_compat_task()) {
232     - ret = compat_arm_syscall(regs);
233     + ret = compat_arm_syscall(regs, scno);
234     if (ret != -ENOSYS)
235     return ret;
236     }
237     @@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
238     syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
239     ret = __invoke_syscall(regs, syscall_fn);
240     } else {
241     - ret = do_ni_syscall(regs);
242     + ret = do_ni_syscall(regs, scno);
243     }
244    
245     regs->regs[0] = ret;
246     diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
247     index 4dbd9c69a96d..7fcc9c1a5f45 100644
248     --- a/arch/arm64/kvm/hyp/tlb.c
249     +++ b/arch/arm64/kvm/hyp/tlb.c
250     @@ -15,14 +15,19 @@
251     * along with this program. If not, see <http://www.gnu.org/licenses/>.
252     */
253    
254     +#include <linux/irqflags.h>
255     +
256     #include <asm/kvm_hyp.h>
257     #include <asm/kvm_mmu.h>
258     #include <asm/tlbflush.h>
259    
260     -static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
261     +static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
262     + unsigned long *flags)
263     {
264     u64 val;
265    
266     + local_irq_save(*flags);
267     +
268     /*
269     * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
270     * most TLB operations target EL2/EL0. In order to affect the
271     @@ -37,7 +42,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
272     isb();
273     }
274    
275     -static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
276     +static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
277     + unsigned long *flags)
278     {
279     __load_guest_stage2(kvm);
280     isb();
281     @@ -48,7 +54,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
282     __tlb_switch_to_guest_vhe,
283     ARM64_HAS_VIRT_HOST_EXTN);
284    
285     -static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
286     +static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
287     + unsigned long flags)
288     {
289     /*
290     * We're done with the TLB operation, let's restore the host's
291     @@ -56,9 +63,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
292     */
293     write_sysreg(0, vttbr_el2);
294     write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
295     + isb();
296     + local_irq_restore(flags);
297     }
298    
299     -static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
300     +static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
301     + unsigned long flags)
302     {
303     write_sysreg(0, vttbr_el2);
304     }
305     @@ -70,11 +80,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
306    
307     void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
308     {
309     + unsigned long flags;
310     +
311     dsb(ishst);
312    
313     /* Switch to requested VMID */
314     kvm = kern_hyp_va(kvm);
315     - __tlb_switch_to_guest()(kvm);
316     + __tlb_switch_to_guest()(kvm, &flags);
317    
318     /*
319     * We could do so much better if we had the VA as well.
320     @@ -117,36 +129,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
321     if (!has_vhe() && icache_is_vpipt())
322     __flush_icache_all();
323    
324     - __tlb_switch_to_host()(kvm);
325     + __tlb_switch_to_host()(kvm, flags);
326     }
327    
328     void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
329     {
330     + unsigned long flags;
331     +
332     dsb(ishst);
333    
334     /* Switch to requested VMID */
335     kvm = kern_hyp_va(kvm);
336     - __tlb_switch_to_guest()(kvm);
337     + __tlb_switch_to_guest()(kvm, &flags);
338    
339     __tlbi(vmalls12e1is);
340     dsb(ish);
341     isb();
342    
343     - __tlb_switch_to_host()(kvm);
344     + __tlb_switch_to_host()(kvm, flags);
345     }
346    
347     void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
348     {
349     struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
350     + unsigned long flags;
351    
352     /* Switch to requested VMID */
353     - __tlb_switch_to_guest()(kvm);
354     + __tlb_switch_to_guest()(kvm, &flags);
355    
356     __tlbi(vmalle1);
357     dsb(nsh);
358     isb();
359    
360     - __tlb_switch_to_host()(kvm);
361     + __tlb_switch_to_host()(kvm, flags);
362     }
363    
364     void __hyp_text __kvm_flush_vm_context(void)
365     diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
366     index 37fe58c19a90..542c3ede9722 100644
367     --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
368     +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
369     @@ -13,6 +13,7 @@
370     #include <stdint.h>
371     #include <stdio.h>
372     #include <stdlib.h>
373     +#include "../../../../include/linux/sizes.h"
374    
375     int main(int argc, char *argv[])
376     {
377     @@ -45,11 +46,11 @@ int main(int argc, char *argv[])
378     vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
379    
380     /*
381     - * Align with 16 bytes: "greater than that used for any standard data
382     - * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
383     + * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
384     + * which may be as large as 64KB depending on the kernel configuration.
385     */
386    
387     - vmlinuz_load_addr += (16 - vmlinux_size % 16);
388     + vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
389    
390     printf("0x%llx\n", vmlinuz_load_addr);
391    
392     diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
393     index 6c79e8a16a26..3ddbb98dff84 100644
394     --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
395     +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
396     @@ -286,7 +286,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
397     case 3:
398     return CVMX_HELPER_INTERFACE_MODE_LOOP;
399     case 4:
400     - return CVMX_HELPER_INTERFACE_MODE_RGMII;
401     + /* TODO: Implement support for AGL (RGMII). */
402     + return CVMX_HELPER_INTERFACE_MODE_DISABLED;
403     default:
404     return CVMX_HELPER_INTERFACE_MODE_DISABLED;
405     }
406     diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
407     index d4ea7a5b60cf..9e805317847d 100644
408     --- a/arch/mips/include/asm/atomic.h
409     +++ b/arch/mips/include/asm/atomic.h
410     @@ -306,7 +306,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
411     { \
412     long result; \
413     \
414     - if (kernel_uses_llsc && R10000_LLSC_WAR) { \
415     + if (kernel_uses_llsc) { \
416     long temp; \
417     \
418     __asm__ __volatile__( \
419     diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
420     index a41059d47d31..ed7ffe4e63a3 100644
421     --- a/arch/mips/include/asm/cpu-info.h
422     +++ b/arch/mips/include/asm/cpu-info.h
423     @@ -50,7 +50,7 @@ struct guest_info {
424     #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
425    
426     struct cpuinfo_mips {
427     - unsigned long asid_cache;
428     + u64 asid_cache;
429     #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
430     unsigned long asid_mask;
431     #endif
432     diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
433     index c9f7e231e66b..59c8b11c090e 100644
434     --- a/arch/mips/include/asm/mach-loongson64/mmzone.h
435     +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
436     @@ -21,6 +21,7 @@
437     #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL
438    
439     #define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
440     +#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
441    
442     #define LEVELS_PER_SLICE 128
443    
444     diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
445     index 0740be7d5d4a..24d6b42345fb 100644
446     --- a/arch/mips/include/asm/mmu.h
447     +++ b/arch/mips/include/asm/mmu.h
448     @@ -7,7 +7,7 @@
449     #include <linux/wait.h>
450    
451     typedef struct {
452     - unsigned long asid[NR_CPUS];
453     + u64 asid[NR_CPUS];
454     void *vdso;
455     atomic_t fp_mode_switching;
456    
457     diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
458     index 94414561de0e..a589585be21b 100644
459     --- a/arch/mips/include/asm/mmu_context.h
460     +++ b/arch/mips/include/asm/mmu_context.h
461     @@ -76,14 +76,14 @@ extern unsigned long pgd_current[];
462     * All unused by hardware upper bits will be considered
463     * as a software asid extension.
464     */
465     -static unsigned long asid_version_mask(unsigned int cpu)
466     +static inline u64 asid_version_mask(unsigned int cpu)
467     {
468     unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
469    
470     - return ~(asid_mask | (asid_mask - 1));
471     + return ~(u64)(asid_mask | (asid_mask - 1));
472     }
473    
474     -static unsigned long asid_first_version(unsigned int cpu)
475     +static inline u64 asid_first_version(unsigned int cpu)
476     {
477     return ~asid_version_mask(cpu) + 1;
478     }
479     @@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
480     static inline void
481     get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
482     {
483     - unsigned long asid = asid_cache(cpu);
484     + u64 asid = asid_cache(cpu);
485    
486     if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
487     if (cpu_has_vtag_icache)
488     flush_icache_all();
489     local_flush_tlb_all(); /* start new asid cycle */
490     - if (!asid) /* fix version if needed */
491     - asid = asid_first_version(cpu);
492     }
493    
494     cpu_context(cpu, mm) = asid_cache(cpu) = asid;
495     diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
496     index f085fba41da5..b826b8473e95 100644
497     --- a/arch/mips/include/asm/mmzone.h
498     +++ b/arch/mips/include/asm/mmzone.h
499     @@ -7,7 +7,18 @@
500     #define _ASM_MMZONE_H_
501    
502     #include <asm/page.h>
503     -#include <mmzone.h>
504     +
505     +#ifdef CONFIG_NEED_MULTIPLE_NODES
506     +# include <mmzone.h>
507     +#endif
508     +
509     +#ifndef pa_to_nid
510     +#define pa_to_nid(addr) 0
511     +#endif
512     +
513     +#ifndef nid_to_addrbase
514     +#define nid_to_addrbase(nid) 0
515     +#endif
516    
517     #ifdef CONFIG_DISCONTIGMEM
518    
519     diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
520     index 0036ea0c7173..93a9dce31f25 100644
521     --- a/arch/mips/include/asm/pgtable-64.h
522     +++ b/arch/mips/include/asm/pgtable-64.h
523     @@ -265,6 +265,11 @@ static inline int pmd_bad(pmd_t pmd)
524    
525     static inline int pmd_present(pmd_t pmd)
526     {
527     +#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
528     + if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
529     + return pmd_val(pmd) & _PAGE_PRESENT;
530     +#endif
531     +
532     return pmd_val(pmd) != (unsigned long) invalid_pte_table;
533     }
534    
535     diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
536     index d19b2d65336b..7f4a32d3345a 100644
537     --- a/arch/mips/include/asm/r4kcache.h
538     +++ b/arch/mips/include/asm/r4kcache.h
539     @@ -20,6 +20,7 @@
540     #include <asm/cpu-features.h>
541     #include <asm/cpu-type.h>
542     #include <asm/mipsmtregs.h>
543     +#include <asm/mmzone.h>
544     #include <linux/uaccess.h> /* for uaccess_kernel() */
545    
546     extern void (*r4k_blast_dcache)(void);
547     @@ -674,4 +675,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
548     __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
549     __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
550    
551     +/* Currently, this is very specific to Loongson-3 */
552     +#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
553     +static inline void blast_##pfx##cache##lsize##_node(long node) \
554     +{ \
555     + unsigned long start = CAC_BASE | nid_to_addrbase(node); \
556     + unsigned long end = start + current_cpu_data.desc.waysize; \
557     + unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
558     + unsigned long ws_end = current_cpu_data.desc.ways << \
559     + current_cpu_data.desc.waybit; \
560     + unsigned long ws, addr; \
561     + \
562     + for (ws = 0; ws < ws_end; ws += ws_inc) \
563     + for (addr = start; addr < end; addr += lsize * 32) \
564     + cache##lsize##_unroll32(addr|ws, indexop); \
565     +}
566     +
567     +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
568     +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
569     +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
570     +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
571     +
572     #endif /* _ASM_R4KCACHE_H */
573     diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
574     index 48a9c6b90e07..9df3ebdc7b0f 100644
575     --- a/arch/mips/kernel/vdso.c
576     +++ b/arch/mips/kernel/vdso.c
577     @@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
578    
579     /* Map delay slot emulation page */
580     base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
581     - VM_READ|VM_WRITE|VM_EXEC|
582     - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
583     + VM_READ | VM_EXEC |
584     + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
585     0, NULL);
586     if (IS_ERR_VALUE(base)) {
587     ret = base;
588     diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
589     index 5450f4d1c920..e2d46cb93ca9 100644
590     --- a/arch/mips/math-emu/dsemul.c
591     +++ b/arch/mips/math-emu/dsemul.c
592     @@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
593     {
594     int isa16 = get_isa16_mode(regs->cp0_epc);
595     mips_instruction break_math;
596     - struct emuframe __user *fr;
597     - int err, fr_idx;
598     + unsigned long fr_uaddr;
599     + struct emuframe fr;
600     + int fr_idx, ret;
601    
602     /* NOP is easy */
603     if (ir == 0)
604     @@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
605     fr_idx = alloc_emuframe();
606     if (fr_idx == BD_EMUFRAME_NONE)
607     return SIGBUS;
608     - fr = &dsemul_page()[fr_idx];
609    
610     /* Retrieve the appropriately encoded break instruction */
611     break_math = BREAK_MATH(isa16);
612    
613     /* Write the instructions to the frame */
614     if (isa16) {
615     - err = __put_user(ir >> 16,
616     - (u16 __user *)(&fr->emul));
617     - err |= __put_user(ir & 0xffff,
618     - (u16 __user *)((long)(&fr->emul) + 2));
619     - err |= __put_user(break_math >> 16,
620     - (u16 __user *)(&fr->badinst));
621     - err |= __put_user(break_math & 0xffff,
622     - (u16 __user *)((long)(&fr->badinst) + 2));
623     + union mips_instruction _emul = {
624     + .halfword = { ir >> 16, ir }
625     + };
626     + union mips_instruction _badinst = {
627     + .halfword = { break_math >> 16, break_math }
628     + };
629     +
630     + fr.emul = _emul.word;
631     + fr.badinst = _badinst.word;
632     } else {
633     - err = __put_user(ir, &fr->emul);
634     - err |= __put_user(break_math, &fr->badinst);
635     + fr.emul = ir;
636     + fr.badinst = break_math;
637     }
638    
639     - if (unlikely(err)) {
640     + /* Write the frame to user memory */
641     + fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
642     + ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
643     + FOLL_FORCE | FOLL_WRITE);
644     + if (unlikely(ret != sizeof(fr))) {
645     MIPS_FPU_EMU_INC_STATS(errors);
646     free_emuframe(fr_idx, current->mm);
647     return SIGBUS;
648     @@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
649     atomic_set(&current->thread.bd_emu_frame, fr_idx);
650    
651     /* Change user register context to execute the frame */
652     - regs->cp0_epc = (unsigned long)&fr->emul | isa16;
653     -
654     - /* Ensure the icache observes our newly written frame */
655     - flush_cache_sigtramp((unsigned long)&fr->emul);
656     + regs->cp0_epc = fr_uaddr | isa16;
657    
658     return 0;
659     }
660     diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
661     index 3466fcdae0ca..01848cdf2074 100644
662     --- a/arch/mips/mm/c-r3k.c
663     +++ b/arch/mips/mm/c-r3k.c
664     @@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
665     pmd_t *pmdp;
666     pte_t *ptep;
667    
668     - pr_debug("cpage[%08lx,%08lx]\n",
669     + pr_debug("cpage[%08llx,%08lx]\n",
670     cpu_context(smp_processor_id(), mm), addr);
671    
672     /* No ASID => no such page in the cache. */
673     diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
674     index 05bd77727fb9..2a6ad461286f 100644
675     --- a/arch/mips/mm/c-r4k.c
676     +++ b/arch/mips/mm/c-r4k.c
677     @@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void)
678     r4k_blast_scache = blast_scache128;
679     }
680    
681     +static void (*r4k_blast_scache_node)(long node);
682     +
683     +static void r4k_blast_scache_node_setup(void)
684     +{
685     + unsigned long sc_lsize = cpu_scache_line_size();
686     +
687     + if (current_cpu_type() != CPU_LOONGSON3)
688     + r4k_blast_scache_node = (void *)cache_noop;
689     + else if (sc_lsize == 16)
690     + r4k_blast_scache_node = blast_scache16_node;
691     + else if (sc_lsize == 32)
692     + r4k_blast_scache_node = blast_scache32_node;
693     + else if (sc_lsize == 64)
694     + r4k_blast_scache_node = blast_scache64_node;
695     + else if (sc_lsize == 128)
696     + r4k_blast_scache_node = blast_scache128_node;
697     +}
698     +
699     static inline void local_r4k___flush_cache_all(void * args)
700     {
701     switch (current_cpu_type()) {
702     case CPU_LOONGSON2:
703     - case CPU_LOONGSON3:
704     case CPU_R4000SC:
705     case CPU_R4000MC:
706     case CPU_R4400SC:
707     @@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args)
708     r4k_blast_scache();
709     break;
710    
711     + case CPU_LOONGSON3:
712     + /* Use get_ebase_cpunum() for both NUMA=y/n */
713     + r4k_blast_scache_node(get_ebase_cpunum() >> 2);
714     + break;
715     +
716     case CPU_BMIPS5000:
717     r4k_blast_scache();
718     __sync();
719     @@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
720    
721     preempt_disable();
722     if (cpu_has_inclusive_pcaches) {
723     - if (size >= scache_size)
724     - r4k_blast_scache();
725     - else
726     + if (size >= scache_size) {
727     + if (current_cpu_type() != CPU_LOONGSON3)
728     + r4k_blast_scache();
729     + else
730     + r4k_blast_scache_node(pa_to_nid(addr));
731     + } else {
732     blast_scache_range(addr, addr + size);
733     + }
734     preempt_enable();
735     __sync();
736     return;
737     @@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
738    
739     preempt_disable();
740     if (cpu_has_inclusive_pcaches) {
741     - if (size >= scache_size)
742     - r4k_blast_scache();
743     - else {
744     + if (size >= scache_size) {
745     + if (current_cpu_type() != CPU_LOONGSON3)
746     + r4k_blast_scache();
747     + else
748     + r4k_blast_scache_node(pa_to_nid(addr));
749     + } else {
750     /*
751     * There is no clearly documented alignment requirement
752     * for the cache instruction on MIPS processors and
753     @@ -1918,6 +1947,7 @@ void r4k_cache_init(void)
754     r4k_blast_scache_page_setup();
755     r4k_blast_scache_page_indexed_setup();
756     r4k_blast_scache_setup();
757     + r4k_blast_scache_node_setup();
758     #ifdef CONFIG_EVA
759     r4k_blast_dcache_user_page_setup();
760     r4k_blast_icache_user_page_setup();
761     diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
762     index 2d7cffcaa476..059187a3ded7 100644
763     --- a/arch/parisc/mm/init.c
764     +++ b/arch/parisc/mm/init.c
765     @@ -512,8 +512,8 @@ static void __init map_pages(unsigned long start_vaddr,
766    
767     void __init set_kernel_text_rw(int enable_read_write)
768     {
769     - unsigned long start = (unsigned long)__init_begin;
770     - unsigned long end = (unsigned long)_etext;
771     + unsigned long start = (unsigned long) _text;
772     + unsigned long end = (unsigned long) &data_start;
773    
774     map_pages(start, __pa(start), end-start,
775     PAGE_KERNEL_RWX, enable_read_write ? 1:0);
776     diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
777     index f6f469fc4073..1b395b85132b 100644
778     --- a/arch/powerpc/kernel/security.c
779     +++ b/arch/powerpc/kernel/security.c
780     @@ -22,7 +22,7 @@ enum count_cache_flush_type {
781     COUNT_CACHE_FLUSH_SW = 0x2,
782     COUNT_CACHE_FLUSH_HW = 0x4,
783     };
784     -static enum count_cache_flush_type count_cache_flush_type;
785     +static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
786    
787     bool barrier_nospec_enabled;
788     static bool no_nospec;
789     diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
790     index e6474a45cef5..6327fd79b0fb 100644
791     --- a/arch/powerpc/kernel/signal_32.c
792     +++ b/arch/powerpc/kernel/signal_32.c
793     @@ -1140,11 +1140,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
794     {
795     struct rt_sigframe __user *rt_sf;
796     struct pt_regs *regs = current_pt_regs();
797     + int tm_restore = 0;
798     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
799     struct ucontext __user *uc_transact;
800     unsigned long msr_hi;
801     unsigned long tmp;
802     - int tm_restore = 0;
803     #endif
804     /* Always make any pending restarted system calls return -EINTR */
805     current->restart_block.fn = do_no_restart_syscall;
806     @@ -1192,11 +1192,19 @@ SYSCALL_DEFINE0(rt_sigreturn)
807     goto bad;
808     }
809     }
810     - if (!tm_restore)
811     - /* Fall through, for non-TM restore */
812     + if (!tm_restore) {
813     + /*
814     + * Unset regs->msr because ucontext MSR TS is not
815     + * set, and recheckpoint was not called. This avoid
816     + * hitting a TM Bad thing at RFID
817     + */
818     + regs->msr &= ~MSR_TS_MASK;
819     + }
820     + /* Fall through, for non-TM restore */
821     #endif
822     - if (do_setcontext(&rt_sf->uc, regs, 1))
823     - goto bad;
824     + if (!tm_restore)
825     + if (do_setcontext(&rt_sf->uc, regs, 1))
826     + goto bad;
827    
828     /*
829     * It's not clear whether or why it is desirable to save the
830     diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
831     index 83d51bf586c7..daa28cb72272 100644
832     --- a/arch/powerpc/kernel/signal_64.c
833     +++ b/arch/powerpc/kernel/signal_64.c
834     @@ -740,11 +740,23 @@ SYSCALL_DEFINE0(rt_sigreturn)
835     &uc_transact->uc_mcontext))
836     goto badframe;
837     }
838     - else
839     - /* Fall through, for non-TM restore */
840     #endif
841     - if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
842     - goto badframe;
843     + /* Fall through, for non-TM restore */
844     + if (!MSR_TM_ACTIVE(msr)) {
845     + /*
846     + * Unset MSR[TS] on the thread regs since MSR from user
847     + * context does not have MSR active, and recheckpoint was
848     + * not called since restore_tm_sigcontexts() was not called
849     + * also.
850     + *
851     + * If not unsetting it, the code can RFID to userspace with
852     + * MSR[TS] set, but without CPU in the proper state,
853     + * causing a TM bad thing.
854     + */
855     + current->thread.regs->msr &= ~MSR_TS_MASK;
856     + if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
857     + goto badframe;
858     + }
859    
860     if (restore_altstack(&uc->uc_stack))
861     goto badframe;
862     diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
863     index c615617e78ac..a18afda3d0f0 100644
864     --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
865     +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
866     @@ -743,12 +743,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
867     srcu_idx = srcu_read_lock(&kvm->srcu);
868     slots = kvm_memslots(kvm);
869     kvm_for_each_memslot(memslot, slots) {
870     + /* Mutual exclusion with kvm_unmap_hva_range etc. */
871     + spin_lock(&kvm->mmu_lock);
872     /*
873     * This assumes it is acceptable to lose reference and
874     * change bits across a reset.
875     */
876     memset(memslot->arch.rmap, 0,
877     memslot->npages * sizeof(*memslot->arch.rmap));
878     + spin_unlock(&kvm->mmu_lock);
879     }
880     srcu_read_unlock(&kvm->srcu, srcu_idx);
881     }
882     diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
883     index a56f8413758a..ab43306c4ea1 100644
884     --- a/arch/powerpc/kvm/book3s_hv.c
885     +++ b/arch/powerpc/kvm/book3s_hv.c
886     @@ -4532,12 +4532,15 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
887     {
888     if (nesting_enabled(kvm))
889     kvmhv_release_all_nested(kvm);
890     + kvmppc_rmap_reset(kvm);
891     + kvm->arch.process_table = 0;
892     + /* Mutual exclusion with kvm_unmap_hva_range etc. */
893     + spin_lock(&kvm->mmu_lock);
894     + kvm->arch.radix = 0;
895     + spin_unlock(&kvm->mmu_lock);
896     kvmppc_free_radix(kvm);
897     kvmppc_update_lpcr(kvm, LPCR_VPM1,
898     LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
899     - kvmppc_rmap_reset(kvm);
900     - kvm->arch.radix = 0;
901     - kvm->arch.process_table = 0;
902     return 0;
903     }
904    
905     @@ -4549,12 +4552,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
906     err = kvmppc_init_vm_radix(kvm);
907     if (err)
908     return err;
909     -
910     + kvmppc_rmap_reset(kvm);
911     + /* Mutual exclusion with kvm_unmap_hva_range etc. */
912     + spin_lock(&kvm->mmu_lock);
913     + kvm->arch.radix = 1;
914     + spin_unlock(&kvm->mmu_lock);
915     kvmppc_free_hpt(&kvm->arch.hpt);
916     kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
917     LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
918     - kvmppc_rmap_reset(kvm);
919     - kvm->arch.radix = 1;
920     return 0;
921     }
922    
923     diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
924     index 19b2d2a9b43d..eeb7450db18c 100644
925     --- a/arch/s390/pci/pci_clp.c
926     +++ b/arch/s390/pci/pci_clp.c
927     @@ -436,7 +436,7 @@ int clp_get_state(u32 fid, enum zpci_state *state)
928     struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
929     int rc;
930    
931     - rrb = clp_alloc_block(GFP_KERNEL);
932     + rrb = clp_alloc_block(GFP_ATOMIC);
933     if (!rrb)
934     return -ENOMEM;
935    
936     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
937     index fbda5a917c5b..e5c0174e330e 100644
938     --- a/arch/x86/include/asm/kvm_host.h
939     +++ b/arch/x86/include/asm/kvm_host.h
940     @@ -1492,7 +1492,7 @@ asmlinkage void kvm_spurious_fault(void);
941     "cmpb $0, kvm_rebooting \n\t" \
942     "jne 668b \n\t" \
943     __ASM_SIZE(push) " $666b \n\t" \
944     - "call kvm_spurious_fault \n\t" \
945     + "jmp kvm_spurious_fault \n\t" \
946     ".popsection \n\t" \
947     _ASM_EXTABLE(666b, 667b)
948    
949     diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
950     index 500278f5308e..362f3cde6a31 100644
951     --- a/arch/x86/kernel/cpu/bugs.c
952     +++ b/arch/x86/kernel/cpu/bugs.c
953     @@ -1002,7 +1002,8 @@ static void __init l1tf_select_mitigation(void)
954     #endif
955    
956     half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
957     - if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
958     + if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
959     + e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
960     pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
961     pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
962     half_pa);
963     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
964     index 8d5d984541be..95784bc4a53c 100644
965     --- a/arch/x86/kvm/vmx.c
966     +++ b/arch/x86/kvm/vmx.c
967     @@ -8031,13 +8031,16 @@ static __init int hardware_setup(void)
968    
969     kvm_mce_cap_supported |= MCG_LMCE_P;
970    
971     - return alloc_kvm_area();
972     + r = alloc_kvm_area();
973     + if (r)
974     + goto out;
975     + return 0;
976    
977     out:
978     for (i = 0; i < VMX_BITMAP_NR; i++)
979     free_page((unsigned long)vmx_bitmap[i]);
980    
981     - return r;
982     + return r;
983     }
984    
985     static __exit void hardware_unsetup(void)
986     diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
987     index ef99f3892e1f..427a955a2cf2 100644
988     --- a/arch/x86/mm/init.c
989     +++ b/arch/x86/mm/init.c
990     @@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void)
991    
992     pages = generic_max_swapfile_size();
993    
994     - if (boot_cpu_has_bug(X86_BUG_L1TF)) {
995     + if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
996     /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
997     unsigned long long l1tf_limit = l1tf_pfn_limit();
998     /*
999     diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
1000     index 5fab264948c2..de95db8ac52f 100644
1001     --- a/arch/x86/mm/init_64.c
1002     +++ b/arch/x86/mm/init_64.c
1003     @@ -584,7 +584,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
1004     paddr_end,
1005     page_size_mask,
1006     prot);
1007     - __flush_tlb_all();
1008     continue;
1009     }
1010     /*
1011     @@ -627,7 +626,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
1012     pud_populate(&init_mm, pud, pmd);
1013     spin_unlock(&init_mm.page_table_lock);
1014     }
1015     - __flush_tlb_all();
1016    
1017     update_page_count(PG_LEVEL_1G, pages);
1018    
1019     @@ -668,7 +666,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
1020     paddr_last = phys_pud_init(pud, paddr,
1021     paddr_end,
1022     page_size_mask);
1023     - __flush_tlb_all();
1024     continue;
1025     }
1026    
1027     @@ -680,7 +677,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
1028     p4d_populate(&init_mm, p4d, pud);
1029     spin_unlock(&init_mm.page_table_lock);
1030     }
1031     - __flush_tlb_all();
1032    
1033     return paddr_last;
1034     }
1035     @@ -733,8 +729,6 @@ kernel_physical_mapping_init(unsigned long paddr_start,
1036     if (pgd_changed)
1037     sync_global_pgds(vaddr_start, vaddr_end - 1);
1038    
1039     - __flush_tlb_all();
1040     -
1041     return paddr_last;
1042     }
1043    
1044     diff --git a/crypto/cfb.c b/crypto/cfb.c
1045     index 20987d0e09d8..e81e45673498 100644
1046     --- a/crypto/cfb.c
1047     +++ b/crypto/cfb.c
1048     @@ -144,7 +144,7 @@ static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
1049    
1050     do {
1051     crypto_cfb_encrypt_one(tfm, iv, dst);
1052     - crypto_xor(dst, iv, bsize);
1053     + crypto_xor(dst, src, bsize);
1054     iv = src;
1055    
1056     src += bsize;
1057     diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
1058     index c20c9f5c18f2..1026173d721a 100644
1059     --- a/crypto/tcrypt.c
1060     +++ b/crypto/tcrypt.c
1061     @@ -1736,6 +1736,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
1062     ret += tcrypt_test("ctr(aes)");
1063     ret += tcrypt_test("rfc3686(ctr(aes))");
1064     ret += tcrypt_test("ofb(aes)");
1065     + ret += tcrypt_test("cfb(aes)");
1066     break;
1067    
1068     case 11:
1069     @@ -2060,6 +2061,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
1070     speed_template_16_24_32);
1071     test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
1072     speed_template_16_24_32);
1073     + test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
1074     + speed_template_16_24_32);
1075     + test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
1076     + speed_template_16_24_32);
1077     break;
1078    
1079     case 201:
1080     diff --git a/crypto/testmgr.c b/crypto/testmgr.c
1081     index b1f79c6bf409..84937ceb4bd8 100644
1082     --- a/crypto/testmgr.c
1083     +++ b/crypto/testmgr.c
1084     @@ -2690,6 +2690,13 @@ static const struct alg_test_desc alg_test_descs[] = {
1085     .dec = __VECS(aes_ccm_dec_tv_template)
1086     }
1087     }
1088     + }, {
1089     + .alg = "cfb(aes)",
1090     + .test = alg_test_skcipher,
1091     + .fips_allowed = 1,
1092     + .suite = {
1093     + .cipher = __VECS(aes_cfb_tv_template)
1094     + },
1095     }, {
1096     .alg = "chacha20",
1097     .test = alg_test_skcipher,
1098     diff --git a/crypto/testmgr.h b/crypto/testmgr.h
1099     index 1fe7b97ba03f..b5b0d29761ce 100644
1100     --- a/crypto/testmgr.h
1101     +++ b/crypto/testmgr.h
1102     @@ -11449,6 +11449,82 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
1103     },
1104     };
1105    
1106     +static const struct cipher_testvec aes_cfb_tv_template[] = {
1107     + { /* From NIST SP800-38A */
1108     + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
1109     + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
1110     + .klen = 16,
1111     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
1112     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1113     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
1114     + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
1115     + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
1116     + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
1117     + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
1118     + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
1119     + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
1120     + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
1121     + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
1122     + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
1123     + "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
1124     + "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
1125     + "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
1126     + "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
1127     + "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
1128     + "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
1129     + .len = 64,
1130     + }, {
1131     + .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
1132     + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
1133     + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
1134     + .klen = 24,
1135     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
1136     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1137     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
1138     + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
1139     + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
1140     + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
1141     + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
1142     + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
1143     + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
1144     + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
1145     + .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
1146     + "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
1147     + "\x67\xce\x7f\x7f\x81\x17\x36\x21"
1148     + "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
1149     + "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
1150     + "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
1151     + "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
1152     + "\x42\xae\x8f\xba\x58\x4b\x09\xff",
1153     + .len = 64,
1154     + }, {
1155     + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
1156     + "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
1157     + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
1158     + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
1159     + .klen = 32,
1160     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
1161     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1162     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
1163     + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
1164     + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
1165     + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
1166     + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
1167     + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
1168     + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
1169     + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
1170     + .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
1171     + "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
1172     + "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
1173     + "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
1174     + "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
1175     + "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
1176     + "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
1177     + "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
1178     + .len = 64,
1179     + },
1180     +};
1181     +
1182     static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
1183     { /* Input data from RFC 2410 Case 1 */
1184     #ifdef __LITTLE_ENDIAN
1185     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
1186     index 9f1000d2a40c..b834ee335d9a 100644
1187     --- a/drivers/android/binder.c
1188     +++ b/drivers/android/binder.c
1189     @@ -72,6 +72,7 @@
1190     #include <linux/spinlock.h>
1191     #include <linux/ratelimit.h>
1192     #include <linux/syscalls.h>
1193     +#include <linux/task_work.h>
1194    
1195     #include <uapi/linux/android/binder.h>
1196    
1197     @@ -2160,6 +2161,64 @@ static bool binder_validate_fixup(struct binder_buffer *b,
1198     return (fixup_offset >= last_min_offset);
1199     }
1200    
1201     +/**
1202     + * struct binder_task_work_cb - for deferred close
1203     + *
1204     + * @twork: callback_head for task work
1205     + * @fd: fd to close
1206     + *
1207     + * Structure to pass task work to be handled after
1208     + * returning from binder_ioctl() via task_work_add().
1209     + */
1210     +struct binder_task_work_cb {
1211     + struct callback_head twork;
1212     + struct file *file;
1213     +};
1214     +
1215     +/**
1216     + * binder_do_fd_close() - close list of file descriptors
1217     + * @twork: callback head for task work
1218     + *
1219     + * It is not safe to call ksys_close() during the binder_ioctl()
1220     + * function if there is a chance that binder's own file descriptor
1221     + * might be closed. This is to meet the requirements for using
1222     + * fdget() (see comments for __fget_light()). Therefore use
1223     + * task_work_add() to schedule the close operation once we have
1224     + * returned from binder_ioctl(). This function is a callback
1225     + * for that mechanism and does the actual ksys_close() on the
1226     + * given file descriptor.
1227     + */
1228     +static void binder_do_fd_close(struct callback_head *twork)
1229     +{
1230     + struct binder_task_work_cb *twcb = container_of(twork,
1231     + struct binder_task_work_cb, twork);
1232     +
1233     + fput(twcb->file);
1234     + kfree(twcb);
1235     +}
1236     +
1237     +/**
1238     + * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1239     + * @fd: file-descriptor to close
1240     + *
1241     + * See comments in binder_do_fd_close(). This function is used to schedule
1242     + * a file-descriptor to be closed after returning from binder_ioctl().
1243     + */
1244     +static void binder_deferred_fd_close(int fd)
1245     +{
1246     + struct binder_task_work_cb *twcb;
1247     +
1248     + twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1249     + if (!twcb)
1250     + return;
1251     + init_task_work(&twcb->twork, binder_do_fd_close);
1252     + __close_fd_get_file(fd, &twcb->file);
1253     + if (twcb->file)
1254     + task_work_add(current, &twcb->twork, true);
1255     + else
1256     + kfree(twcb);
1257     +}
1258     +
1259     static void binder_transaction_buffer_release(struct binder_proc *proc,
1260     struct binder_buffer *buffer,
1261     binder_size_t *failed_at)
1262     @@ -2299,7 +2358,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
1263     }
1264     fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
1265     for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1266     - ksys_close(fd_array[fd_index]);
1267     + binder_deferred_fd_close(fd_array[fd_index]);
1268     } break;
1269     default:
1270     pr_err("transaction release %d bad object type %x\n",
1271     @@ -3912,7 +3971,7 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
1272     } else if (ret) {
1273     u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
1274    
1275     - ksys_close(*fdp);
1276     + binder_deferred_fd_close(*fdp);
1277     }
1278     list_del(&fixup->fixup_entry);
1279     kfree(fixup);
1280     diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
1281     index f39a920496fb..8da314b81eab 100644
1282     --- a/drivers/base/platform-msi.c
1283     +++ b/drivers/base/platform-msi.c
1284     @@ -368,14 +368,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
1285     unsigned int nvec)
1286     {
1287     struct platform_msi_priv_data *data = domain->host_data;
1288     - struct msi_desc *desc;
1289     - for_each_msi_entry(desc, data->dev) {
1290     + struct msi_desc *desc, *tmp;
1291     + for_each_msi_entry_safe(desc, tmp, data->dev) {
1292     if (WARN_ON(!desc->irq || desc->nvec_used != 1))
1293     return;
1294     if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
1295     continue;
1296    
1297     irq_domain_free_irqs_common(domain, desc->irq, 1);
1298     + list_del(&desc->list);
1299     + free_msi_entry(desc);
1300     }
1301     }
1302    
1303     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1304     index 129f640424b7..95db630dd722 100644
1305     --- a/drivers/char/tpm/tpm-interface.c
1306     +++ b/drivers/char/tpm/tpm-interface.c
1307     @@ -477,13 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
1308    
1309     if (need_locality) {
1310     rc = tpm_request_locality(chip, flags);
1311     - if (rc < 0)
1312     - goto out_no_locality;
1313     + if (rc < 0) {
1314     + need_locality = false;
1315     + goto out_locality;
1316     + }
1317     }
1318    
1319     rc = tpm_cmd_ready(chip, flags);
1320     if (rc)
1321     - goto out;
1322     + goto out_locality;
1323    
1324     rc = tpm2_prepare_space(chip, space, ordinal, buf);
1325     if (rc)
1326     @@ -547,14 +549,13 @@ out_recv:
1327     dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
1328    
1329     out:
1330     - rc = tpm_go_idle(chip, flags);
1331     - if (rc)
1332     - goto out;
1333     + /* may fail but do not override previous error value in rc */
1334     + tpm_go_idle(chip, flags);
1335    
1336     +out_locality:
1337     if (need_locality)
1338     tpm_relinquish_locality(chip, flags);
1339    
1340     -out_no_locality:
1341     if (chip->ops->clk_enable != NULL)
1342     chip->ops->clk_enable(chip, false);
1343    
1344     diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
1345     index caa86b19c76d..f74f451baf6a 100644
1346     --- a/drivers/char/tpm/tpm_i2c_nuvoton.c
1347     +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
1348     @@ -369,6 +369,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
1349     struct device *dev = chip->dev.parent;
1350     struct i2c_client *client = to_i2c_client(dev);
1351     u32 ordinal;
1352     + unsigned long duration;
1353     size_t count = 0;
1354     int burst_count, bytes2write, retries, rc = -EIO;
1355    
1356     @@ -455,10 +456,12 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
1357     return rc;
1358     }
1359     ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
1360     - rc = i2c_nuvoton_wait_for_data_avail(chip,
1361     - tpm_calc_ordinal_duration(chip,
1362     - ordinal),
1363     - &priv->read_queue);
1364     + if (chip->flags & TPM_CHIP_FLAG_TPM2)
1365     + duration = tpm2_calc_ordinal_duration(chip, ordinal);
1366     + else
1367     + duration = tpm_calc_ordinal_duration(chip, ordinal);
1368     +
1369     + rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
1370     if (rc) {
1371     dev_err(dev, "%s() timeout command duration\n", __func__);
1372     i2c_nuvoton_ready(chip);
1373     diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
1374     index fa25e35ce7d5..08b42b053fce 100644
1375     --- a/drivers/clk/rockchip/clk-rk3188.c
1376     +++ b/drivers/clk/rockchip/clk-rk3188.c
1377     @@ -382,7 +382,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
1378     COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
1379     RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
1380     RK2928_CLKGATE_CON(0), 13, GFLAGS),
1381     - COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT,
1382     + COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
1383     RK2928_CLKSEL_CON(9), 0,
1384     RK2928_CLKGATE_CON(0), 14, GFLAGS,
1385     &common_spdif_fracmux),
1386     diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
1387     index 6fe3c14f7b2d..424d8635b053 100644
1388     --- a/drivers/clk/sunxi-ng/ccu_nm.c
1389     +++ b/drivers/clk/sunxi-ng/ccu_nm.c
1390     @@ -19,6 +19,17 @@ struct _ccu_nm {
1391     unsigned long m, min_m, max_m;
1392     };
1393    
1394     +static unsigned long ccu_nm_calc_rate(unsigned long parent,
1395     + unsigned long n, unsigned long m)
1396     +{
1397     + u64 rate = parent;
1398     +
1399     + rate *= n;
1400     + do_div(rate, m);
1401     +
1402     + return rate;
1403     +}
1404     +
1405     static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
1406     struct _ccu_nm *nm)
1407     {
1408     @@ -28,7 +39,8 @@ static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
1409    
1410     for (_n = nm->min_n; _n <= nm->max_n; _n++) {
1411     for (_m = nm->min_m; _m <= nm->max_m; _m++) {
1412     - unsigned long tmp_rate = parent * _n / _m;
1413     + unsigned long tmp_rate = ccu_nm_calc_rate(parent,
1414     + _n, _m);
1415    
1416     if (tmp_rate > rate)
1417     continue;
1418     @@ -100,7 +112,7 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
1419     if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
1420     rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
1421     else
1422     - rate = parent_rate * n / m;
1423     + rate = ccu_nm_calc_rate(parent_rate, n, m);
1424    
1425     if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
1426     rate /= nm->fixed_post_div;
1427     @@ -149,7 +161,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
1428     _nm.max_m = nm->m.max ?: 1 << nm->m.width;
1429    
1430     ccu_nm_find_best(*parent_rate, rate, &_nm);
1431     - rate = *parent_rate * _nm.n / _nm.m;
1432     + rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
1433    
1434     if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
1435     rate /= nm->fixed_post_div;
1436     diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
1437     index 55c77e44bb2d..d9c8a779dd7d 100644
1438     --- a/drivers/clocksource/Kconfig
1439     +++ b/drivers/clocksource/Kconfig
1440     @@ -290,6 +290,7 @@ config CLKSRC_MPS2
1441    
1442     config ARC_TIMERS
1443     bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
1444     + depends on GENERIC_SCHED_CLOCK
1445     select TIMER_OF
1446     help
1447     These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
1448     diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
1449     index 20da9b1d7f7d..b28970ca4a7a 100644
1450     --- a/drivers/clocksource/arc_timer.c
1451     +++ b/drivers/clocksource/arc_timer.c
1452     @@ -23,6 +23,7 @@
1453     #include <linux/cpu.h>
1454     #include <linux/of.h>
1455     #include <linux/of_irq.h>
1456     +#include <linux/sched_clock.h>
1457    
1458     #include <soc/arc/timers.h>
1459     #include <soc/arc/mcip.h>
1460     @@ -88,6 +89,11 @@ static u64 arc_read_gfrc(struct clocksource *cs)
1461     return (((u64)h) << 32) | l;
1462     }
1463    
1464     +static notrace u64 arc_gfrc_clock_read(void)
1465     +{
1466     + return arc_read_gfrc(NULL);
1467     +}
1468     +
1469     static struct clocksource arc_counter_gfrc = {
1470     .name = "ARConnect GFRC",
1471     .rating = 400,
1472     @@ -111,6 +117,8 @@ static int __init arc_cs_setup_gfrc(struct device_node *node)
1473     if (ret)
1474     return ret;
1475    
1476     + sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
1477     +
1478     return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
1479     }
1480     TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
1481     @@ -139,6 +147,11 @@ static u64 arc_read_rtc(struct clocksource *cs)
1482     return (((u64)h) << 32) | l;
1483     }
1484    
1485     +static notrace u64 arc_rtc_clock_read(void)
1486     +{
1487     + return arc_read_rtc(NULL);
1488     +}
1489     +
1490     static struct clocksource arc_counter_rtc = {
1491     .name = "ARCv2 RTC",
1492     .rating = 350,
1493     @@ -170,6 +183,8 @@ static int __init arc_cs_setup_rtc(struct device_node *node)
1494    
1495     write_aux_reg(AUX_RTC_CTRL, 1);
1496    
1497     + sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
1498     +
1499     return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
1500     }
1501     TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
1502     @@ -185,6 +200,11 @@ static u64 arc_read_timer1(struct clocksource *cs)
1503     return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
1504     }
1505    
1506     +static notrace u64 arc_timer1_clock_read(void)
1507     +{
1508     + return arc_read_timer1(NULL);
1509     +}
1510     +
1511     static struct clocksource arc_counter_timer1 = {
1512     .name = "ARC Timer1",
1513     .rating = 300,
1514     @@ -209,6 +229,8 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
1515     write_aux_reg(ARC_REG_TIMER1_CNT, 0);
1516     write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
1517    
1518     + sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
1519     +
1520     return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
1521     }
1522    
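
The arc_timer hunks wire each clocksource up as a sched_clock source as well: a notrace wrapper reads the counter without the clocksource argument, and sched_clock_register() is told the counter width so the core can extend and wrap it correctly. Condensed kernel-style sketch of the pattern (names mirror the hunks above; not a complete driver):

	/* sched_clock callbacks take no argument and must never be traced. */
	static notrace u64 arc_gfrc_clock_read(void)
	{
		return arc_read_gfrc(NULL);	/* reuse the clocksource read routine */
	}

	static notrace u64 arc_timer1_clock_read(void)
	{
		return arc_read_timer1(NULL);
	}

		/* 64-bit free-running GFRC: declare the full width. */
		sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);

		/* 32-bit TIMER1: the core handles wraparound, but the declared
		 * width must match the hardware counter, hence 32 here. */
		sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
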
1523     diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
1524     index 2ae6124e5da6..5d54ebc20cb3 100644
1525     --- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
1526     +++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
1527     @@ -73,7 +73,7 @@ static int flexi_aes_keylen(int keylen)
1528     static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
1529     {
1530     struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
1531     - void *fctx;
1532     + struct crypto_ctx_hdr *chdr;
1533    
1534     /* get the first device */
1535     nctx->ndev = nitrox_get_first_device();
1536     @@ -81,12 +81,14 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
1537     return -ENODEV;
1538    
1539     /* allocate nitrox crypto context */
1540     - fctx = crypto_alloc_context(nctx->ndev);
1541     - if (!fctx) {
1542     + chdr = crypto_alloc_context(nctx->ndev);
1543     + if (!chdr) {
1544     nitrox_put_device(nctx->ndev);
1545     return -ENOMEM;
1546     }
1547     - nctx->u.ctx_handle = (uintptr_t)fctx;
1548     + nctx->chdr = chdr;
1549     + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
1550     + sizeof(struct ctx_hdr));
1551     crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
1552     sizeof(struct nitrox_kcrypt_request));
1553     return 0;
1554     @@ -102,7 +104,7 @@ static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
1555    
1556     memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
1557     memset(&fctx->auth, 0, sizeof(struct auth_keys));
1558     - crypto_free_context((void *)fctx);
1559     + crypto_free_context((void *)nctx->chdr);
1560     }
1561     nitrox_put_device(nctx->ndev);
1562    
1563     diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
1564     index 2260efa42308..9138bae12521 100644
1565     --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
1566     +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
1567     @@ -158,12 +158,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
1568     void *crypto_alloc_context(struct nitrox_device *ndev)
1569     {
1570     struct ctx_hdr *ctx;
1571     + struct crypto_ctx_hdr *chdr;
1572     void *vaddr;
1573     dma_addr_t dma;
1574    
1575     + chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
1576     + if (!chdr)
1577     + return NULL;
1578     +
1579     vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
1580     - if (!vaddr)
1581     + if (!vaddr) {
1582     + kfree(chdr);
1583     return NULL;
1584     + }
1585    
1586     /* fill meta data */
1587     ctx = vaddr;
1588     @@ -171,7 +178,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
1589     ctx->dma = dma;
1590     ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
1591    
1592     - return ((u8 *)vaddr + sizeof(struct ctx_hdr));
1593     + chdr->pool = ndev->ctx_pool;
1594     + chdr->dma = dma;
1595     + chdr->vaddr = vaddr;
1596     +
1597     + return chdr;
1598     }
1599    
1600     /**
1601     @@ -180,13 +191,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
1602     */
1603     void crypto_free_context(void *ctx)
1604     {
1605     - struct ctx_hdr *ctxp;
1606     + struct crypto_ctx_hdr *ctxp;
1607    
1608     if (!ctx)
1609     return;
1610    
1611     - ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
1612     - dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
1613     + ctxp = ctx;
1614     + dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
1615     + kfree(ctxp);
1616     }
1617    
1618     /**
1619     diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
1620     index d091b6f5f5dd..19f0a20e3bb3 100644
1621     --- a/drivers/crypto/cavium/nitrox/nitrox_req.h
1622     +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
1623     @@ -181,12 +181,19 @@ struct flexi_crypto_context {
1624     struct auth_keys auth;
1625     };
1626    
1627     +struct crypto_ctx_hdr {
1628     + struct dma_pool *pool;
1629     + dma_addr_t dma;
1630     + void *vaddr;
1631     +};
1632     +
1633     struct nitrox_crypto_ctx {
1634     struct nitrox_device *ndev;
1635     union {
1636     u64 ctx_handle;
1637     struct flexi_crypto_context *fctx;
1638     } u;
1639     + struct crypto_ctx_hdr *chdr;
1640     };
1641    
1642     struct nitrox_kcrypt_request {
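
The nitrox hunks stop hiding the DMA-pool bookkeeping behind pointer arithmetic on the returned context: callers now get a small header recording the pool, DMA handle and virtual address, so freeing no longer has to walk backwards from the payload pointer. Kernel-style sketch of the alloc/free pairing under that scheme (same helpers as in the hunks; error handling trimmed):

	struct crypto_ctx_hdr {
		struct dma_pool *pool;
		dma_addr_t dma;
		void *vaddr;
	};

	static struct crypto_ctx_hdr *ctx_alloc(struct dma_pool *pool)
	{
		struct crypto_ctx_hdr *chdr;

		chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
		if (!chdr)
			return NULL;

		chdr->vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &chdr->dma);
		if (!chdr->vaddr) {
			kfree(chdr);
			return NULL;
		}
		chdr->pool = pool;

		return chdr;
	}

	static void ctx_free(struct crypto_ctx_hdr *chdr)
	{
		if (!chdr)
			return;

		/* Free the pool buffer by its recorded vaddr/dma, then the header. */
		dma_pool_free(chdr->pool, chdr->vaddr, chdr->dma);
		kfree(chdr);
	}
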
1643     diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
1644     index 461b97e2f1fd..1ff8738631a3 100644
1645     --- a/drivers/crypto/chelsio/chcr_ipsec.c
1646     +++ b/drivers/crypto/chelsio/chcr_ipsec.c
1647     @@ -303,7 +303,10 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
1648    
1649     static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
1650     {
1651     - int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
1652     + int hdrlen;
1653     +
1654     + hdrlen = sizeof(struct fw_ulptx_wr) +
1655     + sizeof(struct chcr_ipsec_req) + kctx_len;
1656    
1657     hdrlen += sizeof(struct cpl_tx_pkt);
1658     if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
1659     diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1660     index 1aaccbe7e1de..c45711fd78e9 100644
1661     --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1662     +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1663     @@ -1605,6 +1605,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
1664     (char __user *)urelocs + copied,
1665     len)) {
1666     end_user:
1667     + user_access_end();
1668     kvfree(relocs);
1669     err = -EFAULT;
1670     goto err;
1671     diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
1672     index f455f095a146..1b014d92855b 100644
1673     --- a/drivers/gpu/drm/udl/udl_main.c
1674     +++ b/drivers/gpu/drm/udl/udl_main.c
1675     @@ -350,15 +350,10 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
1676     if (ret)
1677     goto err;
1678    
1679     - ret = drm_vblank_init(dev, 1);
1680     - if (ret)
1681     - goto err_fb;
1682     -
1683     drm_kms_helper_poll_init(dev);
1684    
1685     return 0;
1686     -err_fb:
1687     - udl_fbdev_cleanup(dev);
1688     +
1689     err:
1690     if (udl->urbs.count)
1691     udl_free_urb_list(dev);
1692     diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
1693     index 4db62c545748..26470c77eb6e 100644
1694     --- a/drivers/gpu/drm/v3d/v3d_debugfs.c
1695     +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
1696     @@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
1697     V3D_READ(v3d_hub_reg_defs[i].reg));
1698     }
1699    
1700     - for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
1701     - seq_printf(m, "%s (0x%04x): 0x%08x\n",
1702     - v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
1703     - V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
1704     + if (v3d->ver < 41) {
1705     + for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
1706     + seq_printf(m, "%s (0x%04x): 0x%08x\n",
1707     + v3d_gca_reg_defs[i].name,
1708     + v3d_gca_reg_defs[i].reg,
1709     + V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
1710     + }
1711     }
1712    
1713     for (core = 0; core < v3d->cores; core++) {
1714     diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
1715     index a365089a9305..b7257d7dd925 100644
1716     --- a/drivers/infiniband/hw/hfi1/verbs.c
1717     +++ b/drivers/infiniband/hw/hfi1/verbs.c
1718     @@ -919,6 +919,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
1719    
1720     if (slen > len)
1721     slen = len;
1722     + if (slen > ss->sge.sge_length)
1723     + slen = ss->sge.sge_length;
1724     rvt_update_sge(ss, slen, false);
1725     seg_pio_copy_mid(pbuf, addr, slen);
1726     len -= slen;
1727     diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1728     index a94b6494e71a..f322a1768fbb 100644
1729     --- a/drivers/input/mouse/elan_i2c_core.c
1730     +++ b/drivers/input/mouse/elan_i2c_core.c
1731     @@ -1336,6 +1336,7 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
1732     static const struct acpi_device_id elan_acpi_id[] = {
1733     { "ELAN0000", 0 },
1734     { "ELAN0100", 0 },
1735     + { "ELAN0501", 0 },
1736     { "ELAN0600", 0 },
1737     { "ELAN0602", 0 },
1738     { "ELAN0605", 0 },
1739     diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
1740     index d3aacd534e9c..5c63d25ce84e 100644
1741     --- a/drivers/input/touchscreen/atmel_mxt_ts.c
1742     +++ b/drivers/input/touchscreen/atmel_mxt_ts.c
1743     @@ -1585,10 +1585,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
1744     /* T7 config may have changed */
1745     mxt_init_t7_power_cfg(data);
1746    
1747     -release_raw:
1748     - kfree(cfg.raw);
1749     release_mem:
1750     kfree(cfg.mem);
1751     +release_raw:
1752     + kfree(cfg.raw);
1753     return ret;
1754     }
1755    
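
The atmel_mxt_ts hunk is the classic goto-cleanup ordering fix: cleanup labels must appear in the reverse order of the allocations they undo, otherwise an early failure path skips (or doubles) a free. Tiny standalone illustration of the corrected layout (plain C, hypothetical buffers):

	#include <stdlib.h>

	static int do_work(char *raw, char *mem) { (void)raw; (void)mem; return 0; }

	int configure(void)
	{
		char *raw, *mem;
		int ret = -1;

		raw = malloc(64);		/* allocated first */
		if (!raw)
			return -1;

		mem = malloc(128);		/* allocated second */
		if (!mem)
			goto release_raw;	/* only 'raw' exists here */

		ret = do_work(raw, mem);
		if (ret)
			goto release_mem;

		/* success falls through: free newest first, oldest last */
	release_mem:
		free(mem);
	release_raw:
		free(raw);
		return ret;
	}
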
1756     diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
1757     index 6947ccf26512..71eda422c926 100644
1758     --- a/drivers/iommu/arm-smmu-v3.c
1759     +++ b/drivers/iommu/arm-smmu-v3.c
1760     @@ -828,7 +828,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
1761     cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
1762     cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
1763     cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
1764     - cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
1765     + /*
1766     + * Commands are written little-endian, but we want the SMMU to
1767     + * receive MSIData, and thus write it back to memory, in CPU
1768     + * byte order, so big-endian needs an extra byteswap here.
1769     + */
1770     + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
1771     + cpu_to_le32(ent->sync.msidata));
1772     cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
1773     break;
1774     default:
1775     diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
1776     index 0ff517d3c98f..a4ceb61c5b60 100644
1777     --- a/drivers/isdn/capi/kcapi.c
1778     +++ b/drivers/isdn/capi/kcapi.c
1779     @@ -852,7 +852,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
1780     u16 ret;
1781    
1782     if (contr == 0) {
1783     - strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
1784     + strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
1785     return CAPI_NOERROR;
1786     }
1787    
1788     @@ -860,7 +860,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
1789    
1790     ctr = get_capi_ctr_by_nr(contr);
1791     if (ctr && ctr->state == CAPI_CTR_RUNNING) {
1792     - strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
1793     + strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
1794     ret = CAPI_NOERROR;
1795     } else
1796     ret = CAPI_REGNOTINSTALLED;
1797     diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
1798     index 65a933a21e68..9a5334b726d6 100644
1799     --- a/drivers/media/cec/cec-adap.c
1800     +++ b/drivers/media/cec/cec-adap.c
1801     @@ -455,7 +455,7 @@ int cec_thread_func(void *_adap)
1802     (adap->needs_hpd &&
1803     (!adap->is_configured && !adap->is_configuring)) ||
1804     kthread_should_stop() ||
1805     - (!adap->transmitting &&
1806     + (!adap->transmit_in_progress &&
1807     !list_empty(&adap->transmit_queue)),
1808     msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
1809     timeout = err == 0;
1810     @@ -463,7 +463,7 @@ int cec_thread_func(void *_adap)
1811     /* Otherwise we just wait for something to happen. */
1812     wait_event_interruptible(adap->kthread_waitq,
1813     kthread_should_stop() ||
1814     - (!adap->transmitting &&
1815     + (!adap->transmit_in_progress &&
1816     !list_empty(&adap->transmit_queue)));
1817     }
1818    
1819     @@ -488,6 +488,7 @@ int cec_thread_func(void *_adap)
1820     pr_warn("cec-%s: message %*ph timed out\n", adap->name,
1821     adap->transmitting->msg.len,
1822     adap->transmitting->msg.msg);
1823     + adap->transmit_in_progress = false;
1824     adap->tx_timeouts++;
1825     /* Just give up on this. */
1826     cec_data_cancel(adap->transmitting,
1827     @@ -499,7 +500,7 @@ int cec_thread_func(void *_adap)
1828     * If we are still transmitting, or there is nothing new to
1829     * transmit, then just continue waiting.
1830     */
1831     - if (adap->transmitting || list_empty(&adap->transmit_queue))
1832     + if (adap->transmit_in_progress || list_empty(&adap->transmit_queue))
1833     goto unlock;
1834    
1835     /* Get a new message to transmit */
1836     @@ -545,6 +546,8 @@ int cec_thread_func(void *_adap)
1837     if (adap->ops->adap_transmit(adap, data->attempts,
1838     signal_free_time, &data->msg))
1839     cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
1840     + else
1841     + adap->transmit_in_progress = true;
1842    
1843     unlock:
1844     mutex_unlock(&adap->lock);
1845     @@ -575,14 +578,17 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
1846     data = adap->transmitting;
1847     if (!data) {
1848     /*
1849     - * This can happen if a transmit was issued and the cable is
1850     + * This might happen if a transmit was issued and the cable is
1851     * unplugged while the transmit is ongoing. Ignore this
1852     * transmit in that case.
1853     */
1854     - dprintk(1, "%s was called without an ongoing transmit!\n",
1855     - __func__);
1856     - goto unlock;
1857     + if (!adap->transmit_in_progress)
1858     + dprintk(1, "%s was called without an ongoing transmit!\n",
1859     + __func__);
1860     + adap->transmit_in_progress = false;
1861     + goto wake_thread;
1862     }
1863     + adap->transmit_in_progress = false;
1864    
1865     msg = &data->msg;
1866    
1867     @@ -648,7 +654,6 @@ wake_thread:
1868     * for transmitting or to retry the current message.
1869     */
1870     wake_up_interruptible(&adap->kthread_waitq);
1871     -unlock:
1872     mutex_unlock(&adap->lock);
1873     }
1874     EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
1875     @@ -1496,8 +1501,11 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1876     if (adap->monitor_all_cnt)
1877     WARN_ON(call_op(adap, adap_monitor_all_enable, false));
1878     mutex_lock(&adap->devnode.lock);
1879     - if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
1880     + if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
1881     WARN_ON(adap->ops->adap_enable(adap, false));
1882     + adap->transmit_in_progress = false;
1883     + wake_up_interruptible(&adap->kthread_waitq);
1884     + }
1885     mutex_unlock(&adap->devnode.lock);
1886     if (phys_addr == CEC_PHYS_ADDR_INVALID)
1887     return;
1888     @@ -1505,6 +1513,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1889    
1890     mutex_lock(&adap->devnode.lock);
1891     adap->last_initiator = 0xff;
1892     + adap->transmit_in_progress = false;
1893    
1894     if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
1895     adap->ops->adap_enable(adap, true)) {
1896     diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
1897     index 635db8e70ead..8f987bc0dd88 100644
1898     --- a/drivers/media/cec/cec-pin.c
1899     +++ b/drivers/media/cec/cec-pin.c
1900     @@ -601,8 +601,9 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
1901     break;
1902     /* Was the message ACKed? */
1903     ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
1904     - if (!ack && !pin->tx_ignore_nack_until_eom &&
1905     - pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) {
1906     + if (!ack && (!pin->tx_ignore_nack_until_eom ||
1907     + pin->tx_bit / 10 == pin->tx_msg.len - 1) &&
1908     + !pin->tx_post_eom) {
1909     /*
1910     * Note: the CEC spec is ambiguous regarding
1911     * what action to take when a NACK appears
1912     diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1913     index fa483b95bc5a..d9a590ae7545 100644
1914     --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1915     +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1916     @@ -1769,7 +1769,7 @@ typedef struct { u16 __; u8 _; } __packed x24;
1917     unsigned s; \
1918     \
1919     for (s = 0; s < len; s++) { \
1920     - u8 chr = font8x16[text[s] * 16 + line]; \
1921     + u8 chr = font8x16[(u8)text[s] * 16 + line]; \
1922     \
1923     if (hdiv == 2 && tpg->hflip) { \
1924     pos[3] = (chr & (0x01 << 6) ? fg : bg); \
1925     diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
1926     index 8ff8722cb6b1..99f736c81286 100644
1927     --- a/drivers/media/common/videobuf2/videobuf2-core.c
1928     +++ b/drivers/media/common/videobuf2/videobuf2-core.c
1929     @@ -812,6 +812,9 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
1930     memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
1931     q->memory = memory;
1932     q->waiting_for_buffers = !q->is_output;
1933     + } else if (q->memory != memory) {
1934     + dprintk(1, "memory model mismatch\n");
1935     + return -EINVAL;
1936     }
1937    
1938     num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
1939     diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
1940     index 11c69281692e..95a0e7d9851a 100644
1941     --- a/drivers/media/i2c/imx274.c
1942     +++ b/drivers/media/i2c/imx274.c
1943     @@ -619,16 +619,19 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
1944    
1945     static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
1946     {
1947     + unsigned int uint_val;
1948     int err;
1949    
1950     - err = regmap_read(priv->regmap, addr, (unsigned int *)val);
1951     + err = regmap_read(priv->regmap, addr, &uint_val);
1952     if (err)
1953     dev_err(&priv->client->dev,
1954     "%s : i2c read failed, addr = %x\n", __func__, addr);
1955     else
1956     dev_dbg(&priv->client->dev,
1957     "%s : addr 0x%x, val=0x%x\n", __func__,
1958     - addr, *val);
1959     + addr, uint_val);
1960     +
1961     + *val = uint_val;
1962     return err;
1963     }
1964    
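
regmap_read() always stores a full `unsigned int` through its out-pointer, so passing a casted `u8 *` lets it clobber the bytes next to the variable; the imx274 fix reads into a properly sized local and narrows afterwards. Standalone illustration of the pattern with a stand-in read function (plain C; fake_read is hypothetical and only mimics a 32-bit register read):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for an API that always stores 32 bits through its out-pointer. */
	static int fake_read(unsigned int *out)
	{
		*out = 0xA5;		/* register value, fits in 8 bits */
		return 0;
	}

	static int read_reg_u8(uint8_t *val)
	{
		unsigned int tmp;	/* full-width local, never the u8 directly */
		int err;

		err = fake_read(&tmp);
		if (err)
			return err;

		*val = (uint8_t)tmp;	/* narrow only after the wide read */
		return 0;
	}

	int main(void)
	{
		uint8_t v = 0;

		if (!read_reg_u8(&v))
			printf("reg = 0x%02x\n", v);
		return 0;
	}
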
1965     diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
1966     index eaefdb58653b..703d29abb363 100644
1967     --- a/drivers/media/i2c/ov5640.c
1968     +++ b/drivers/media/i2c/ov5640.c
1969     @@ -2020,6 +2020,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
1970     struct ov5640_dev *sensor = to_ov5640_dev(sd);
1971     const struct ov5640_mode_info *new_mode;
1972     struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
1973     + struct v4l2_mbus_framefmt *fmt;
1974     int ret;
1975    
1976     if (format->pad != 0)
1977     @@ -2037,22 +2038,20 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
1978     if (ret)
1979     goto out;
1980    
1981     - if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
1982     - struct v4l2_mbus_framefmt *fmt =
1983     - v4l2_subdev_get_try_format(sd, cfg, 0);
1984     + if (format->which == V4L2_SUBDEV_FORMAT_TRY)
1985     + fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
1986     + else
1987     + fmt = &sensor->fmt;
1988    
1989     - *fmt = *mbus_fmt;
1990     - goto out;
1991     - }
1992     + *fmt = *mbus_fmt;
1993    
1994     if (new_mode != sensor->current_mode) {
1995     sensor->current_mode = new_mode;
1996     sensor->pending_mode_change = true;
1997     }
1998     - if (mbus_fmt->code != sensor->fmt.code) {
1999     - sensor->fmt = *mbus_fmt;
2000     + if (mbus_fmt->code != sensor->fmt.code)
2001     sensor->pending_fmt_change = true;
2002     - }
2003     +
2004     out:
2005     mutex_unlock(&sensor->lock);
2006     return ret;
2007     diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
2008     index d82db738f174..f938a2c54314 100644
2009     --- a/drivers/media/platform/vim2m.c
2010     +++ b/drivers/media/platform/vim2m.c
2011     @@ -805,10 +805,11 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
2012     static void vim2m_stop_streaming(struct vb2_queue *q)
2013     {
2014     struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
2015     + struct vim2m_dev *dev = ctx->dev;
2016     struct vb2_v4l2_buffer *vbuf;
2017     unsigned long flags;
2018    
2019     - flush_scheduled_work();
2020     + cancel_delayed_work_sync(&dev->work_run);
2021     for (;;) {
2022     if (V4L2_TYPE_IS_OUTPUT(q->type))
2023     vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
2024     diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
2025     index 673772cd17d6..a88637a42f44 100644
2026     --- a/drivers/media/platform/vivid/vivid-vid-cap.c
2027     +++ b/drivers/media/platform/vivid/vivid-vid-cap.c
2028     @@ -449,6 +449,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
2029     tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
2030     break;
2031     }
2032     + vfree(dev->bitmap_cap);
2033     + dev->bitmap_cap = NULL;
2034     vivid_update_quality(dev);
2035     tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
2036     dev->crop_cap = dev->src_rect;
2037     diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
2038     index 552bbe82a160..877978dbd409 100644
2039     --- a/drivers/media/rc/rc-main.c
2040     +++ b/drivers/media/rc/rc-main.c
2041     @@ -695,7 +695,8 @@ void rc_repeat(struct rc_dev *dev)
2042     (dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0)
2043     };
2044    
2045     - ir_lirc_scancode_event(dev, &sc);
2046     + if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
2047     + ir_lirc_scancode_event(dev, &sc);
2048    
2049     spin_lock_irqsave(&dev->keylock, flags);
2050    
2051     @@ -735,7 +736,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
2052     .keycode = keycode
2053     };
2054    
2055     - ir_lirc_scancode_event(dev, &sc);
2056     + if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
2057     + ir_lirc_scancode_event(dev, &sc);
2058    
2059     if (new_event && dev->keypressed)
2060     ir_do_keyup(dev, false);
2061     diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
2062     index 024c751eb165..2ad2ddeaff51 100644
2063     --- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
2064     +++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
2065     @@ -155,7 +155,6 @@ static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream)
2066     stream->props.u.bulk.buffersize,
2067     usb_urb_complete, stream);
2068    
2069     - stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER;
2070     stream->urbs_initialized++;
2071     }
2072     return 0;
2073     @@ -186,7 +185,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
2074     urb->complete = usb_urb_complete;
2075     urb->pipe = usb_rcvisocpipe(stream->udev,
2076     stream->props.endpoint);
2077     - urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER;
2078     + urb->transfer_flags = URB_ISO_ASAP;
2079     urb->interval = stream->props.u.isoc.interval;
2080     urb->number_of_packets = stream->props.u.isoc.framesperurb;
2081     urb->transfer_buffer_length = stream->props.u.isoc.framesize *
2082     @@ -210,7 +209,7 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream)
2083     if (stream->state & USB_STATE_URB_BUF) {
2084     while (stream->buf_num) {
2085     stream->buf_num--;
2086     - stream->buf_list[stream->buf_num] = NULL;
2087     + kfree(stream->buf_list[stream->buf_num]);
2088     }
2089     }
2090    
2091     diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
2092     index 218f0da0ce76..edd34cf09cf8 100644
2093     --- a/drivers/media/v4l2-core/v4l2-fwnode.c
2094     +++ b/drivers/media/v4l2-core/v4l2-fwnode.c
2095     @@ -310,8 +310,8 @@ v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
2096     }
2097    
2098     if (!fwnode_property_read_u32(fwnode, "data-active", &v)) {
2099     - flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING |
2100     - V4L2_MBUS_PCLK_SAMPLE_FALLING);
2101     + flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH |
2102     + V4L2_MBUS_DATA_ACTIVE_LOW);
2103     flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
2104     V4L2_MBUS_DATA_ACTIVE_LOW;
2105     pr_debug("data-active %s\n", v ? "high" : "low");
2106     diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
2107     index 57a6bb1fd3c9..8f2c5d8bd2ee 100644
2108     --- a/drivers/misc/ocxl/config.c
2109     +++ b/drivers/misc/ocxl/config.c
2110     @@ -318,7 +318,7 @@ static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn,
2111     if (rc)
2112     return rc;
2113     ptr = (u32 *) &afu->name[i];
2114     - *ptr = val;
2115     + *ptr = le32_to_cpu((__force __le32) val);
2116     }
2117     afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
2118     return 0;
2119     diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
2120     index 31695a078485..646d16450066 100644
2121     --- a/drivers/misc/ocxl/link.c
2122     +++ b/drivers/misc/ocxl/link.c
2123     @@ -566,7 +566,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
2124    
2125     mutex_lock(&spa->spa_lock);
2126    
2127     - pe->tid = tid;
2128     + pe->tid = cpu_to_be32(tid);
2129    
2130     /*
2131     * The barrier makes sure the PE is updated
2132     diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
2133     index 650f2b490a05..9dc16a23429a 100644
2134     --- a/drivers/mtd/nand/raw/marvell_nand.c
2135     +++ b/drivers/mtd/nand/raw/marvell_nand.c
2136     @@ -514,9 +514,14 @@ static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
2137     writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
2138     }
2139    
2140     -static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
2141     +static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
2142     {
2143     + u32 reg;
2144     +
2145     + reg = readl_relaxed(nfc->regs + NDSR);
2146     writel_relaxed(int_mask, nfc->regs + NDSR);
2147     +
2148     + return reg & int_mask;
2149     }
2150    
2151     static void marvell_nfc_force_byte_access(struct nand_chip *chip,
2152     @@ -683,6 +688,7 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
2153     static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
2154     {
2155     struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
2156     + u32 pending;
2157     int ret;
2158    
2159     /* Timeout is expressed in ms */
2160     @@ -695,8 +701,13 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
2161     ret = wait_for_completion_timeout(&nfc->complete,
2162     msecs_to_jiffies(timeout_ms));
2163     marvell_nfc_disable_int(nfc, NDCR_RDYM);
2164     - marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
2165     - if (!ret) {
2166     + pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
2167     +
2168     + /*
2169     + * In case the interrupt was not served in the required time frame,
2170     + * check if the ISR was not served or if something went actually wrong.
2171     + */
2172     + if (ret && !pending) {
2173     dev_err(nfc->dev, "Timeout waiting for RB signal\n");
2174     return -ETIMEDOUT;
2175     }
2176     diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
2177     index 5c26492c841d..38b5dc22cb30 100644
2178     --- a/drivers/mtd/nand/raw/nand_jedec.c
2179     +++ b/drivers/mtd/nand/raw/nand_jedec.c
2180     @@ -107,6 +107,8 @@ int nand_jedec_detect(struct nand_chip *chip)
2181     pr_warn("Invalid codeword size\n");
2182     }
2183    
2184     + ret = 1;
2185     +
2186     free_jedec_param_page:
2187     kfree(p);
2188     return ret;
2189     diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
2190     index 886d05c391ef..68e8b9f7f372 100644
2191     --- a/drivers/mtd/nand/raw/omap2.c
2192     +++ b/drivers/mtd/nand/raw/omap2.c
2193     @@ -1944,7 +1944,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
2194     case NAND_OMAP_PREFETCH_DMA:
2195     dma_cap_zero(mask);
2196     dma_cap_set(DMA_SLAVE, mask);
2197     - info->dma = dma_request_chan(dev, "rxtx");
2198     + info->dma = dma_request_chan(dev->parent, "rxtx");
2199    
2200     if (IS_ERR(info->dma)) {
2201     dev_err(dev, "DMA engine request failed\n");
2202     diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
2203     index 6cc9c929ff57..37775fc09e09 100644
2204     --- a/drivers/mtd/spi-nor/Kconfig
2205     +++ b/drivers/mtd/spi-nor/Kconfig
2206     @@ -41,7 +41,7 @@ config SPI_ASPEED_SMC
2207    
2208     config SPI_ATMEL_QUADSPI
2209     tristate "Atmel Quad SPI Controller"
2210     - depends on ARCH_AT91 || (ARM && COMPILE_TEST)
2211     + depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
2212     depends on OF && HAS_IOMEM
2213     help
2214     This enables support for the Quad SPI controller in master mode.
2215     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
2216     index b164f705709d..3b5b47e98c73 100644
2217     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
2218     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
2219     @@ -9360,10 +9360,16 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
2220     BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
2221     rc);
2222    
2223     - /* Remove all currently configured VLANs */
2224     - rc = bnx2x_del_all_vlans(bp);
2225     - if (rc < 0)
2226     - BNX2X_ERR("Failed to delete all VLANs\n");
2227     + /* The whole *vlan_obj structure may be not initialized if VLAN
2228     + * filtering offload is not supported by hardware. Currently this is
2229     + * true for all hardware covered by CHIP_IS_E1x().
2230     + */
2231     + if (!CHIP_IS_E1x(bp)) {
2232     + /* Remove all currently configured VLANs */
2233     + rc = bnx2x_del_all_vlans(bp);
2234     + if (rc < 0)
2235     + BNX2X_ERR("Failed to delete all VLANs\n");
2236     + }
2237    
2238     /* Disable LLH */
2239     if (!CHIP_IS_E1(bp))
2240     diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
2241     index a4681780a55d..098d8764c0ea 100644
2242     --- a/drivers/net/ethernet/ibm/ibmveth.c
2243     +++ b/drivers/net/ethernet/ibm/ibmveth.c
2244     @@ -1171,11 +1171,15 @@ out:
2245    
2246     map_failed_frags:
2247     last = i+1;
2248     - for (i = 0; i < last; i++)
2249     + for (i = 1; i < last; i++)
2250     dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
2251     descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
2252     DMA_TO_DEVICE);
2253    
2254     + dma_unmap_single(&adapter->vdev->dev,
2255     + descs[0].fields.address,
2256     + descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
2257     + DMA_TO_DEVICE);
2258     map_failed:
2259     if (!firmware_has_feature(FW_FEATURE_CMO))
2260     netdev_err(netdev, "tx: unable to map xmit buffer\n");
2261     diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
2262     index 17e6dcd2eb42..c99dd3f1e6a8 100644
2263     --- a/drivers/net/hamradio/6pack.c
2264     +++ b/drivers/net/hamradio/6pack.c
2265     @@ -523,10 +523,7 @@ static void resync_tnc(struct timer_list *t)
2266    
2267    
2268     /* Start resync timer again -- the TNC might be still absent */
2269     -
2270     - del_timer(&sp->resync_t);
2271     - sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
2272     - add_timer(&sp->resync_t);
2273     + mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
2274     }
2275    
2276     static inline int tnc_init(struct sixpack *sp)
2277     @@ -537,9 +534,7 @@ static inline int tnc_init(struct sixpack *sp)
2278    
2279     sp->tty->ops->write(sp->tty, &inbyte, 1);
2280    
2281     - del_timer(&sp->resync_t);
2282     - sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
2283     - add_timer(&sp->resync_t);
2284     + mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
2285    
2286     return 0;
2287     }
2288     @@ -897,11 +892,8 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
2289     /* if the state byte has been received, the TNC is present,
2290     so the resync timer can be reset. */
2291    
2292     - if (sp->tnc_state == TNC_IN_SYNC) {
2293     - del_timer(&sp->resync_t);
2294     - sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT;
2295     - add_timer(&sp->resync_t);
2296     - }
2297     + if (sp->tnc_state == TNC_IN_SYNC)
2298     + mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT);
2299    
2300     sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
2301     }
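
The 6pack hunks collapse the del_timer() / set expires / add_timer() sequences into mod_timer(), which re-arms a timer in one step whether or not it is currently pending. Condensed kernel-style sketch of the idiom (not a complete module):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list resync_t;

	static void resync_rearm(unsigned long delay_jiffies)
	{
		/*
		 * mod_timer() is equivalent to del_timer() + updating ->expires +
		 * add_timer(), but performs the update atomically, so the timer
		 * cannot fire in the window between deletion and re-addition.
		 */
		mod_timer(&resync_t, jiffies + delay_jiffies);
	}
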
2302     diff --git a/drivers/net/tap.c b/drivers/net/tap.c
2303     index f03004f37eca..276f800ed57f 100644
2304     --- a/drivers/net/tap.c
2305     +++ b/drivers/net/tap.c
2306     @@ -1177,8 +1177,6 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
2307     goto err_kfree;
2308     }
2309    
2310     - skb_probe_transport_header(skb, ETH_HLEN);
2311     -
2312     /* Move network header to the right position for VLAN tagged packets */
2313     if ((skb->protocol == htons(ETH_P_8021Q) ||
2314     skb->protocol == htons(ETH_P_8021AD)) &&
2315     @@ -1189,6 +1187,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
2316     tap = rcu_dereference(q->tap);
2317     if (tap) {
2318     skb->dev = tap->dev;
2319     + skb_probe_transport_header(skb, ETH_HLEN);
2320     dev_queue_xmit(skb);
2321     } else {
2322     kfree_skb(skb);
2323     diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
2324     index 1098263ab862..46c3d983b7b7 100644
2325     --- a/drivers/net/wan/x25_asy.c
2326     +++ b/drivers/net/wan/x25_asy.c
2327     @@ -485,8 +485,10 @@ static int x25_asy_open(struct net_device *dev)
2328    
2329     /* Cleanup */
2330     kfree(sl->xbuff);
2331     + sl->xbuff = NULL;
2332     noxbuff:
2333     kfree(sl->rbuff);
2334     + sl->rbuff = NULL;
2335     norbuff:
2336     return -ENOMEM;
2337     }
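
Nulling the buffer pointers after kfree() in the x25_asy error path means a later cleanup or retried open cannot free the same allocation twice or reuse the stale pointer. Tiny standalone illustration (plain C, hypothetical struct):

	#include <stdlib.h>

	struct chan {
		char *xbuff;
		char *rbuff;
	};

	static int chan_open(struct chan *sl, size_t len)
	{
		sl->xbuff = malloc(len);
		sl->rbuff = malloc(len);
		if (sl->xbuff && sl->rbuff)
			return 0;

		/* Free whatever was allocated and clear the pointers so a later
		 * cleanup path sees NULL instead of a dangling pointer. */
		free(sl->xbuff);
		sl->xbuff = NULL;
		free(sl->rbuff);
		sl->rbuff = NULL;
		return -1;
	}

	static void chan_close(struct chan *sl)
	{
		free(sl->xbuff);	/* free(NULL) is a no-op, so this stays safe */
		free(sl->rbuff);
		sl->xbuff = NULL;
		sl->rbuff = NULL;
	}

	int main(void)
	{
		struct chan c = { 0 };

		chan_open(&c, 4096);
		chan_close(&c);		/* safe whether or not the open succeeded */
		return 0;
	}
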
2338     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2339     index 7f0a5bade70a..c0e3ae7bf2ae 100644
2340     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2341     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2342     @@ -5196,10 +5196,17 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
2343     .del_pmk = brcmf_cfg80211_del_pmk,
2344     };
2345    
2346     -struct cfg80211_ops *brcmf_cfg80211_get_ops(void)
2347     +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
2348     {
2349     - return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
2350     + struct cfg80211_ops *ops;
2351     +
2352     + ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
2353     GFP_KERNEL);
2354     +
2355     + if (ops && settings->roamoff)
2356     + ops->update_connect_params = NULL;
2357     +
2358     + return ops;
2359     }
2360    
2361     struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
2362     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
2363     index a4aec0004e4f..9a6287f084a9 100644
2364     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
2365     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
2366     @@ -404,7 +404,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
2367     void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
2368     s32 brcmf_cfg80211_up(struct net_device *ndev);
2369     s32 brcmf_cfg80211_down(struct net_device *ndev);
2370     -struct cfg80211_ops *brcmf_cfg80211_get_ops(void);
2371     +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings);
2372     enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
2373    
2374     struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
2375     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
2376     index b1f702faff4f..860a4372cb56 100644
2377     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
2378     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
2379     @@ -1130,7 +1130,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
2380    
2381     brcmf_dbg(TRACE, "Enter\n");
2382    
2383     - ops = brcmf_cfg80211_get_ops();
2384     + ops = brcmf_cfg80211_get_ops(settings);
2385     if (!ops)
2386     return -ENOMEM;
2387    
2388     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2389     index 9095b830ae4d..9927079a9ace 100644
2390     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2391     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2392     @@ -641,8 +641,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
2393     struct brcmf_fw_request *fwreq;
2394     char chipname[12];
2395     const char *mp_path;
2396     + size_t mp_path_len;
2397     u32 i, j;
2398     - char end;
2399     + char end = '\0';
2400     size_t reqsz;
2401    
2402     for (i = 0; i < table_size; i++) {
2403     @@ -667,7 +668,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
2404     mapping_table[i].fw_base, chipname);
2405    
2406     mp_path = brcmf_mp_global.firmware_path;
2407     - end = mp_path[strlen(mp_path) - 1];
2408     + mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);
2409     + if (mp_path_len)
2410     + end = mp_path[mp_path_len - 1];
2411     +
2412     fwreq->n_items = n_fwnames;
2413    
2414     for (j = 0; j < n_fwnames; j++) {
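
With an empty firmware_path the old brcmfmac code indexed mp_path[strlen(mp_path) - 1], i.e. position (size_t)-1; the fix measures a bounded length first and only looks at the last character when the string is non-empty. Small standalone sketch of that guard (plain C; BUF_LEN stands in for BRCMF_FW_ALTPATH_LEN):

	#include <stdio.h>
	#include <string.h>

	#define BUF_LEN 64	/* stand-in for the driver's path buffer size */

	/* Decide whether a '/' must be appended between the path and a file name. */
	static int needs_separator(const char *path)
	{
		size_t len = strnlen(path, BUF_LEN);
		char end = '\0';	/* safe default for an empty path */

		if (len)
			end = path[len - 1];

		return len && end != '/';
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       needs_separator(""),			/* 0: nothing to separate */
		       needs_separator("/lib/firmware"),	/* 1: needs a slash */
		       needs_separator("/lib/firmware/"));	/* 0: already ends in '/' */
		return 0;
	}
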
2415     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2416     index 9e015212c2c0..8d4711590dfc 100644
2417     --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2418     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2419     @@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
2420     {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
2421    
2422     /* 9000 Series */
2423     + {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
2424     + {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
2425     + {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
2426     + {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
2427     + {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
2428     + {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
2429     + {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
2430     + {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
2431     + {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
2432     + {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
2433     + {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
2434     + {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
2435     + {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
2436     + {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
2437     + {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
2438     + {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
2439     + {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
2440     + {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
2441     + {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
2442     + {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
2443     + {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
2444     + {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
2445     + {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
2446     + {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
2447     + {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
2448     + {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
2449     + {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
2450     + {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
2451     + {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
2452     + {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
2453     + {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
2454     + {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
2455     + {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
2456     + {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
2457     + {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
2458     + {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
2459     + {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
2460     + {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
2461     + {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
2462     + {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
2463     + {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
2464     + {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
2465     + {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
2466     + {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
2467     + {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
2468     + {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
2469     + {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
2470     + {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
2471     + {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
2472     + {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
2473     {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
2474     {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
2475     {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
2476     diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
2477     index a3fb235fea0d..7431a795a624 100644
2478     --- a/drivers/rtc/rtc-m41t80.c
2479     +++ b/drivers/rtc/rtc-m41t80.c
2480     @@ -393,7 +393,7 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
2481     alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
2482     alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
2483     alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
2484     - alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f);
2485     + alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1;
2486    
2487     alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
2488     alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
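
The RTC stores the alarm month as BCD 1-12, while tm_mon in struct rtc_time counts from 0 (matching struct tm), so the read path must subtract one after the BCD conversion. Standalone illustration (plain C; bcd2bin re-implemented here only for the example):

	#include <stdio.h>

	/* Same conversion the kernel's bcd2bin() helper performs. */
	static unsigned int bcd2bin(unsigned char val)
	{
		return (val & 0x0f) + (val >> 4) * 10;
	}

	int main(void)
	{
		unsigned char alarm_mon_reg = 0x12;	/* BCD for December (12) */
		int tm_mon = bcd2bin(alarm_mon_reg & 0x3f) - 1;

		/* struct tm / struct rtc_time count months from 0, so December is 11. */
		printf("register 0x%02x -> tm_mon %d\n", alarm_mon_reg, tm_mon);
		return 0;
	}
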
2489     diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
2490     index f35cc10772f6..25abf2d1732a 100644
2491     --- a/drivers/spi/spi-bcm2835.c
2492     +++ b/drivers/spi/spi-bcm2835.c
2493     @@ -88,7 +88,7 @@ struct bcm2835_spi {
2494     u8 *rx_buf;
2495     int tx_len;
2496     int rx_len;
2497     - bool dma_pending;
2498     + unsigned int dma_pending;
2499     };
2500    
2501     static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
2502     @@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
2503     /* Write as many bytes as possible to FIFO */
2504     bcm2835_wr_fifo(bs);
2505    
2506     - /* based on flags decide if we can finish the transfer */
2507     - if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
2508     + if (!bs->rx_len) {
2509     /* Transfer complete - reset SPI HW */
2510     bcm2835_spi_reset_hw(master);
2511     /* wake up the framework */
2512     @@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data)
2513     * is called the tx-dma must have finished - can't get to this
2514     * situation otherwise...
2515     */
2516     - dmaengine_terminate_all(master->dma_tx);
2517     -
2518     - /* mark as no longer pending */
2519     - bs->dma_pending = 0;
2520     + if (cmpxchg(&bs->dma_pending, true, false)) {
2521     + dmaengine_terminate_all(master->dma_tx);
2522     + }
2523    
2524     /* and mark as completed */;
2525     complete(&master->xfer_completion);
2526     @@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
2527     if (ret) {
2528     /* need to reset on errors */
2529     dmaengine_terminate_all(master->dma_tx);
2530     + bs->dma_pending = false;
2531     bcm2835_spi_reset_hw(master);
2532     return ret;
2533     }
2534     @@ -617,10 +616,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master,
2535     struct bcm2835_spi *bs = spi_master_get_devdata(master);
2536    
2537     /* if an error occurred and we have an active dma, then terminate */
2538     - if (bs->dma_pending) {
2539     + if (cmpxchg(&bs->dma_pending, true, false)) {
2540     dmaengine_terminate_all(master->dma_tx);
2541     dmaengine_terminate_all(master->dma_rx);
2542     - bs->dma_pending = 0;
2543     }
2544     /* and reset */
2545     bcm2835_spi_reset_hw(master);
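
In the bcm2835 SPI hunks both the DMA-done callback and the error handler may try to terminate the DMA, so dma_pending becomes a flag claimed with cmpxchg(): only the caller that wins the swap from 1 to 0 calls dmaengine_terminate_all(), and the submit path clears the flag again on failure. Standalone sketch of that claim pattern using C11 atomics (plain C; terminate() is a stand-in for the dmaengine call):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_uint dma_pending = 1;	/* set when a DMA is submitted */

	static void terminate(const char *who)
	{
		printf("%s terminates the DMA\n", who);
	}

	/* Returns true only for the one caller that actually claimed the flag. */
	static bool claim_and_terminate(const char *who)
	{
		unsigned int expected = 1;

		if (atomic_compare_exchange_strong(&dma_pending, &expected, 0)) {
			terminate(who);
			return true;
		}
		return false;
	}

	int main(void)
	{
		/* Whichever of the two paths runs first wins; the other is a no-op. */
		claim_and_terminate("dma-done callback");
		claim_and_terminate("error handler");
		return 0;
	}
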
2546     diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
2547     index 781754f36da7..8da66e996d23 100644
2548     --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
2549     +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
2550     @@ -143,7 +143,6 @@ vc_vchi_audio_init(VCHI_INSTANCE_T vchi_instance,
2551     dev_err(instance->dev,
2552     "failed to open VCHI service connection (status=%d)\n",
2553     status);
2554     - kfree(instance);
2555     return -EPERM;
2556     }
2557    
2558     diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
2559     index ca351c950344..5c3e4df804eb 100644
2560     --- a/drivers/staging/wilc1000/wilc_sdio.c
2561     +++ b/drivers/staging/wilc1000/wilc_sdio.c
2562     @@ -841,6 +841,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status)
2563     if (!sdio_priv->irq_gpio) {
2564     int i;
2565    
2566     + cmd.read_write = 0;
2567     cmd.function = 1;
2568     cmd.address = 0x04;
2569     cmd.data = 0;
2570     diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
2571     index 57c66d2c3471..5413a04023f9 100644
2572     --- a/drivers/tty/serial/xilinx_uartps.c
2573     +++ b/drivers/tty/serial/xilinx_uartps.c
2574     @@ -123,7 +123,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
2575     #define CDNS_UART_IXR_RXTRIG 0x00000001 /* RX FIFO trigger interrupt */
2576     #define CDNS_UART_IXR_RXFULL 0x00000004 /* RX FIFO full interrupt. */
2577     #define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
2578     -#define CDNS_UART_IXR_MASK 0x00001FFF /* Valid bit mask */
2579     +#define CDNS_UART_IXR_RXMASK 0x000021e7 /* Valid RX bit mask */
2580    
2581     /*
2582     * Do not enable parity error interrupt for the following
2583     @@ -364,7 +364,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
2584     cdns_uart_handle_tx(dev_id);
2585     isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
2586     }
2587     - if (isrstatus & CDNS_UART_IXR_MASK)
2588     + if (isrstatus & CDNS_UART_IXR_RXMASK)
2589     cdns_uart_handle_rx(dev_id, isrstatus);
2590    
2591     spin_unlock(&port->lock);
2592     diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
2593     index 987fc5ba6321..70e6c956c23c 100644
2594     --- a/drivers/usb/Kconfig
2595     +++ b/drivers/usb/Kconfig
2596     @@ -205,8 +205,4 @@ config USB_ULPI_BUS
2597     To compile this driver as a module, choose M here: the module will
2598     be called ulpi.
2599    
2600     -config USB_ROLE_SWITCH
2601     - tristate
2602     - select USB_COMMON
2603     -
2604     endif # USB_SUPPORT
2605     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2606     index 1b68fed464cb..ed8c62b2d9d1 100644
2607     --- a/drivers/usb/class/cdc-acm.c
2608     +++ b/drivers/usb/class/cdc-acm.c
2609     @@ -581,6 +581,13 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
2610     if (retval)
2611     goto error_init_termios;
2612    
2613     + /*
2614     + * Suppress initial echoing for some devices which might send data
2615     + * immediately after acm driver has been installed.
2616     + */
2617     + if (acm->quirks & DISABLE_ECHO)
2618     + tty->termios.c_lflag &= ~ECHO;
2619     +
2620     tty->driver_data = acm;
2621    
2622     return 0;
2623     @@ -1657,6 +1664,9 @@ static const struct usb_device_id acm_ids[] = {
2624     { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
2625     .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
2626     },
2627     + { USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
2628     + .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
2629     + },
2630     { USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
2631     .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
2632     },
2633     diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
2634     index ca06b20d7af9..515aad0847ee 100644
2635     --- a/drivers/usb/class/cdc-acm.h
2636     +++ b/drivers/usb/class/cdc-acm.h
2637     @@ -140,3 +140,4 @@ struct acm {
2638     #define QUIRK_CONTROL_LINE_STATE BIT(6)
2639     #define CLEAR_HALT_CONDITIONS BIT(7)
2640     #define SEND_ZERO_PACKET BIT(8)
2641     +#define DISABLE_ECHO BIT(9)
2642     diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
2643     index fb4d5ef4165c..0a7c45e85481 100644
2644     --- a/drivers/usb/common/Makefile
2645     +++ b/drivers/usb/common/Makefile
2646     @@ -9,4 +9,3 @@ usb-common-$(CONFIG_USB_LED_TRIG) += led.o
2647    
2648     obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o
2649     obj-$(CONFIG_USB_ULPI_BUS) += ulpi.o
2650     -obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
2651     diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
2652     deleted file mode 100644
2653     index 99116af07f1d..000000000000
2654     --- a/drivers/usb/common/roles.c
2655     +++ /dev/null
2656     @@ -1,314 +0,0 @@
2657     -// SPDX-License-Identifier: GPL-2.0
2658     -/*
2659     - * USB Role Switch Support
2660     - *
2661     - * Copyright (C) 2018 Intel Corporation
2662     - * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
2663     - * Hans de Goede <hdegoede@redhat.com>
2664     - */
2665     -
2666     -#include <linux/usb/role.h>
2667     -#include <linux/device.h>
2668     -#include <linux/module.h>
2669     -#include <linux/mutex.h>
2670     -#include <linux/slab.h>
2671     -
2672     -static struct class *role_class;
2673     -
2674     -struct usb_role_switch {
2675     - struct device dev;
2676     - struct mutex lock; /* device lock*/
2677     - enum usb_role role;
2678     -
2679     - /* From descriptor */
2680     - struct device *usb2_port;
2681     - struct device *usb3_port;
2682     - struct device *udc;
2683     - usb_role_switch_set_t set;
2684     - usb_role_switch_get_t get;
2685     - bool allow_userspace_control;
2686     -};
2687     -
2688     -#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
2689     -
2690     -/**
2691     - * usb_role_switch_set_role - Set USB role for a switch
2692     - * @sw: USB role switch
2693     - * @role: USB role to be switched to
2694     - *
2695     - * Set USB role @role for @sw.
2696     - */
2697     -int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
2698     -{
2699     - int ret;
2700     -
2701     - if (IS_ERR_OR_NULL(sw))
2702     - return 0;
2703     -
2704     - mutex_lock(&sw->lock);
2705     -
2706     - ret = sw->set(sw->dev.parent, role);
2707     - if (!ret)
2708     - sw->role = role;
2709     -
2710     - mutex_unlock(&sw->lock);
2711     -
2712     - return ret;
2713     -}
2714     -EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
2715     -
2716     -/**
2717     - * usb_role_switch_get_role - Get the USB role for a switch
2718     - * @sw: USB role switch
2719     - *
2720     - * Depending on the role-switch-driver this function returns either a cached
2721     - * value of the last set role, or reads back the actual value from the hardware.
2722     - */
2723     -enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
2724     -{
2725     - enum usb_role role;
2726     -
2727     - if (IS_ERR_OR_NULL(sw))
2728     - return USB_ROLE_NONE;
2729     -
2730     - mutex_lock(&sw->lock);
2731     -
2732     - if (sw->get)
2733     - role = sw->get(sw->dev.parent);
2734     - else
2735     - role = sw->role;
2736     -
2737     - mutex_unlock(&sw->lock);
2738     -
2739     - return role;
2740     -}
2741     -EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
2742     -
2743     -static int __switch_match(struct device *dev, const void *name)
2744     -{
2745     - return !strcmp((const char *)name, dev_name(dev));
2746     -}
2747     -
2748     -static void *usb_role_switch_match(struct device_connection *con, int ep,
2749     - void *data)
2750     -{
2751     - struct device *dev;
2752     -
2753     - dev = class_find_device(role_class, NULL, con->endpoint[ep],
2754     - __switch_match);
2755     -
2756     - return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
2757     -}
2758     -
2759     -/**
2760     - * usb_role_switch_get - Find USB role switch linked with the caller
2761     - * @dev: The caller device
2762     - *
2763     - * Finds and returns role switch linked with @dev. The reference count for the
2764     - * found switch is incremented.
2765     - */
2766     -struct usb_role_switch *usb_role_switch_get(struct device *dev)
2767     -{
2768     - struct usb_role_switch *sw;
2769     -
2770     - sw = device_connection_find_match(dev, "usb-role-switch", NULL,
2771     - usb_role_switch_match);
2772     -
2773     - if (!IS_ERR_OR_NULL(sw))
2774     - WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
2775     -
2776     - return sw;
2777     -}
2778     -EXPORT_SYMBOL_GPL(usb_role_switch_get);
2779     -
2780     -/**
2781     - * usb_role_switch_put - Release handle to a switch
2782     - * @sw: USB Role Switch
2783     - *
2784     - * Decrement reference count for @sw.
2785     - */
2786     -void usb_role_switch_put(struct usb_role_switch *sw)
2787     -{
2788     - if (!IS_ERR_OR_NULL(sw)) {
2789     - put_device(&sw->dev);
2790     - module_put(sw->dev.parent->driver->owner);
2791     - }
2792     -}
2793     -EXPORT_SYMBOL_GPL(usb_role_switch_put);
2794     -
2795     -static umode_t
2796     -usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
2797     -{
2798     - struct device *dev = container_of(kobj, typeof(*dev), kobj);
2799     - struct usb_role_switch *sw = to_role_switch(dev);
2800     -
2801     - if (sw->allow_userspace_control)
2802     - return attr->mode;
2803     -
2804     - return 0;
2805     -}
2806     -
2807     -static const char * const usb_roles[] = {
2808     - [USB_ROLE_NONE] = "none",
2809     - [USB_ROLE_HOST] = "host",
2810     - [USB_ROLE_DEVICE] = "device",
2811     -};
2812     -
2813     -static ssize_t
2814     -role_show(struct device *dev, struct device_attribute *attr, char *buf)
2815     -{
2816     - struct usb_role_switch *sw = to_role_switch(dev);
2817     - enum usb_role role = usb_role_switch_get_role(sw);
2818     -
2819     - return sprintf(buf, "%s\n", usb_roles[role]);
2820     -}
2821     -
2822     -static ssize_t role_store(struct device *dev, struct device_attribute *attr,
2823     - const char *buf, size_t size)
2824     -{
2825     - struct usb_role_switch *sw = to_role_switch(dev);
2826     - int ret;
2827     -
2828     - ret = sysfs_match_string(usb_roles, buf);
2829     - if (ret < 0) {
2830     - bool res;
2831     -
2832     - /* Extra check if the user wants to disable the switch */
2833     - ret = kstrtobool(buf, &res);
2834     - if (ret || res)
2835     - return -EINVAL;
2836     - }
2837     -
2838     - ret = usb_role_switch_set_role(sw, ret);
2839     - if (ret)
2840     - return ret;
2841     -
2842     - return size;
2843     -}
2844     -static DEVICE_ATTR_RW(role);
2845     -
2846     -static struct attribute *usb_role_switch_attrs[] = {
2847     - &dev_attr_role.attr,
2848     - NULL,
2849     -};
2850     -
2851     -static const struct attribute_group usb_role_switch_group = {
2852     - .is_visible = usb_role_switch_is_visible,
2853     - .attrs = usb_role_switch_attrs,
2854     -};
2855     -
2856     -static const struct attribute_group *usb_role_switch_groups[] = {
2857     - &usb_role_switch_group,
2858     - NULL,
2859     -};
2860     -
2861     -static int
2862     -usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2863     -{
2864     - int ret;
2865     -
2866     - ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
2867     - if (ret)
2868     - dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
2869     -
2870     - return ret;
2871     -}
2872     -
2873     -static void usb_role_switch_release(struct device *dev)
2874     -{
2875     - struct usb_role_switch *sw = to_role_switch(dev);
2876     -
2877     - kfree(sw);
2878     -}
2879     -
2880     -static const struct device_type usb_role_dev_type = {
2881     - .name = "usb_role_switch",
2882     - .groups = usb_role_switch_groups,
2883     - .uevent = usb_role_switch_uevent,
2884     - .release = usb_role_switch_release,
2885     -};
2886     -
2887     -/**
2888     - * usb_role_switch_register - Register USB Role Switch
2889     - * @parent: Parent device for the switch
2890     - * @desc: Description of the switch
2891     - *
2892     - * USB Role Switch is a device capable or choosing the role for USB connector.
2893     - * On platforms where the USB controller is dual-role capable, the controller
2894     - * driver will need to register the switch. On platforms where the USB host and
2895     - * USB device controllers behind the connector are separate, there will be a
2896     - * mux, and the driver for that mux will need to register the switch.
2897     - *
2898     - * Returns handle to a new role switch or ERR_PTR. The content of @desc is
2899     - * copied.
2900     - */
2901     -struct usb_role_switch *
2902     -usb_role_switch_register(struct device *parent,
2903     - const struct usb_role_switch_desc *desc)
2904     -{
2905     - struct usb_role_switch *sw;
2906     - int ret;
2907     -
2908     - if (!desc || !desc->set)
2909     - return ERR_PTR(-EINVAL);
2910     -
2911     - sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2912     - if (!sw)
2913     - return ERR_PTR(-ENOMEM);
2914     -
2915     - mutex_init(&sw->lock);
2916     -
2917     - sw->allow_userspace_control = desc->allow_userspace_control;
2918     - sw->usb2_port = desc->usb2_port;
2919     - sw->usb3_port = desc->usb3_port;
2920     - sw->udc = desc->udc;
2921     - sw->set = desc->set;
2922     - sw->get = desc->get;
2923     -
2924     - sw->dev.parent = parent;
2925     - sw->dev.class = role_class;
2926     - sw->dev.type = &usb_role_dev_type;
2927     - dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
2928     -
2929     - ret = device_register(&sw->dev);
2930     - if (ret) {
2931     - put_device(&sw->dev);
2932     - return ERR_PTR(ret);
2933     - }
2934     -
2935     - /* TODO: Symlinks for the host port and the device controller. */
2936     -
2937     - return sw;
2938     -}
2939     -EXPORT_SYMBOL_GPL(usb_role_switch_register);
2940     -
2941     -/**
2942     - * usb_role_switch_unregister - Unregsiter USB Role Switch
2943     - * @sw: USB Role Switch
2944     - *
2945     - * Unregister switch that was registered with usb_role_switch_register().
2946     - */
2947     -void usb_role_switch_unregister(struct usb_role_switch *sw)
2948     -{
2949     - if (!IS_ERR_OR_NULL(sw))
2950     - device_unregister(&sw->dev);
2951     -}
2952     -EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
2953     -
2954     -static int __init usb_roles_init(void)
2955     -{
2956     - role_class = class_create(THIS_MODULE, "usb_role");
2957     - return PTR_ERR_OR_ZERO(role_class);
2958     -}
2959     -subsys_initcall(usb_roles_init);
2960     -
2961     -static void __exit usb_roles_exit(void)
2962     -{
2963     - class_destroy(role_class);
2964     -}
2965     -module_exit(usb_roles_exit);
2966     -
2967     -MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
2968     -MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
2969     -MODULE_LICENSE("GPL v2");
2970     -MODULE_DESCRIPTION("USB Role Class");
2971     diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
2972     index 3f9bccc95add..c089ffa1f0a8 100644
2973     --- a/drivers/usb/dwc2/hcd.h
2974     +++ b/drivers/usb/dwc2/hcd.h
2975     @@ -366,7 +366,7 @@ struct dwc2_qh {
2976     u32 desc_list_sz;
2977     u32 *n_bytes;
2978     struct timer_list unreserve_timer;
2979     - struct timer_list wait_timer;
2980     + struct hrtimer wait_timer;
2981     struct dwc2_tt *dwc_tt;
2982     int ttport;
2983     unsigned tt_buffer_dirty:1;
2984     diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
2985     index 40839591d2ec..ea3aa640c15c 100644
2986     --- a/drivers/usb/dwc2/hcd_queue.c
2987     +++ b/drivers/usb/dwc2/hcd_queue.c
2988     @@ -59,7 +59,7 @@
2989     #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
2990    
2991     /* If we get a NAK, wait this long before retrying */
2992     -#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1))
2993     +#define DWC2_RETRY_WAIT_DELAY 1*1E6L
2994    
2995     /**
2996     * dwc2_periodic_channel_available() - Checks that a channel is available for a
2997     @@ -1464,10 +1464,12 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
2998     * qh back to the "inactive" list, then queues transactions.
2999     *
3000     * @t: Pointer to wait_timer in a qh.
3001     + *
3002     + * Return: HRTIMER_NORESTART to not automatically restart this timer.
3003     */
3004     -static void dwc2_wait_timer_fn(struct timer_list *t)
3005     +static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
3006     {
3007     - struct dwc2_qh *qh = from_timer(qh, t, wait_timer);
3008     + struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
3009     struct dwc2_hsotg *hsotg = qh->hsotg;
3010     unsigned long flags;
3011    
3012     @@ -1491,6 +1493,7 @@ static void dwc2_wait_timer_fn(struct timer_list *t)
3013     }
3014    
3015     spin_unlock_irqrestore(&hsotg->lock, flags);
3016     + return HRTIMER_NORESTART;
3017     }
3018    
3019     /**
3020     @@ -1521,7 +1524,8 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
3021     /* Initialize QH */
3022     qh->hsotg = hsotg;
3023     timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
3024     - timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0);
3025     + hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3026     + qh->wait_timer.function = &dwc2_wait_timer_fn;
3027     qh->ep_type = ep_type;
3028     qh->ep_is_in = ep_is_in;
3029    
3030     @@ -1690,7 +1694,7 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
3031     * won't do anything anyway, but we want it to finish before we free
3032     * memory.
3033     */
3034     - del_timer_sync(&qh->wait_timer);
3035     + hrtimer_cancel(&qh->wait_timer);
3036    
3037     dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
3038    
3039     @@ -1716,6 +1720,7 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
3040     {
3041     int status;
3042     u32 intr_mask;
3043     + ktime_t delay;
3044    
3045     if (dbg_qh(qh))
3046     dev_vdbg(hsotg->dev, "%s()\n", __func__);
3047     @@ -1734,8 +1739,8 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
3048     list_add_tail(&qh->qh_list_entry,
3049     &hsotg->non_periodic_sched_waiting);
3050     qh->wait_timer_cancel = false;
3051     - mod_timer(&qh->wait_timer,
3052     - jiffies + DWC2_RETRY_WAIT_DELAY + 1);
3053     + delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
3054     + hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
3055     } else {
3056     list_add_tail(&qh->qh_list_entry,
3057     &hsotg->non_periodic_sched_inactive);
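Note on the dwc2 hunks above: the qh->wait_timer is converted from a jiffies-based timer_list to an hrtimer. With the old code, msecs_to_jiffies(1) + 1 can stretch the intended ~1 ms NAK retry delay to 20 ms at HZ=100, while the new DWC2_RETRY_WAIT_DELAY of 1E6 ns arms a true 1 ms relative expiry. Below is a minimal sketch of the same timer_list-to-hrtimer conversion pattern in isolation; struct my_dev, my_wait_timer_fn and MY_DELAY_NS are made-up names, not part of this patch.

#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/spinlock.h>

#define MY_DELAY_NS	(1 * NSEC_PER_MSEC)	/* hypothetical 1 ms delay */

struct my_dev {
	spinlock_t lock;
	struct hrtimer wait_timer;
};

/* hrtimer callbacks run in interrupt context and report whether to rearm. */
static enum hrtimer_restart my_wait_timer_fn(struct hrtimer *t)
{
	struct my_dev *dev = container_of(t, struct my_dev, wait_timer);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	/* ... re-queue the deferred work here ... */
	spin_unlock_irqrestore(&dev->lock, flags);

	return HRTIMER_NORESTART;	/* one-shot: do not restart automatically */
}

static void my_dev_init(struct my_dev *dev)
{
	spin_lock_init(&dev->lock);
	hrtimer_init(&dev->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dev->wait_timer.function = my_wait_timer_fn;
}

static void my_dev_arm(struct my_dev *dev)
{
	/* Relative one-shot expiry with nanosecond resolution. */
	hrtimer_start(&dev->wait_timer, ktime_set(0, MY_DELAY_NS), HRTIMER_MODE_REL);
}

static void my_dev_teardown(struct my_dev *dev)
{
	hrtimer_cancel(&dev->wait_timer);	/* waits for a running callback */
}

hrtimer_cancel() takes over the role of del_timer_sync(): it waits for a callback in flight to finish, which is why the dwc2_hcd_qh_free() hunk above can call it right before freeing the qh.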
3058     diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
3059     index 7c1b6938f212..38c813b1d203 100644
3060     --- a/drivers/usb/dwc2/params.c
3061     +++ b/drivers/usb/dwc2/params.c
3062     @@ -111,6 +111,7 @@ static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
3063     p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
3064     p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
3065     GAHBCFG_HBSTLEN_SHIFT;
3066     + p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
3067     }
3068    
3069     static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
3070     diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
3071     index 842795856bf4..fdc6e4e403e8 100644
3072     --- a/drivers/usb/dwc3/dwc3-pci.c
3073     +++ b/drivers/usb/dwc3/dwc3-pci.c
3074     @@ -170,20 +170,20 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
3075     * put the gpio descriptors again here because the phy driver
3076     * might want to grab them, too.
3077     */
3078     - gpio = devm_gpiod_get_optional(&pdev->dev, "cs",
3079     - GPIOD_OUT_LOW);
3080     + gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
3081     if (IS_ERR(gpio))
3082     return PTR_ERR(gpio);
3083    
3084     gpiod_set_value_cansleep(gpio, 1);
3085     + gpiod_put(gpio);
3086    
3087     - gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
3088     - GPIOD_OUT_LOW);
3089     + gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
3090     if (IS_ERR(gpio))
3091     return PTR_ERR(gpio);
3092    
3093     if (gpio) {
3094     gpiod_set_value_cansleep(gpio, 1);
3095     + gpiod_put(gpio);
3096     usleep_range(10000, 11000);
3097     }
3098     }
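Note on the dwc3-pci hunk above: the quirk switches from devm_gpiod_get_optional() to plain gpiod_get_optional() followed by gpiod_put(). With the devm_ variant the descriptors stayed bound to the PCI device for its whole lifetime, so the PHY driver mentioned in the comment could never claim the same "cs"/"reset" lines later. A short sketch of the non-devm pattern follows; pulse_reset_gpio is a hypothetical helper name, and "reset" is just an illustrative con_id.

#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Toggle an optional "reset" GPIO once and release it again so another
 * driver may request the same line afterwards. */
static int pulse_reset_gpio(struct device *dev)
{
	struct gpio_desc *gpio;

	gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);	/* real error, e.g. -EPROBE_DEFER */

	if (gpio) {			/* NULL just means "no such GPIO described" */
		gpiod_set_value_cansleep(gpio, 1);
		gpiod_put(gpio);	/* drop the reference immediately */
		usleep_range(10000, 11000);
	}
	return 0;
}

gpiod_get_optional() returns NULL when the GPIO is not described, an ERR_PTR on a genuine error, and a valid descriptor otherwise, which is why both the IS_ERR() check and the NULL check are needed.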
3099     diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
3100     index 984892dd72f5..42668aeca57c 100644
3101     --- a/drivers/usb/host/r8a66597-hcd.c
3102     +++ b/drivers/usb/host/r8a66597-hcd.c
3103     @@ -1979,6 +1979,8 @@ static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
3104    
3105     static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
3106     struct usb_host_endpoint *hep)
3107     +__acquires(r8a66597->lock)
3108     +__releases(r8a66597->lock)
3109     {
3110     struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
3111     struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
3112     @@ -1991,13 +1993,14 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
3113     return;
3114     pipenum = pipe->info.pipenum;
3115    
3116     + spin_lock_irqsave(&r8a66597->lock, flags);
3117     if (pipenum == 0) {
3118     kfree(hep->hcpriv);
3119     hep->hcpriv = NULL;
3120     + spin_unlock_irqrestore(&r8a66597->lock, flags);
3121     return;
3122     }
3123    
3124     - spin_lock_irqsave(&r8a66597->lock, flags);
3125     pipe_stop(r8a66597, pipe);
3126     pipe_irq_disable(r8a66597, pipenum);
3127     disable_irq_empty(r8a66597, pipenum);
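Note on the r8a66597 hunk above: the spin_lock_irqsave() is moved in front of the pipenum == 0 early return, so hep->hcpriv is cleared and freed with r8a66597->lock held instead of racing against the interrupt path; the added __acquires()/__releases() markers are sparse annotations describing the lock handling. The same check-and-free-under-the-lock idea in isolation, with made-up names (struct my_ep_priv, my_endpoint_disable):

#include <linux/spinlock.h>
#include <linux/slab.h>

struct my_ep_priv {
	spinlock_t *lock;	/* hypothetical: lock shared with the IRQ handler */
	void *hcpriv;
};

/* Free per-endpoint state under the controller lock so a concurrent
 * interrupt handler cannot dereference a pointer that is being torn down. */
static void my_endpoint_disable(struct my_ep_priv *ep)
{
	unsigned long flags;

	spin_lock_irqsave(ep->lock, flags);
	kfree(ep->hcpriv);
	ep->hcpriv = NULL;
	spin_unlock_irqrestore(ep->lock, flags);
}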
3128     diff --git a/drivers/usb/roles/Kconfig b/drivers/usb/roles/Kconfig
3129     index f5a5e6f79f1b..e4194ac94510 100644
3130     --- a/drivers/usb/roles/Kconfig
3131     +++ b/drivers/usb/roles/Kconfig
3132     @@ -1,3 +1,16 @@
3133     +config USB_ROLE_SWITCH
3134     + tristate "USB Role Switch Support"
3135     + help
3136     + USB Role Switch is a device that can select the USB role - host or
3137     + device - for a USB port (connector). In most cases dual-role capable
3138     + USB controller will also represent the switch, but on some platforms
3139     + multiplexer/demultiplexer switch is used to route the data lines on
3140     + the USB connector between separate USB host and device controllers.
3141     +
3142     + Say Y here if your USB connectors support both device and host roles.
3143     + To compile the driver as module, choose M here: the module will be
3144     + called roles.ko.
3145     +
3146     if USB_ROLE_SWITCH
3147    
3148     config USB_ROLES_INTEL_XHCI
3149     diff --git a/drivers/usb/roles/Makefile b/drivers/usb/roles/Makefile
3150     index e44b179ba275..c02873206fc1 100644
3151     --- a/drivers/usb/roles/Makefile
3152     +++ b/drivers/usb/roles/Makefile
3153     @@ -1 +1,3 @@
3154     -obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
3155     +obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
3156     +roles-y := class.o
3157     +obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
3158     diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
3159     new file mode 100644
3160     index 000000000000..99116af07f1d
3161     --- /dev/null
3162     +++ b/drivers/usb/roles/class.c
3163     @@ -0,0 +1,314 @@
3164     +// SPDX-License-Identifier: GPL-2.0
3165     +/*
3166     + * USB Role Switch Support
3167     + *
3168     + * Copyright (C) 2018 Intel Corporation
3169     + * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
3170     + * Hans de Goede <hdegoede@redhat.com>
3171     + */
3172     +
3173     +#include <linux/usb/role.h>
3174     +#include <linux/device.h>
3175     +#include <linux/module.h>
3176     +#include <linux/mutex.h>
3177     +#include <linux/slab.h>
3178     +
3179     +static struct class *role_class;
3180     +
3181     +struct usb_role_switch {
3182     + struct device dev;
3183     + struct mutex lock; /* device lock*/
3184     + enum usb_role role;
3185     +
3186     + /* From descriptor */
3187     + struct device *usb2_port;
3188     + struct device *usb3_port;
3189     + struct device *udc;
3190     + usb_role_switch_set_t set;
3191     + usb_role_switch_get_t get;
3192     + bool allow_userspace_control;
3193     +};
3194     +
3195     +#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
3196     +
3197     +/**
3198     + * usb_role_switch_set_role - Set USB role for a switch
3199     + * @sw: USB role switch
3200     + * @role: USB role to be switched to
3201     + *
3202     + * Set USB role @role for @sw.
3203     + */
3204     +int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
3205     +{
3206     + int ret;
3207     +
3208     + if (IS_ERR_OR_NULL(sw))
3209     + return 0;
3210     +
3211     + mutex_lock(&sw->lock);
3212     +
3213     + ret = sw->set(sw->dev.parent, role);
3214     + if (!ret)
3215     + sw->role = role;
3216     +
3217     + mutex_unlock(&sw->lock);
3218     +
3219     + return ret;
3220     +}
3221     +EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
3222     +
3223     +/**
3224     + * usb_role_switch_get_role - Get the USB role for a switch
3225     + * @sw: USB role switch
3226     + *
3227     + * Depending on the role-switch-driver this function returns either a cached
3228     + * value of the last set role, or reads back the actual value from the hardware.
3229     + */
3230     +enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
3231     +{
3232     + enum usb_role role;
3233     +
3234     + if (IS_ERR_OR_NULL(sw))
3235     + return USB_ROLE_NONE;
3236     +
3237     + mutex_lock(&sw->lock);
3238     +
3239     + if (sw->get)
3240     + role = sw->get(sw->dev.parent);
3241     + else
3242     + role = sw->role;
3243     +
3244     + mutex_unlock(&sw->lock);
3245     +
3246     + return role;
3247     +}
3248     +EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
3249     +
3250     +static int __switch_match(struct device *dev, const void *name)
3251     +{
3252     + return !strcmp((const char *)name, dev_name(dev));
3253     +}
3254     +
3255     +static void *usb_role_switch_match(struct device_connection *con, int ep,
3256     + void *data)
3257     +{
3258     + struct device *dev;
3259     +
3260     + dev = class_find_device(role_class, NULL, con->endpoint[ep],
3261     + __switch_match);
3262     +
3263     + return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
3264     +}
3265     +
3266     +/**
3267     + * usb_role_switch_get - Find USB role switch linked with the caller
3268     + * @dev: The caller device
3269     + *
3270     + * Finds and returns role switch linked with @dev. The reference count for the
3271     + * found switch is incremented.
3272     + */
3273     +struct usb_role_switch *usb_role_switch_get(struct device *dev)
3274     +{
3275     + struct usb_role_switch *sw;
3276     +
3277     + sw = device_connection_find_match(dev, "usb-role-switch", NULL,
3278     + usb_role_switch_match);
3279     +
3280     + if (!IS_ERR_OR_NULL(sw))
3281     + WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
3282     +
3283     + return sw;
3284     +}
3285     +EXPORT_SYMBOL_GPL(usb_role_switch_get);
3286     +
3287     +/**
3288     + * usb_role_switch_put - Release handle to a switch
3289     + * @sw: USB Role Switch
3290     + *
3291     + * Decrement reference count for @sw.
3292     + */
3293     +void usb_role_switch_put(struct usb_role_switch *sw)
3294     +{
3295     + if (!IS_ERR_OR_NULL(sw)) {
3296     + put_device(&sw->dev);
3297     + module_put(sw->dev.parent->driver->owner);
3298     + }
3299     +}
3300     +EXPORT_SYMBOL_GPL(usb_role_switch_put);
3301     +
3302     +static umode_t
3303     +usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
3304     +{
3305     + struct device *dev = container_of(kobj, typeof(*dev), kobj);
3306     + struct usb_role_switch *sw = to_role_switch(dev);
3307     +
3308     + if (sw->allow_userspace_control)
3309     + return attr->mode;
3310     +
3311     + return 0;
3312     +}
3313     +
3314     +static const char * const usb_roles[] = {
3315     + [USB_ROLE_NONE] = "none",
3316     + [USB_ROLE_HOST] = "host",
3317     + [USB_ROLE_DEVICE] = "device",
3318     +};
3319     +
3320     +static ssize_t
3321     +role_show(struct device *dev, struct device_attribute *attr, char *buf)
3322     +{
3323     + struct usb_role_switch *sw = to_role_switch(dev);
3324     + enum usb_role role = usb_role_switch_get_role(sw);
3325     +
3326     + return sprintf(buf, "%s\n", usb_roles[role]);
3327     +}
3328     +
3329     +static ssize_t role_store(struct device *dev, struct device_attribute *attr,
3330     + const char *buf, size_t size)
3331     +{
3332     + struct usb_role_switch *sw = to_role_switch(dev);
3333     + int ret;
3334     +
3335     + ret = sysfs_match_string(usb_roles, buf);
3336     + if (ret < 0) {
3337     + bool res;
3338     +
3339     + /* Extra check if the user wants to disable the switch */
3340     + ret = kstrtobool(buf, &res);
3341     + if (ret || res)
3342     + return -EINVAL;
3343     + }
3344     +
3345     + ret = usb_role_switch_set_role(sw, ret);
3346     + if (ret)
3347     + return ret;
3348     +
3349     + return size;
3350     +}
3351     +static DEVICE_ATTR_RW(role);
3352     +
3353     +static struct attribute *usb_role_switch_attrs[] = {
3354     + &dev_attr_role.attr,
3355     + NULL,
3356     +};
3357     +
3358     +static const struct attribute_group usb_role_switch_group = {
3359     + .is_visible = usb_role_switch_is_visible,
3360     + .attrs = usb_role_switch_attrs,
3361     +};
3362     +
3363     +static const struct attribute_group *usb_role_switch_groups[] = {
3364     + &usb_role_switch_group,
3365     + NULL,
3366     +};
3367     +
3368     +static int
3369     +usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
3370     +{
3371     + int ret;
3372     +
3373     + ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
3374     + if (ret)
3375     + dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
3376     +
3377     + return ret;
3378     +}
3379     +
3380     +static void usb_role_switch_release(struct device *dev)
3381     +{
3382     + struct usb_role_switch *sw = to_role_switch(dev);
3383     +
3384     + kfree(sw);
3385     +}
3386     +
3387     +static const struct device_type usb_role_dev_type = {
3388     + .name = "usb_role_switch",
3389     + .groups = usb_role_switch_groups,
3390     + .uevent = usb_role_switch_uevent,
3391     + .release = usb_role_switch_release,
3392     +};
3393     +
3394     +/**
3395     + * usb_role_switch_register - Register USB Role Switch
3396     + * @parent: Parent device for the switch
3397     + * @desc: Description of the switch
3398     + *
3399     + * USB Role Switch is a device capable or choosing the role for USB connector.
3400     + * On platforms where the USB controller is dual-role capable, the controller
3401     + * driver will need to register the switch. On platforms where the USB host and
3402     + * USB device controllers behind the connector are separate, there will be a
3403     + * mux, and the driver for that mux will need to register the switch.
3404     + *
3405     + * Returns handle to a new role switch or ERR_PTR. The content of @desc is
3406     + * copied.
3407     + */
3408     +struct usb_role_switch *
3409     +usb_role_switch_register(struct device *parent,
3410     + const struct usb_role_switch_desc *desc)
3411     +{
3412     + struct usb_role_switch *sw;
3413     + int ret;
3414     +
3415     + if (!desc || !desc->set)
3416     + return ERR_PTR(-EINVAL);
3417     +
3418     + sw = kzalloc(sizeof(*sw), GFP_KERNEL);
3419     + if (!sw)
3420     + return ERR_PTR(-ENOMEM);
3421     +
3422     + mutex_init(&sw->lock);
3423     +
3424     + sw->allow_userspace_control = desc->allow_userspace_control;
3425     + sw->usb2_port = desc->usb2_port;
3426     + sw->usb3_port = desc->usb3_port;
3427     + sw->udc = desc->udc;
3428     + sw->set = desc->set;
3429     + sw->get = desc->get;
3430     +
3431     + sw->dev.parent = parent;
3432     + sw->dev.class = role_class;
3433     + sw->dev.type = &usb_role_dev_type;
3434     + dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
3435     +
3436     + ret = device_register(&sw->dev);
3437     + if (ret) {
3438     + put_device(&sw->dev);
3439     + return ERR_PTR(ret);
3440     + }
3441     +
3442     + /* TODO: Symlinks for the host port and the device controller. */
3443     +
3444     + return sw;
3445     +}
3446     +EXPORT_SYMBOL_GPL(usb_role_switch_register);
3447     +
3448     +/**
3449     + * usb_role_switch_unregister - Unregsiter USB Role Switch
3450     + * @sw: USB Role Switch
3451     + *
3452     + * Unregister switch that was registered with usb_role_switch_register().
3453     + */
3454     +void usb_role_switch_unregister(struct usb_role_switch *sw)
3455     +{
3456     + if (!IS_ERR_OR_NULL(sw))
3457     + device_unregister(&sw->dev);
3458     +}
3459     +EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
3460     +
3461     +static int __init usb_roles_init(void)
3462     +{
3463     + role_class = class_create(THIS_MODULE, "usb_role");
3464     + return PTR_ERR_OR_ZERO(role_class);
3465     +}
3466     +subsys_initcall(usb_roles_init);
3467     +
3468     +static void __exit usb_roles_exit(void)
3469     +{
3470     + class_destroy(role_class);
3471     +}
3472     +module_exit(usb_roles_exit);
3473     +
3474     +MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
3475     +MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
3476     +MODULE_LICENSE("GPL v2");
3477     +MODULE_DESCRIPTION("USB Role Class");
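Note on the USB role-switch hunks: together with the drivers/usb/common Makefile and roles.c hunks earlier in this patch, the Kconfig, Makefile and class.c changes above move the role-switch class from drivers/usb/common/roles.c to drivers/usb/roles/class.c unchanged. USB_ROLE_SWITCH now has its own prompt and help text so it is user-selectable, and the module keeps the roles.ko name via roles-y := class.o. As a reading aid, here is a rough sketch of how a mux or dual-role controller driver would register a switch with the API exported above; my_mux_set_role, my_mux_get_role and their bodies are hypothetical.

#include <linux/device.h>
#include <linux/usb/role.h>

/* Hypothetical mux driver callbacks; @dev is the parent passed at
 * registration time. */
static int my_mux_set_role(struct device *dev, enum usb_role role)
{
	/* program the mux for USB_ROLE_NONE / USB_ROLE_HOST / USB_ROLE_DEVICE */
	return 0;
}

static enum usb_role my_mux_get_role(struct device *dev)
{
	return USB_ROLE_NONE;	/* read back the current state */
}

static struct usb_role_switch *my_mux_register(struct device *dev)
{
	struct usb_role_switch_desc desc = {
		.set = my_mux_set_role,
		.get = my_mux_get_role,
		.allow_userspace_control = true,	/* expose the sysfs "role" file */
	};

	/* The descriptor contents are copied, so it may live on the stack. */
	return usb_role_switch_register(dev, &desc);
}

Consumers on the other side of the connection use usb_role_switch_get(), usb_role_switch_set_role() and usb_role_switch_put(), exactly as exported in the file above.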
3478     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3479     index 1ce27f3ff7a7..aef15497ff31 100644
3480     --- a/drivers/usb/serial/option.c
3481     +++ b/drivers/usb/serial/option.c
3482     @@ -1955,6 +1955,10 @@ static const struct usb_device_id option_ids[] = {
3483     { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
3484     { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
3485     .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
3486     + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
3487     + .driver_info = RSVD(4) | RSVD(5) },
3488     + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
3489     + .driver_info = RSVD(6) },
3490     { } /* Terminating entry */
3491     };
3492     MODULE_DEVICE_TABLE(usb, option_ids);
3493     diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
3494     index a4e0d13fc121..98e7a5df0f6d 100644
3495     --- a/drivers/usb/serial/pl2303.c
3496     +++ b/drivers/usb/serial/pl2303.c
3497     @@ -91,9 +91,14 @@ static const struct usb_device_id id_table[] = {
3498     { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
3499     { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
3500     { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
3501     + { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
3502     { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
3503     + { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
3504     { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
3505     { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
3506     + { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
3507     + { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
3508     + { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
3509     { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
3510     { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
3511     { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
3512     diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
3513     index 26965cc23c17..4e2554d55362 100644
3514     --- a/drivers/usb/serial/pl2303.h
3515     +++ b/drivers/usb/serial/pl2303.h
3516     @@ -119,10 +119,15 @@
3517    
3518     /* Hewlett-Packard POS Pole Displays */
3519     #define HP_VENDOR_ID 0x03f0
3520     +#define HP_LM920_PRODUCT_ID 0x026b
3521     +#define HP_TD620_PRODUCT_ID 0x0956
3522     #define HP_LD960_PRODUCT_ID 0x0b39
3523     #define HP_LCM220_PRODUCT_ID 0x3139
3524     #define HP_LCM960_PRODUCT_ID 0x3239
3525     #define HP_LD220_PRODUCT_ID 0x3524
3526     +#define HP_LD220TA_PRODUCT_ID 0x4349
3527     +#define HP_LD960TA_PRODUCT_ID 0x4439
3528     +#define HP_LM940_PRODUCT_ID 0x5039
3529    
3530     /* Cressi Edy (diving computer) PC interface */
3531     #define CRESSI_VENDOR_ID 0x04b8
3532     diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
3533     index 97d91e55b70a..a0e230b31a88 100644
3534     --- a/fs/btrfs/btrfs_inode.h
3535     +++ b/fs/btrfs/btrfs_inode.h
3536     @@ -146,6 +146,12 @@ struct btrfs_inode {
3537     */
3538     u64 last_unlink_trans;
3539    
3540     + /*
3541     + * Track the transaction id of the last transaction used to create a
3542     + * hard link for the inode. This is used by the log tree (fsync).
3543     + */
3544     + u64 last_link_trans;
3545     +
3546     /*
3547     * Number of bytes outstanding that are going to need csums. This is
3548     * used in ENOSPC accounting.
3549     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3550     index 539901fb5165..99e7645ad94e 100644
3551     --- a/fs/btrfs/ctree.c
3552     +++ b/fs/btrfs/ctree.c
3553     @@ -2584,14 +2584,27 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
3554     root_lock = BTRFS_READ_LOCK;
3555    
3556     if (p->search_commit_root) {
3557     - /* The commit roots are read only so we always do read locks */
3558     - if (p->need_commit_sem)
3559     + /*
3560     + * The commit roots are read only so we always do read locks,
3561     + * and we always must hold the commit_root_sem when doing
3562     + * searches on them, the only exception is send where we don't
3563     + * want to block transaction commits for a long time, so
3564     + * we need to clone the commit root in order to avoid races
3565     + * with transaction commits that create a snapshot of one of
3566     + * the roots used by a send operation.
3567     + */
3568     + if (p->need_commit_sem) {
3569     down_read(&fs_info->commit_root_sem);
3570     - b = root->commit_root;
3571     - extent_buffer_get(b);
3572     - level = btrfs_header_level(b);
3573     - if (p->need_commit_sem)
3574     + b = btrfs_clone_extent_buffer(root->commit_root);
3575     up_read(&fs_info->commit_root_sem);
3576     + if (!b)
3577     + return ERR_PTR(-ENOMEM);
3578     +
3579     + } else {
3580     + b = root->commit_root;
3581     + extent_buffer_get(b);
3582     + }
3583     + level = btrfs_header_level(b);
3584     /*
3585     * Ensure that all callers have set skip_locking when
3586     * p->search_commit_root = 1.
3587     @@ -2717,6 +2730,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3588     again:
3589     prev_cmp = -1;
3590     b = btrfs_search_slot_get_root(root, p, write_lock_level);
3591     + if (IS_ERR(b)) {
3592     + ret = PTR_ERR(b);
3593     + goto done;
3594     + }
3595    
3596     while (b) {
3597     level = btrfs_header_level(b);
3598     diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
3599     index 2aa48aecc52b..329d3afcf304 100644
3600     --- a/fs/btrfs/dev-replace.c
3601     +++ b/fs/btrfs/dev-replace.c
3602     @@ -884,6 +884,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
3603     "cannot continue dev_replace, tgtdev is missing");
3604     btrfs_info(fs_info,
3605     "you may cancel the operation after 'mount -o degraded'");
3606     + dev_replace->replace_state =
3607     + BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
3608     btrfs_dev_replace_write_unlock(dev_replace);
3609     return 0;
3610     }
3611     @@ -895,6 +897,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
3612     * dev-replace to start anyway.
3613     */
3614     if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3615     + btrfs_dev_replace_write_lock(dev_replace);
3616     + dev_replace->replace_state =
3617     + BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
3618     + btrfs_dev_replace_write_unlock(dev_replace);
3619     btrfs_info(fs_info,
3620     "cannot resume dev-replace, other exclusive operation running");
3621     return 0;
3622     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3623     index a1febf155747..fe1fef3d7eed 100644
3624     --- a/fs/btrfs/extent-tree.c
3625     +++ b/fs/btrfs/extent-tree.c
3626     @@ -8944,6 +8944,10 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
3627     goto out_free;
3628     }
3629    
3630     + err = btrfs_run_delayed_items(trans);
3631     + if (err)
3632     + goto out_end_trans;
3633     +
3634     if (block_rsv)
3635     trans->block_rsv = block_rsv;
3636    
3637     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3638     index 9ea4c6f0352f..423281c19fad 100644
3639     --- a/fs/btrfs/inode.c
3640     +++ b/fs/btrfs/inode.c
3641     @@ -1372,7 +1372,8 @@ next_slot:
3642     * Do the same check as in btrfs_cross_ref_exist but
3643     * without the unnecessary search.
3644     */
3645     - if (btrfs_file_extent_generation(leaf, fi) <=
3646     + if (!nolock &&
3647     + btrfs_file_extent_generation(leaf, fi) <=
3648     btrfs_root_last_snapshot(&root->root_item))
3649     goto out_check;
3650     if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
3651     @@ -3686,6 +3687,21 @@ cache_index:
3652     * inode is not a directory, logging its parent unnecessarily.
3653     */
3654     BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3655     + /*
3656     + * Similar reasoning for last_link_trans, needs to be set otherwise
3657     + * for a case like the following:
3658     + *
3659     + * mkdir A
3660     + * touch foo
3661     + * ln foo A/bar
3662     + * echo 2 > /proc/sys/vm/drop_caches
3663     + * fsync foo
3664     + * <power failure>
3665     + *
3666     + * Would result in link bar and directory A not existing after the power
3667     + * failure.
3668     + */
3669     + BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
3670    
3671     path->slots[0]++;
3672     if (inode->i_nlink != 1 ||
3673     @@ -6625,6 +6641,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3674     if (err)
3675     goto fail;
3676     }
3677     + BTRFS_I(inode)->last_link_trans = trans->transid;
3678     d_instantiate(dentry, inode);
3679     ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
3680     true, NULL);
3681     @@ -9157,6 +9174,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
3682     ei->index_cnt = (u64)-1;
3683     ei->dir_index = 0;
3684     ei->last_unlink_trans = 0;
3685     + ei->last_link_trans = 0;
3686     ei->last_log_commit = 0;
3687    
3688     spin_lock_init(&ei->lock);
3689     diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
3690     index 902819d3cf41..bbd1b36f4918 100644
3691     --- a/fs/btrfs/scrub.c
3692     +++ b/fs/btrfs/scrub.c
3693     @@ -322,6 +322,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
3694     struct rb_node *parent = NULL;
3695     struct full_stripe_lock *entry;
3696     struct full_stripe_lock *ret;
3697     + unsigned int nofs_flag;
3698    
3699     lockdep_assert_held(&locks_root->lock);
3700    
3701     @@ -339,8 +340,17 @@ static struct full_stripe_lock *insert_full_stripe_lock(
3702     }
3703     }
3704    
3705     - /* Insert new lock */
3706     + /*
3707     + * Insert new lock.
3708     + *
3709     + * We must use GFP_NOFS because the scrub task might be waiting for a
3710     + * worker task executing this function and in turn a transaction commit
3711     + * might be waiting the scrub task to pause (which needs to wait for all
3712     + * the worker tasks to complete before pausing).
3713     + */
3714     + nofs_flag = memalloc_nofs_save();
3715     ret = kmalloc(sizeof(*ret), GFP_KERNEL);
3716     + memalloc_nofs_restore(nofs_flag);
3717     if (!ret)
3718     return ERR_PTR(-ENOMEM);
3719     ret->logical = fstripe_logical;
3720     @@ -1620,8 +1630,19 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
3721     mutex_lock(&sctx->wr_lock);
3722     again:
3723     if (!sctx->wr_curr_bio) {
3724     + unsigned int nofs_flag;
3725     +
3726     + /*
3727     + * We must use GFP_NOFS because the scrub task might be waiting
3728     + * for a worker task executing this function and in turn a
3729     + * transaction commit might be waiting the scrub task to pause
3730     + * (which needs to wait for all the worker tasks to complete
3731     + * before pausing).
3732     + */
3733     + nofs_flag = memalloc_nofs_save();
3734     sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
3735     GFP_KERNEL);
3736     + memalloc_nofs_restore(nofs_flag);
3737     if (!sctx->wr_curr_bio) {
3738     mutex_unlock(&sctx->wr_lock);
3739     return -ENOMEM;
3740     @@ -3772,6 +3793,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3741     struct scrub_ctx *sctx;
3742     int ret;
3743     struct btrfs_device *dev;
3744     + unsigned int nofs_flag;
3745    
3746     if (btrfs_fs_closing(fs_info))
3747     return -EINVAL;
3748     @@ -3875,6 +3897,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3749     atomic_inc(&fs_info->scrubs_running);
3750     mutex_unlock(&fs_info->scrub_lock);
3751    
3752     + /*
3753     + * In order to avoid deadlock with reclaim when there is a transaction
3754     + * trying to pause scrub, make sure we use GFP_NOFS for all the
3755     + * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
3756     + * invoked by our callees. The pausing request is done when the
3757     + * transaction commit starts, and it blocks the transaction until scrub
3758     + * is paused (done at specific points at scrub_stripe() or right above
3759     + * before incrementing fs_info->scrubs_running).
3760     + */
3761     + nofs_flag = memalloc_nofs_save();
3762     if (!is_dev_replace) {
3763     /*
3764     * by holding device list mutex, we can
3765     @@ -3887,6 +3919,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3766    
3767     if (!ret)
3768     ret = scrub_enumerate_chunks(sctx, dev, start, end);
3769     + memalloc_nofs_restore(nofs_flag);
3770    
3771     wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3772     atomic_dec(&fs_info->scrubs_running);
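Note on the btrfs scrub hunks above: the call sites keep GFP_KERNEL, but the allocations (and the whole btrfs_scrub_dev() run) are bracketed with memalloc_nofs_save()/memalloc_nofs_restore(), so every allocation in that scope implicitly drops __GFP_FS. As the added comments explain, this keeps direct reclaim from re-entering the filesystem while a transaction commit is waiting for scrub to pause. The scoped-NOFS idiom in isolation, with a made-up helper name (alloc_in_nofs_scope):

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Everything allocated between save and restore behaves as if GFP_NOFS
 * had been passed, even though the call site still says GFP_KERNEL. */
static void *alloc_in_nofs_scope(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);	/* effectively GFP_NOFS here */
	memalloc_nofs_restore(nofs_flag);

	return p;
}

The scope API is generally preferred over sprinkling GFP_NOFS through callees: the constraint is stated once, where it arises, and everything called underneath inherits it.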
3773     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3774     index a5ce99a6c936..15d2914f0a67 100644
3775     --- a/fs/btrfs/tree-log.c
3776     +++ b/fs/btrfs/tree-log.c
3777     @@ -5778,6 +5778,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3778     goto end_trans;
3779     }
3780    
3781     + /*
3782     + * If a new hard link was added to the inode in the current transaction
3783     + * and its link count is now greater than 1, we need to fallback to a
3784     + * transaction commit, otherwise we can end up not logging all its new
3785     + * parents for all the hard links. Here just from the dentry used to
3786     + * fsync, we can not visit the ancestor inodes for all the other hard
3787     + * links to figure out if any is new, so we fallback to a transaction
3788     + * commit (instead of adding a lot of complexity of scanning a btree,
3789     + * since this scenario is not a common use case).
3790     + */
3791     + if (inode->vfs_inode.i_nlink > 1 &&
3792     + inode->last_link_trans > last_committed) {
3793     + ret = -EMLINK;
3794     + goto end_trans;
3795     + }
3796     +
3797     while (1) {
3798     if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
3799     break;
3800     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
3801     index c9bc56b1baac..c23bf9da93d2 100644
3802     --- a/fs/cifs/file.c
3803     +++ b/fs/cifs/file.c
3804     @@ -2617,11 +2617,13 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
3805     if (rc)
3806     break;
3807    
3808     + cur_len = min_t(const size_t, len, wsize);
3809     +
3810     if (ctx->direct_io) {
3811     ssize_t result;
3812    
3813     result = iov_iter_get_pages_alloc(
3814     - from, &pagevec, wsize, &start);
3815     + from, &pagevec, cur_len, &start);
3816     if (result < 0) {
3817     cifs_dbg(VFS,
3818     "direct_writev couldn't get user pages "
3819     @@ -2630,6 +2632,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
3820     result, from->type,
3821     from->iov_offset, from->count);
3822     dump_stack();
3823     +
3824     + rc = result;
3825     + add_credits_and_wake_if(server, credits, 0);
3826     break;
3827     }
3828     cur_len = (size_t)result;
3829     @@ -3313,13 +3318,16 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3830     cur_len, &start);
3831     if (result < 0) {
3832     cifs_dbg(VFS,
3833     - "couldn't get user pages (cur_len=%zd)"
3834     + "couldn't get user pages (rc=%zd)"
3835     " iter type %d"
3836     " iov_offset %zd count %zd\n",
3837     result, direct_iov.type,
3838     direct_iov.iov_offset,
3839     direct_iov.count);
3840     dump_stack();
3841     +
3842     + rc = result;
3843     + add_credits_and_wake_if(server, credits, 0);
3844     break;
3845     }
3846     cur_len = (size_t)result;
3847     diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
3848     index d47b7f5dfa6c..924269cec135 100644
3849     --- a/fs/cifs/smb2maperror.c
3850     +++ b/fs/cifs/smb2maperror.c
3851     @@ -379,8 +379,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
3852     {STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
3853     {STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
3854     {STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
3855     - {STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
3856     - {STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
3857     + {STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
3858     + {STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
3859     {STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
3860     {STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
3861     "STATUS_CTL_FILE_NOT_SUPPORTED"},
3862     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3863     index e25c7aade98a..391b40e91910 100644
3864     --- a/fs/cifs/smb2ops.c
3865     +++ b/fs/cifs/smb2ops.c
3866     @@ -3384,8 +3384,10 @@ smb3_receive_transform(struct TCP_Server_Info *server,
3867     }
3868    
3869     /* TODO: add support for compounds containing READ. */
3870     - if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
3871     + if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
3872     + *num_mids = 1;
3873     return receive_encrypted_read(server, &mids[0]);
3874     + }
3875    
3876     return receive_encrypted_standard(server, mids, bufs, num_mids);
3877     }
3878     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3879     index 3f89d0ab08fc..185a05d3257e 100644
3880     --- a/fs/ext4/ext4.h
3881     +++ b/fs/ext4/ext4.h
3882     @@ -2454,8 +2454,19 @@ int do_journal_get_write_access(handle_t *handle,
3883     #define FALL_BACK_TO_NONDELALLOC 1
3884     #define CONVERT_INLINE_DATA 2
3885    
3886     -extern struct inode *ext4_iget(struct super_block *, unsigned long);
3887     -extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
3888     +typedef enum {
3889     + EXT4_IGET_NORMAL = 0,
3890     + EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
3891     + EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
3892     +} ext4_iget_flags;
3893     +
3894     +extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
3895     + ext4_iget_flags flags, const char *function,
3896     + unsigned int line);
3897     +
3898     +#define ext4_iget(sb, ino, flags) \
3899     + __ext4_iget((sb), (ino), (flags), __func__, __LINE__)
3900     +
3901     extern int ext4_write_inode(struct inode *, struct writeback_control *);
3902     extern int ext4_setattr(struct dentry *, struct iattr *);
3903     extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
3904     @@ -2538,6 +2549,8 @@ extern int ext4_group_extend(struct super_block *sb,
3905     extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
3906    
3907     /* super.c */
3908     +extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
3909     + sector_t block, int op_flags);
3910     extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
3911     extern int ext4_calculate_overhead(struct super_block *sb);
3912     extern void ext4_superblock_csum_set(struct super_block *sb);
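Note on the ext4.h hunk above: ext4_iget() and ext4_iget_normal() are folded into a single __ext4_iget() that takes ext4_iget_flags and, through the new macro, records the calling function and line so corrupted-inode reports point at the real call site. Callers now declare what kind of inode number they are passing; two hypothetical wrappers illustrate the intent (the real conversions follow in ialloc.c, ioctl.c, namei.c and the new checks in inode.c below).

#include "ext4.h"

/* Hypothetical wrappers, one per common flag added above. */
static struct inode *lookup_from_dirent(struct super_block *sb, unsigned long ino)
{
	/* Inode number read from a directory entry: a reserved (system)
	 * inode number here is treated as corruption. */
	return ext4_iget(sb, ino, EXT4_IGET_NORMAL);
}

static struct inode *lookup_from_nfs_handle(struct super_block *sb, unsigned long ino)
{
	/* Inode number taken from an NFS file handle: out-of-range or
	 * deleted inodes must come back as -ESTALE, not as an error report. */
	return ext4_iget(sb, ino, EXT4_IGET_HANDLE);
}

EXT4_IGET_SPECIAL covers the third case, a deliberate read of a system inode, as the boot-loader-inode ioctl hunk further below does.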
3913     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3914     index 014f6a698cb7..7ff14a1adba3 100644
3915     --- a/fs/ext4/ialloc.c
3916     +++ b/fs/ext4/ialloc.c
3917     @@ -1225,7 +1225,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
3918     if (!ext4_test_bit(bit, bitmap_bh->b_data))
3919     goto bad_orphan;
3920    
3921     - inode = ext4_iget(sb, ino);
3922     + inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
3923     if (IS_ERR(inode)) {
3924     err = PTR_ERR(inode);
3925     ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
3926     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
3927     index 9c4bac18cc6c..27373d88b5f0 100644
3928     --- a/fs/ext4/inline.c
3929     +++ b/fs/ext4/inline.c
3930     @@ -705,8 +705,11 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
3931    
3932     if (!PageUptodate(page)) {
3933     ret = ext4_read_inline_page(inode, page);
3934     - if (ret < 0)
3935     + if (ret < 0) {
3936     + unlock_page(page);
3937     + put_page(page);
3938     goto out_up_read;
3939     + }
3940     }
3941    
3942     ret = 1;
3943     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3944     index 22a9d8159720..9affabd07682 100644
3945     --- a/fs/ext4/inode.c
3946     +++ b/fs/ext4/inode.c
3947     @@ -4817,7 +4817,9 @@ static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
3948     return inode_peek_iversion(inode);
3949     }
3950    
3951     -struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3952     +struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
3953     + ext4_iget_flags flags, const char *function,
3954     + unsigned int line)
3955     {
3956     struct ext4_iloc iloc;
3957     struct ext4_inode *raw_inode;
3958     @@ -4831,6 +4833,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3959     gid_t i_gid;
3960     projid_t i_projid;
3961    
3962     + if (((flags & EXT4_IGET_NORMAL) &&
3963     + (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
3964     + (ino < EXT4_ROOT_INO) ||
3965     + (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
3966     + if (flags & EXT4_IGET_HANDLE)
3967     + return ERR_PTR(-ESTALE);
3968     + __ext4_error(sb, function, line,
3969     + "inode #%lu: comm %s: iget: illegal inode #",
3970     + ino, current->comm);
3971     + return ERR_PTR(-EFSCORRUPTED);
3972     + }
3973     +
3974     inode = iget_locked(sb, ino);
3975     if (!inode)
3976     return ERR_PTR(-ENOMEM);
3977     @@ -4846,18 +4860,26 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3978     raw_inode = ext4_raw_inode(&iloc);
3979    
3980     if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
3981     - EXT4_ERROR_INODE(inode, "root inode unallocated");
3982     + ext4_error_inode(inode, function, line, 0,
3983     + "iget: root inode unallocated");
3984     ret = -EFSCORRUPTED;
3985     goto bad_inode;
3986     }
3987    
3988     + if ((flags & EXT4_IGET_HANDLE) &&
3989     + (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
3990     + ret = -ESTALE;
3991     + goto bad_inode;
3992     + }
3993     +
3994     if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3995     ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3996     if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3997     EXT4_INODE_SIZE(inode->i_sb) ||
3998     (ei->i_extra_isize & 3)) {
3999     - EXT4_ERROR_INODE(inode,
4000     - "bad extra_isize %u (inode size %u)",
4001     + ext4_error_inode(inode, function, line, 0,
4002     + "iget: bad extra_isize %u "
4003     + "(inode size %u)",
4004     ei->i_extra_isize,
4005     EXT4_INODE_SIZE(inode->i_sb));
4006     ret = -EFSCORRUPTED;
4007     @@ -4879,7 +4901,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4008     }
4009    
4010     if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4011     - EXT4_ERROR_INODE(inode, "checksum invalid");
4012     + ext4_error_inode(inode, function, line, 0,
4013     + "iget: checksum invalid");
4014     ret = -EFSBADCRC;
4015     goto bad_inode;
4016     }
4017     @@ -4936,7 +4959,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4018     ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4019     inode->i_size = ext4_isize(sb, raw_inode);
4020     if ((size = i_size_read(inode)) < 0) {
4021     - EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
4022     + ext4_error_inode(inode, function, line, 0,
4023     + "iget: bad i_size value: %lld", size);
4024     ret = -EFSCORRUPTED;
4025     goto bad_inode;
4026     }
4027     @@ -5012,7 +5036,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4028     ret = 0;
4029     if (ei->i_file_acl &&
4030     !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4031     - EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
4032     + ext4_error_inode(inode, function, line, 0,
4033     + "iget: bad extended attribute block %llu",
4034     ei->i_file_acl);
4035     ret = -EFSCORRUPTED;
4036     goto bad_inode;
4037     @@ -5040,8 +5065,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4038     } else if (S_ISLNK(inode->i_mode)) {
4039     /* VFS does not allow setting these so must be corruption */
4040     if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4041     - EXT4_ERROR_INODE(inode,
4042     - "immutable or append flags not allowed on symlinks");
4043     + ext4_error_inode(inode, function, line, 0,
4044     + "iget: immutable or append flags "
4045     + "not allowed on symlinks");
4046     ret = -EFSCORRUPTED;
4047     goto bad_inode;
4048     }
4049     @@ -5071,7 +5097,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4050     make_bad_inode(inode);
4051     } else {
4052     ret = -EFSCORRUPTED;
4053     - EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
4054     + ext4_error_inode(inode, function, line, 0,
4055     + "iget: bogus i_mode (%o)", inode->i_mode);
4056     goto bad_inode;
4057     }
4058     brelse(iloc.bh);
4059     @@ -5085,13 +5112,6 @@ bad_inode:
4060     return ERR_PTR(ret);
4061     }
4062    
4063     -struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
4064     -{
4065     - if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
4066     - return ERR_PTR(-EFSCORRUPTED);
4067     - return ext4_iget(sb, ino);
4068     -}
4069     -
4070     static int ext4_inode_blocks_set(handle_t *handle,
4071     struct ext4_inode *raw_inode,
4072     struct ext4_inode_info *ei)
4073     @@ -5380,9 +5400,13 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4074     {
4075     int err;
4076    
4077     - if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
4078     + if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
4079     + sb_rdonly(inode->i_sb))
4080     return 0;
4081    
4082     + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
4083     + return -EIO;
4084     +
4085     if (EXT4_SB(inode->i_sb)->s_journal) {
4086     if (ext4_journal_current_handle()) {
4087     jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4088     @@ -5398,7 +5422,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4089     if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
4090     return 0;
4091    
4092     - err = ext4_force_commit(inode->i_sb);
4093     + err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
4094     + EXT4_I(inode)->i_sync_tid);
4095     } else {
4096     struct ext4_iloc iloc;
4097    
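Note on the tail of the inode.c hunk above: ext4_write_inode() now skips writeback on a read-only superblock, fails fast with -EIO after a forced shutdown, and in the data-integrity case waits only for the transaction that last touched the inode (jbd2_complete_transaction() with i_sync_tid) instead of forcing a full commit. A compressed sketch of that fast path; wait_for_inode_commit is a made-up wrapper around ext4 internals.

#include "ext4.h"

static int wait_for_inode_commit(struct inode *inode)
{
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;

	if (sb_rdonly(inode->i_sb))
		return 0;	/* nothing to push out on a read-only fs */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	/* Wait for this inode's own transaction only, not the whole journal. */
	return jbd2_complete_transaction(journal, EXT4_I(inode)->i_sync_tid);
}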
4098     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
4099     index 0edee31913d1..d37dafa1d133 100644
4100     --- a/fs/ext4/ioctl.c
4101     +++ b/fs/ext4/ioctl.c
4102     @@ -125,7 +125,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
4103     !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
4104     return -EPERM;
4105    
4106     - inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
4107     + inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
4108     if (IS_ERR(inode_bl))
4109     return PTR_ERR(inode_bl);
4110     ei_bl = EXT4_I(inode_bl);
4111     diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
4112     index 61a9d1927817..a98bfca9c463 100644
4113     --- a/fs/ext4/migrate.c
4114     +++ b/fs/ext4/migrate.c
4115     @@ -116,9 +116,9 @@ static int update_ind_extent_range(handle_t *handle, struct inode *inode,
4116     int i, retval = 0;
4117     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4118    
4119     - bh = sb_bread(inode->i_sb, pblock);
4120     - if (!bh)
4121     - return -EIO;
4122     + bh = ext4_sb_bread(inode->i_sb, pblock, 0);
4123     + if (IS_ERR(bh))
4124     + return PTR_ERR(bh);
4125    
4126     i_data = (__le32 *)bh->b_data;
4127     for (i = 0; i < max_entries; i++) {
4128     @@ -145,9 +145,9 @@ static int update_dind_extent_range(handle_t *handle, struct inode *inode,
4129     int i, retval = 0;
4130     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4131    
4132     - bh = sb_bread(inode->i_sb, pblock);
4133     - if (!bh)
4134     - return -EIO;
4135     + bh = ext4_sb_bread(inode->i_sb, pblock, 0);
4136     + if (IS_ERR(bh))
4137     + return PTR_ERR(bh);
4138    
4139     i_data = (__le32 *)bh->b_data;
4140     for (i = 0; i < max_entries; i++) {
4141     @@ -175,9 +175,9 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
4142     int i, retval = 0;
4143     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4144    
4145     - bh = sb_bread(inode->i_sb, pblock);
4146     - if (!bh)
4147     - return -EIO;
4148     + bh = ext4_sb_bread(inode->i_sb, pblock, 0);
4149     + if (IS_ERR(bh))
4150     + return PTR_ERR(bh);
4151    
4152     i_data = (__le32 *)bh->b_data;
4153     for (i = 0; i < max_entries; i++) {
4154     @@ -224,9 +224,9 @@ static int free_dind_blocks(handle_t *handle,
4155     struct buffer_head *bh;
4156     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4157    
4158     - bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
4159     - if (!bh)
4160     - return -EIO;
4161     + bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
4162     + if (IS_ERR(bh))
4163     + return PTR_ERR(bh);
4164    
4165     tmp_idata = (__le32 *)bh->b_data;
4166     for (i = 0; i < max_entries; i++) {
4167     @@ -254,9 +254,9 @@ static int free_tind_blocks(handle_t *handle,
4168     struct buffer_head *bh;
4169     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4170    
4171     - bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
4172     - if (!bh)
4173     - return -EIO;
4174     + bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
4175     + if (IS_ERR(bh))
4176     + return PTR_ERR(bh);
4177    
4178     tmp_idata = (__le32 *)bh->b_data;
4179     for (i = 0; i < max_entries; i++) {
4180     @@ -382,9 +382,9 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
4181     struct ext4_extent_header *eh;
4182    
4183     block = ext4_idx_pblock(ix);
4184     - bh = sb_bread(inode->i_sb, block);
4185     - if (!bh)
4186     - return -EIO;
4187     + bh = ext4_sb_bread(inode->i_sb, block, 0);
4188     + if (IS_ERR(bh))
4189     + return PTR_ERR(bh);
4190    
4191     eh = (struct ext4_extent_header *)bh->b_data;
4192     if (eh->eh_depth != 0) {
4193     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4194     index 437f71fe83ae..2b928eb07fa2 100644
4195     --- a/fs/ext4/namei.c
4196     +++ b/fs/ext4/namei.c
4197     @@ -1571,7 +1571,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
4198     dentry);
4199     return ERR_PTR(-EFSCORRUPTED);
4200     }
4201     - inode = ext4_iget_normal(dir->i_sb, ino);
4202     + inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);
4203     if (inode == ERR_PTR(-ESTALE)) {
4204     EXT4_ERROR_INODE(dir,
4205     "deleted inode referenced: %u",
4206     @@ -1613,7 +1613,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
4207     return ERR_PTR(-EFSCORRUPTED);
4208     }
4209    
4210     - return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
4211     + return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL));
4212     }
4213    
4214     /*
4215     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
4216     index a5efee34415f..48421de803b7 100644
4217     --- a/fs/ext4/resize.c
4218     +++ b/fs/ext4/resize.c
4219     @@ -127,10 +127,12 @@ static int verify_group_input(struct super_block *sb,
4220     else if (free_blocks_count < 0)
4221     ext4_warning(sb, "Bad blocks count %u",
4222     input->blocks_count);
4223     - else if (!(bh = sb_bread(sb, end - 1)))
4224     + else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
4225     + err = PTR_ERR(bh);
4226     + bh = NULL;
4227     ext4_warning(sb, "Cannot read last block (%llu)",
4228     end - 1);
4229     - else if (outside(input->block_bitmap, start, end))
4230     + } else if (outside(input->block_bitmap, start, end))
4231     ext4_warning(sb, "Block bitmap not in group (block %llu)",
4232     (unsigned long long)input->block_bitmap);
4233     else if (outside(input->inode_bitmap, start, end))
4234     @@ -781,11 +783,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4235     struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4236     unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
4237     ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
4238     - struct buffer_head **o_group_desc, **n_group_desc;
4239     - struct buffer_head *dind;
4240     - struct buffer_head *gdb_bh;
4241     + struct buffer_head **o_group_desc, **n_group_desc = NULL;
4242     + struct buffer_head *dind = NULL;
4243     + struct buffer_head *gdb_bh = NULL;
4244     int gdbackups;
4245     - struct ext4_iloc iloc;
4246     + struct ext4_iloc iloc = { .bh = NULL };
4247     __le32 *data;
4248     int err;
4249    
4250     @@ -794,21 +796,22 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4251     "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
4252     gdb_num);
4253    
4254     - gdb_bh = sb_bread(sb, gdblock);
4255     - if (!gdb_bh)
4256     - return -EIO;
4257     + gdb_bh = ext4_sb_bread(sb, gdblock, 0);
4258     + if (IS_ERR(gdb_bh))
4259     + return PTR_ERR(gdb_bh);
4260    
4261     gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
4262     if (gdbackups < 0) {
4263     err = gdbackups;
4264     - goto exit_bh;
4265     + goto errout;
4266     }
4267    
4268     data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
4269     - dind = sb_bread(sb, le32_to_cpu(*data));
4270     - if (!dind) {
4271     - err = -EIO;
4272     - goto exit_bh;
4273     + dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
4274     + if (IS_ERR(dind)) {
4275     + err = PTR_ERR(dind);
4276     + dind = NULL;
4277     + goto errout;
4278     }
4279    
4280     data = (__le32 *)dind->b_data;
4281     @@ -816,18 +819,18 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4282     ext4_warning(sb, "new group %u GDT block %llu not reserved",
4283     group, gdblock);
4284     err = -EINVAL;
4285     - goto exit_dind;
4286     + goto errout;
4287     }
4288    
4289     BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
4290     err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
4291     if (unlikely(err))
4292     - goto exit_dind;
4293     + goto errout;
4294    
4295     BUFFER_TRACE(gdb_bh, "get_write_access");
4296     err = ext4_journal_get_write_access(handle, gdb_bh);
4297     if (unlikely(err))
4298     - goto exit_dind;
4299     + goto errout;
4300    
4301     BUFFER_TRACE(dind, "get_write_access");
4302     err = ext4_journal_get_write_access(handle, dind);
4303     @@ -837,7 +840,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4304     /* ext4_reserve_inode_write() gets a reference on the iloc */
4305     err = ext4_reserve_inode_write(handle, inode, &iloc);
4306     if (unlikely(err))
4307     - goto exit_dind;
4308     + goto errout;
4309    
4310     n_group_desc = ext4_kvmalloc((gdb_num + 1) *
4311     sizeof(struct buffer_head *),
4312     @@ -846,7 +849,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4313     err = -ENOMEM;
4314     ext4_warning(sb, "not enough memory for %lu groups",
4315     gdb_num + 1);
4316     - goto exit_inode;
4317     + goto errout;
4318     }
4319    
4320     /*
4321     @@ -862,7 +865,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4322     err = ext4_handle_dirty_metadata(handle, NULL, dind);
4323     if (unlikely(err)) {
4324     ext4_std_error(sb, err);
4325     - goto exit_inode;
4326     + goto errout;
4327     }
4328     inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
4329     (9 - EXT4_SB(sb)->s_cluster_bits);
4330     @@ -871,8 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4331     err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
4332     if (unlikely(err)) {
4333     ext4_std_error(sb, err);
4334     - iloc.bh = NULL;
4335     - goto exit_inode;
4336     + goto errout;
4337     }
4338     brelse(dind);
4339    
4340     @@ -888,15 +890,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4341     err = ext4_handle_dirty_super(handle, sb);
4342     if (err)
4343     ext4_std_error(sb, err);
4344     -
4345     return err;
4346     -
4347     -exit_inode:
4348     +errout:
4349     kvfree(n_group_desc);
4350     brelse(iloc.bh);
4351     -exit_dind:
4352     brelse(dind);
4353     -exit_bh:
4354     brelse(gdb_bh);
4355    
4356     ext4_debug("leaving with error %d\n", err);
4357     @@ -916,9 +914,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
4358    
4359     gdblock = ext4_meta_bg_first_block_no(sb, group) +
4360     ext4_bg_has_super(sb, group);
4361     - gdb_bh = sb_bread(sb, gdblock);
4362     - if (!gdb_bh)
4363     - return -EIO;
4364     + gdb_bh = ext4_sb_bread(sb, gdblock, 0);
4365     + if (IS_ERR(gdb_bh))
4366     + return PTR_ERR(gdb_bh);
4367     n_group_desc = ext4_kvmalloc((gdb_num + 1) *
4368     sizeof(struct buffer_head *),
4369     GFP_NOFS);
4370     @@ -975,9 +973,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
4371     return -ENOMEM;
4372    
4373     data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
4374     - dind = sb_bread(sb, le32_to_cpu(*data));
4375     - if (!dind) {
4376     - err = -EIO;
4377     + dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
4378     + if (IS_ERR(dind)) {
4379     + err = PTR_ERR(dind);
4380     + dind = NULL;
4381     goto exit_free;
4382     }
4383    
4384     @@ -996,9 +995,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
4385     err = -EINVAL;
4386     goto exit_bh;
4387     }
4388     - primary[res] = sb_bread(sb, blk);
4389     - if (!primary[res]) {
4390     - err = -EIO;
4391     + primary[res] = ext4_sb_bread(sb, blk, 0);
4392     + if (IS_ERR(primary[res])) {
4393     + err = PTR_ERR(primary[res]);
4394     + primary[res] = NULL;
4395     goto exit_bh;
4396     }
4397     gdbackups = verify_reserved_gdb(sb, group, primary[res]);
4398     @@ -1631,13 +1631,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
4399     }
4400    
4401     if (reserved_gdb || gdb_off == 0) {
4402     - if (ext4_has_feature_resize_inode(sb) ||
4403     + if (!ext4_has_feature_resize_inode(sb) ||
4404     !le16_to_cpu(es->s_reserved_gdt_blocks)) {
4405     ext4_warning(sb,
4406     "No reserved GDT blocks, can't resize");
4407     return -EPERM;
4408     }
4409     - inode = ext4_iget(sb, EXT4_RESIZE_INO);
4410     + inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
4411     if (IS_ERR(inode)) {
4412     ext4_warning(sb, "Error opening resize inode");
4413     return PTR_ERR(inode);
4414     @@ -1965,7 +1965,8 @@ retry:
4415     }
4416    
4417     if (!resize_inode)
4418     - resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
4419     + resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
4420     + EXT4_IGET_SPECIAL);
4421     if (IS_ERR(resize_inode)) {
4422     ext4_warning(sb, "Error opening resize inode");
4423     return PTR_ERR(resize_inode);
4424     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4425     index 53ff6c2a26ed..6641a1b8a6a5 100644
4426     --- a/fs/ext4/super.c
4427     +++ b/fs/ext4/super.c
4428     @@ -140,6 +140,29 @@ MODULE_ALIAS_FS("ext3");
4429     MODULE_ALIAS("ext3");
4430     #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
4431    
4432     +/*
4433     + * This works like sb_bread() except it uses ERR_PTR for error
4434     + * returns. Currently with sb_bread it's impossible to distinguish
4435     + * between ENOMEM and EIO situations (since both result in a NULL
4436     + * return.
4437     + */
4438     +struct buffer_head *
4439     +ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
4440     +{
4441     + struct buffer_head *bh = sb_getblk(sb, block);
4442     +
4443     + if (bh == NULL)
4444     + return ERR_PTR(-ENOMEM);
4445     + if (buffer_uptodate(bh))
4446     + return bh;
4447     + ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
4448     + wait_on_buffer(bh);
4449     + if (buffer_uptodate(bh))
4450     + return bh;
4451     + put_bh(bh);
4452     + return ERR_PTR(-EIO);
4453     +}
4454     +
4455     static int ext4_verify_csum_type(struct super_block *sb,
4456     struct ext4_super_block *es)
4457     {
4458     @@ -1151,20 +1174,11 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
4459     {
4460     struct inode *inode;
4461    
4462     - if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
4463     - return ERR_PTR(-ESTALE);
4464     - if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4465     - return ERR_PTR(-ESTALE);
4466     -
4467     - /* iget isn't really right if the inode is currently unallocated!!
4468     - *
4469     - * ext4_read_inode will return a bad_inode if the inode had been
4470     - * deleted, so we should be safe.
4471     - *
4472     + /*
4473     * Currently we don't know the generation for parent directory, so
4474     * a generation of 0 means "accept any"
4475     */
4476     - inode = ext4_iget_normal(sb, ino);
4477     + inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
4478     if (IS_ERR(inode))
4479     return ERR_CAST(inode);
4480     if (generation && inode->i_generation != generation) {
4481     @@ -1189,6 +1203,16 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
4482     ext4_nfs_get_inode);
4483     }
4484    
4485     +static int ext4_nfs_commit_metadata(struct inode *inode)
4486     +{
4487     + struct writeback_control wbc = {
4488     + .sync_mode = WB_SYNC_ALL
4489     + };
4490     +
4491     + trace_ext4_nfs_commit_metadata(inode);
4492     + return ext4_write_inode(inode, &wbc);
4493     +}
4494     +
4495     /*
4496     * Try to release metadata pages (indirect blocks, directories) which are
4497     * mapped via the block device. Since these pages could have journal heads
4498     @@ -1393,6 +1417,7 @@ static const struct export_operations ext4_export_ops = {
4499     .fh_to_dentry = ext4_fh_to_dentry,
4500     .fh_to_parent = ext4_fh_to_parent,
4501     .get_parent = ext4_get_parent,
4502     + .commit_metadata = ext4_nfs_commit_metadata,
4503     };
4504    
4505     enum {
4506     @@ -4328,7 +4353,7 @@ no_journal:
4507     * so we can safely mount the rest of the filesystem now.
4508     */
4509    
4510     - root = ext4_iget(sb, EXT4_ROOT_INO);
4511     + root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
4512     if (IS_ERR(root)) {
4513     ext4_msg(sb, KERN_ERR, "get root inode failed");
4514     ret = PTR_ERR(root);
4515     @@ -4598,7 +4623,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
4516     * happen if we iget() an unused inode, as the subsequent iput()
4517     * will try to delete it.
4518     */
4519     - journal_inode = ext4_iget(sb, journal_inum);
4520     + journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
4521     if (IS_ERR(journal_inode)) {
4522     ext4_msg(sb, KERN_ERR, "no journal found");
4523     return NULL;
4524     @@ -5680,7 +5705,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
4525     if (!qf_inums[type])
4526     return -EPERM;
4527    
4528     - qf_inode = ext4_iget(sb, qf_inums[type]);
4529     + qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
4530     if (IS_ERR(qf_inode)) {
4531     ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
4532     return PTR_ERR(qf_inode);
4533     @@ -5690,9 +5715,9 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
4534     qf_inode->i_flags |= S_NOQUOTA;
4535     lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
4536     err = dquot_enable(qf_inode, type, format_id, flags);
4537     - iput(qf_inode);
4538     if (err)
4539     lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
4540     + iput(qf_inode);
4541    
4542     return err;
4543     }
4544     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
4545     index 7643d52c776c..86ed9c686249 100644
4546     --- a/fs/ext4/xattr.c
4547     +++ b/fs/ext4/xattr.c
4548     @@ -384,7 +384,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
4549     struct inode *inode;
4550     int err;
4551    
4552     - inode = ext4_iget(parent->i_sb, ea_ino);
4553     + inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
4554     if (IS_ERR(inode)) {
4555     err = PTR_ERR(inode);
4556     ext4_error(parent->i_sb,
4557     @@ -522,14 +522,13 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
4558     ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
4559     name_index, name, buffer, (long)buffer_size);
4560    
4561     - error = -ENODATA;
4562     if (!EXT4_I(inode)->i_file_acl)
4563     - goto cleanup;
4564     + return -ENODATA;
4565     ea_idebug(inode, "reading block %llu",
4566     (unsigned long long)EXT4_I(inode)->i_file_acl);
4567     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4568     - if (!bh)
4569     - goto cleanup;
4570     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4571     + if (IS_ERR(bh))
4572     + return PTR_ERR(bh);
4573     ea_bdebug(bh, "b_count=%d, refcount=%d",
4574     atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
4575     error = ext4_xattr_check_block(inode, bh);
4576     @@ -696,26 +695,23 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
4577     ea_idebug(inode, "buffer=%p, buffer_size=%ld",
4578     buffer, (long)buffer_size);
4579    
4580     - error = 0;
4581     if (!EXT4_I(inode)->i_file_acl)
4582     - goto cleanup;
4583     + return 0;
4584     ea_idebug(inode, "reading block %llu",
4585     (unsigned long long)EXT4_I(inode)->i_file_acl);
4586     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4587     - error = -EIO;
4588     - if (!bh)
4589     - goto cleanup;
4590     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4591     + if (IS_ERR(bh))
4592     + return PTR_ERR(bh);
4593     ea_bdebug(bh, "b_count=%d, refcount=%d",
4594     atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
4595     error = ext4_xattr_check_block(inode, bh);
4596     if (error)
4597     goto cleanup;
4598     ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
4599     - error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
4600     -
4601     + error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
4602     + buffer_size);
4603     cleanup:
4604     brelse(bh);
4605     -
4606     return error;
4607     }
4608    
4609     @@ -830,9 +826,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
4610     }
4611    
4612     if (EXT4_I(inode)->i_file_acl) {
4613     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4614     - if (!bh) {
4615     - ret = -EIO;
4616     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4617     + if (IS_ERR(bh)) {
4618     + ret = PTR_ERR(bh);
4619     goto out;
4620     }
4621    
4622     @@ -1486,7 +1482,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
4623     }
4624    
4625     while (ce) {
4626     - ea_inode = ext4_iget(inode->i_sb, ce->e_value);
4627     + ea_inode = ext4_iget(inode->i_sb, ce->e_value,
4628     + EXT4_IGET_NORMAL);
4629     if (!IS_ERR(ea_inode) &&
4630     !is_bad_inode(ea_inode) &&
4631     (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
4632     @@ -1821,16 +1818,15 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
4633    
4634     if (EXT4_I(inode)->i_file_acl) {
4635     /* The inode already has an extended attribute block. */
4636     - bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
4637     - error = -EIO;
4638     - if (!bs->bh)
4639     - goto cleanup;
4640     + bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4641     + if (IS_ERR(bs->bh))
4642     + return PTR_ERR(bs->bh);
4643     ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
4644     atomic_read(&(bs->bh->b_count)),
4645     le32_to_cpu(BHDR(bs->bh)->h_refcount));
4646     error = ext4_xattr_check_block(inode, bs->bh);
4647     if (error)
4648     - goto cleanup;
4649     + return error;
4650     /* Find the named attribute. */
4651     bs->s.base = BHDR(bs->bh);
4652     bs->s.first = BFIRST(bs->bh);
4653     @@ -1839,13 +1835,10 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
4654     error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
4655     i->name_index, i->name, 1);
4656     if (error && error != -ENODATA)
4657     - goto cleanup;
4658     + return error;
4659     bs->s.not_found = error;
4660     }
4661     - error = 0;
4662     -
4663     -cleanup:
4664     - return error;
4665     + return 0;
4666     }
4667    
4668     static int
4669     @@ -2274,9 +2267,9 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
4670    
4671     if (!EXT4_I(inode)->i_file_acl)
4672     return NULL;
4673     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4674     - if (!bh)
4675     - return ERR_PTR(-EIO);
4676     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4677     + if (IS_ERR(bh))
4678     + return bh;
4679     error = ext4_xattr_check_block(inode, bh);
4680     if (error) {
4681     brelse(bh);
4682     @@ -2729,7 +2722,7 @@ retry:
4683     base = IFIRST(header);
4684     end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
4685     min_offs = end - base;
4686     - total_ino = sizeof(struct ext4_xattr_ibody_header);
4687     + total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
4688    
4689     error = xattr_check_inode(inode, header, end);
4690     if (error)
4691     @@ -2746,10 +2739,11 @@ retry:
4692     if (EXT4_I(inode)->i_file_acl) {
4693     struct buffer_head *bh;
4694    
4695     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4696     - error = -EIO;
4697     - if (!bh)
4698     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4699     + if (IS_ERR(bh)) {
4700     + error = PTR_ERR(bh);
4701     goto cleanup;
4702     + }
4703     error = ext4_xattr_check_block(inode, bh);
4704     if (error) {
4705     brelse(bh);
4706     @@ -2903,11 +2897,12 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
4707     }
4708    
4709     if (EXT4_I(inode)->i_file_acl) {
4710     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4711     - if (!bh) {
4712     - EXT4_ERROR_INODE(inode, "block %llu read error",
4713     - EXT4_I(inode)->i_file_acl);
4714     - error = -EIO;
4715     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4716     + if (IS_ERR(bh)) {
4717     + error = PTR_ERR(bh);
4718     + if (error == -EIO)
4719     + EXT4_ERROR_INODE(inode, "block %llu read error",
4720     + EXT4_I(inode)->i_file_acl);
4721     goto cleanup;
4722     }
4723     error = ext4_xattr_check_block(inode, bh);
4724     @@ -3060,8 +3055,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
4725     while (ce) {
4726     struct buffer_head *bh;
4727    
4728     - bh = sb_bread(inode->i_sb, ce->e_value);
4729     - if (!bh) {
4730     + bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
4731     + if (IS_ERR(bh)) {
4732     + if (PTR_ERR(bh) == -ENOMEM)
4733     + return NULL;
4734     EXT4_ERROR_INODE(inode, "block %lu read error",
4735     (unsigned long)ce->e_value);
4736     } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
4737     diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
4738     index b293cb3e27a2..17049b030b6c 100644
4739     --- a/fs/f2fs/data.c
4740     +++ b/fs/f2fs/data.c
4741     @@ -1102,8 +1102,10 @@ next_block:
4742     if (test_opt(sbi, LFS) && create &&
4743     flag == F2FS_GET_BLOCK_DIO) {
4744     err = __allocate_data_block(&dn, map->m_seg_type);
4745     - if (!err)
4746     + if (!err) {
4747     + blkaddr = dn.data_blkaddr;
4748     set_inode_flag(inode, FI_APPEND_WRITE);
4749     + }
4750     }
4751     } else {
4752     if (create) {
4753     diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
4754     index d338740d0fda..88be946dedd4 100644
4755     --- a/fs/f2fs/node.c
4756     +++ b/fs/f2fs/node.c
4757     @@ -826,6 +826,7 @@ static int truncate_node(struct dnode_of_data *dn)
4758     struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
4759     struct node_info ni;
4760     int err;
4761     + pgoff_t index;
4762    
4763     err = f2fs_get_node_info(sbi, dn->nid, &ni);
4764     if (err)
4765     @@ -845,10 +846,11 @@ static int truncate_node(struct dnode_of_data *dn)
4766     clear_node_page_dirty(dn->node_page);
4767     set_sbi_flag(sbi, SBI_IS_DIRTY);
4768    
4769     + index = dn->node_page->index;
4770     f2fs_put_page(dn->node_page, 1);
4771    
4772     invalidate_mapping_pages(NODE_MAPPING(sbi),
4773     - dn->node_page->index, dn->node_page->index);
4774     + index, index);
4775    
4776     dn->node_page = NULL;
4777     trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
4778     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
4779     index af58b2cc21b8..855a622fb052 100644
4780     --- a/fs/f2fs/super.c
4781     +++ b/fs/f2fs/super.c
4782     @@ -1457,19 +1457,16 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
4783    
4784     sbi->sb->s_flags |= SB_ACTIVE;
4785    
4786     - mutex_lock(&sbi->gc_mutex);
4787     f2fs_update_time(sbi, DISABLE_TIME);
4788    
4789     while (!f2fs_time_over(sbi, DISABLE_TIME)) {
4790     + mutex_lock(&sbi->gc_mutex);
4791     err = f2fs_gc(sbi, true, false, NULL_SEGNO);
4792     if (err == -ENODATA)
4793     break;
4794     - if (err && err != -EAGAIN) {
4795     - mutex_unlock(&sbi->gc_mutex);
4796     + if (err && err != -EAGAIN)
4797     return err;
4798     - }
4799     }
4800     - mutex_unlock(&sbi->gc_mutex);
4801    
4802     err = sync_filesystem(sbi->sb);
4803     if (err)
4804     @@ -2496,10 +2493,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
4805     return 1;
4806     }
4807    
4808     - if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
4809     + if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
4810     f2fs_msg(sb, KERN_INFO,
4811     - "Wrong segment_count / block_count (%u > %u)",
4812     - segment_count, le32_to_cpu(raw_super->block_count));
4813     + "Wrong segment_count / block_count (%u > %llu)",
4814     + segment_count, le64_to_cpu(raw_super->block_count));
4815     return 1;
4816     }
4817    
4818     diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
4819     index 7261245c208d..ecd2cf2fc584 100644
4820     --- a/fs/f2fs/xattr.c
4821     +++ b/fs/f2fs/xattr.c
4822     @@ -288,7 +288,7 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
4823     static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
4824     unsigned int index, unsigned int len,
4825     const char *name, struct f2fs_xattr_entry **xe,
4826     - void **base_addr)
4827     + void **base_addr, int *base_size)
4828     {
4829     void *cur_addr, *txattr_addr, *last_addr = NULL;
4830     nid_t xnid = F2FS_I(inode)->i_xattr_nid;
4831     @@ -299,8 +299,8 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
4832     if (!size && !inline_size)
4833     return -ENODATA;
4834    
4835     - txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
4836     - inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
4837     + *base_size = inline_size + size + XATTR_PADDING_SIZE;
4838     + txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
4839     if (!txattr_addr)
4840     return -ENOMEM;
4841    
4842     @@ -312,8 +312,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
4843    
4844     *xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
4845     index, len, name);
4846     - if (*xe)
4847     + if (*xe) {
4848     + *base_size = inline_size;
4849     goto check;
4850     + }
4851     }
4852    
4853     /* read from xattr node block */
4854     @@ -474,6 +476,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
4855     int error = 0;
4856     unsigned int size, len;
4857     void *base_addr = NULL;
4858     + int base_size;
4859    
4860     if (name == NULL)
4861     return -EINVAL;
4862     @@ -484,7 +487,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
4863    
4864     down_read(&F2FS_I(inode)->i_xattr_sem);
4865     error = lookup_all_xattrs(inode, ipage, index, len, name,
4866     - &entry, &base_addr);
4867     + &entry, &base_addr, &base_size);
4868     up_read(&F2FS_I(inode)->i_xattr_sem);
4869     if (error)
4870     return error;
4871     @@ -498,6 +501,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
4872    
4873     if (buffer) {
4874     char *pval = entry->e_name + entry->e_name_len;
4875     +
4876     + if (base_size - (pval - (char *)base_addr) < size) {
4877     + error = -ERANGE;
4878     + goto out;
4879     + }
4880     memcpy(buffer, pval, size);
4881     }
4882     error = size;
4883     diff --git a/fs/file.c b/fs/file.c
4884     index 7ffd6e9d103d..8d059d8973e9 100644
4885     --- a/fs/file.c
4886     +++ b/fs/file.c
4887     @@ -640,6 +640,35 @@ out_unlock:
4888     }
4889     EXPORT_SYMBOL(__close_fd); /* for ksys_close() */
4890    
4891     +/*
4892     + * variant of __close_fd that gets a ref on the file for later fput
4893     + */
4894     +int __close_fd_get_file(unsigned int fd, struct file **res)
4895     +{
4896     + struct files_struct *files = current->files;
4897     + struct file *file;
4898     + struct fdtable *fdt;
4899     +
4900     + spin_lock(&files->file_lock);
4901     + fdt = files_fdtable(files);
4902     + if (fd >= fdt->max_fds)
4903     + goto out_unlock;
4904     + file = fdt->fd[fd];
4905     + if (!file)
4906     + goto out_unlock;
4907     + rcu_assign_pointer(fdt->fd[fd], NULL);
4908     + __put_unused_fd(files, fd);
4909     + spin_unlock(&files->file_lock);
4910     + get_file(file);
4911     + *res = file;
4912     + return filp_close(file, files);
4913     +
4914     +out_unlock:
4915     + spin_unlock(&files->file_lock);
4916     + *res = NULL;
4917     + return -ENOENT;
4918     +}
4919     +
4920     void do_close_on_exec(struct files_struct *files)
4921     {
4922     unsigned i;
4923     diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
4924     index 41615f38bcff..f07c55ea0c22 100644
4925     --- a/include/linux/fdtable.h
4926     +++ b/include/linux/fdtable.h
4927     @@ -121,6 +121,7 @@ extern void __fd_install(struct files_struct *files,
4928     unsigned int fd, struct file *file);
4929     extern int __close_fd(struct files_struct *files,
4930     unsigned int fd);
4931     +extern int __close_fd_get_file(unsigned int fd, struct file **res);
4932    
4933     extern struct kmem_cache *files_cachep;
4934    
4935     diff --git a/include/linux/msi.h b/include/linux/msi.h
4936     index 0e9c50052ff3..eb213b87617c 100644
4937     --- a/include/linux/msi.h
4938     +++ b/include/linux/msi.h
4939     @@ -116,6 +116,8 @@ struct msi_desc {
4940     list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
4941     #define for_each_msi_entry(desc, dev) \
4942     list_for_each_entry((desc), dev_to_msi_list((dev)), list)
4943     +#define for_each_msi_entry_safe(desc, tmp, dev) \
4944     + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
4945    
4946     #ifdef CONFIG_PCI_MSI
4947     #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
4948     diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
4949     index 6894976b54e3..186cd8e970c7 100644
4950     --- a/include/linux/ptr_ring.h
4951     +++ b/include/linux/ptr_ring.h
4952     @@ -573,6 +573,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
4953     else if (destroy)
4954     destroy(ptr);
4955    
4956     + if (producer >= size)
4957     + producer = 0;
4958     __ptr_ring_set_size(r, size);
4959     r->producer = producer;
4960     r->consumer_head = 0;
4961     diff --git a/include/media/cec.h b/include/media/cec.h
4962     index 3fe5e5d2bb7e..707411ef8ba2 100644
4963     --- a/include/media/cec.h
4964     +++ b/include/media/cec.h
4965     @@ -155,6 +155,7 @@ struct cec_adapter {
4966     unsigned int transmit_queue_sz;
4967     struct list_head wait_queue;
4968     struct cec_data *transmitting;
4969     + bool transmit_in_progress;
4970    
4971     struct task_struct *kthread_config;
4972     struct completion config_completion;
4973     diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
4974     index 5ce926701bd0..5f67efbb77e8 100644
4975     --- a/include/net/ip_tunnels.h
4976     +++ b/include/net/ip_tunnels.h
4977     @@ -307,6 +307,26 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
4978     int ip_tunnel_encap_setup(struct ip_tunnel *t,
4979     struct ip_tunnel_encap *ipencap);
4980    
4981     +static inline bool pskb_inet_may_pull(struct sk_buff *skb)
4982     +{
4983     + int nhlen;
4984     +
4985     + switch (skb->protocol) {
4986     +#if IS_ENABLED(CONFIG_IPV6)
4987     + case htons(ETH_P_IPV6):
4988     + nhlen = sizeof(struct ipv6hdr);
4989     + break;
4990     +#endif
4991     + case htons(ETH_P_IP):
4992     + nhlen = sizeof(struct iphdr);
4993     + break;
4994     + default:
4995     + nhlen = 0;
4996     + }
4997     +
4998     + return pskb_network_may_pull(skb, nhlen);
4999     +}
5000     +
5001     static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
5002     {
5003     const struct ip_tunnel_encap_ops *ops;
5004     diff --git a/include/net/sock.h b/include/net/sock.h
5005     index 0e3a09380655..13f11e905a00 100644
5006     --- a/include/net/sock.h
5007     +++ b/include/net/sock.h
5008     @@ -298,6 +298,7 @@ struct sock_common {
5009     * @sk_filter: socket filtering instructions
5010     * @sk_timer: sock cleanup timer
5011     * @sk_stamp: time stamp of last packet received
5012     + * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
5013     * @sk_tsflags: SO_TIMESTAMPING socket options
5014     * @sk_tskey: counter to disambiguate concurrent tstamp requests
5015     * @sk_zckey: counter to order MSG_ZEROCOPY notifications
5016     @@ -474,6 +475,9 @@ struct sock {
5017     const struct cred *sk_peer_cred;
5018     long sk_rcvtimeo;
5019     ktime_t sk_stamp;
5020     +#if BITS_PER_LONG==32
5021     + seqlock_t sk_stamp_seq;
5022     +#endif
5023     u16 sk_tsflags;
5024     u8 sk_shutdown;
5025     u32 sk_tskey;
5026     @@ -2287,6 +2291,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
5027     atomic_add(segs, &sk->sk_drops);
5028     }
5029    
5030     +static inline ktime_t sock_read_timestamp(struct sock *sk)
5031     +{
5032     +#if BITS_PER_LONG==32
5033     + unsigned int seq;
5034     + ktime_t kt;
5035     +
5036     + do {
5037     + seq = read_seqbegin(&sk->sk_stamp_seq);
5038     + kt = sk->sk_stamp;
5039     + } while (read_seqretry(&sk->sk_stamp_seq, seq));
5040     +
5041     + return kt;
5042     +#else
5043     + return sk->sk_stamp;
5044     +#endif
5045     +}
5046     +
5047     +static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
5048     +{
5049     +#if BITS_PER_LONG==32
5050     + write_seqlock(&sk->sk_stamp_seq);
5051     + sk->sk_stamp = kt;
5052     + write_sequnlock(&sk->sk_stamp_seq);
5053     +#else
5054     + sk->sk_stamp = kt;
5055     +#endif
5056     +}
5057     +
5058     void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
5059     struct sk_buff *skb);
5060     void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
5061     @@ -2311,7 +2343,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
5062     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
5063     __sock_recv_timestamp(msg, sk, skb);
5064     else
5065     - sk->sk_stamp = kt;
5066     + sock_write_timestamp(sk, kt);
5067    
5068     if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
5069     __sock_recv_wifi_status(msg, sk, skb);
5070     @@ -2332,9 +2364,9 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
5071     if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
5072     __sock_recv_ts_and_drops(msg, sk, skb);
5073     else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
5074     - sk->sk_stamp = skb->tstamp;
5075     + sock_write_timestamp(sk, skb->tstamp);
5076     else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
5077     - sk->sk_stamp = 0;
5078     + sock_write_timestamp(sk, 0);
5079     }
5080    
5081     void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
5082     diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
5083     index 698e0d8a5ca4..d68e9e536814 100644
5084     --- a/include/trace/events/ext4.h
5085     +++ b/include/trace/events/ext4.h
5086     @@ -226,6 +226,26 @@ TRACE_EVENT(ext4_drop_inode,
5087     (unsigned long) __entry->ino, __entry->drop)
5088     );
5089    
5090     +TRACE_EVENT(ext4_nfs_commit_metadata,
5091     + TP_PROTO(struct inode *inode),
5092     +
5093     + TP_ARGS(inode),
5094     +
5095     + TP_STRUCT__entry(
5096     + __field( dev_t, dev )
5097     + __field( ino_t, ino )
5098     + ),
5099     +
5100     + TP_fast_assign(
5101     + __entry->dev = inode->i_sb->s_dev;
5102     + __entry->ino = inode->i_ino;
5103     + ),
5104     +
5105     + TP_printk("dev %d,%d ino %lu",
5106     + MAJOR(__entry->dev), MINOR(__entry->dev),
5107     + (unsigned long) __entry->ino)
5108     +);
5109     +
5110     TRACE_EVENT(ext4_mark_inode_dirty,
5111     TP_PROTO(struct inode *inode, unsigned long IP),
5112    
5113     diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
5114     index 6aaf5dd5383b..1f84977fab47 100644
5115     --- a/kernel/cgroup/cgroup.c
5116     +++ b/kernel/cgroup/cgroup.c
5117     @@ -4202,20 +4202,25 @@ static void css_task_iter_advance(struct css_task_iter *it)
5118    
5119     lockdep_assert_held(&css_set_lock);
5120     repeat:
5121     - /*
5122     - * Advance iterator to find next entry. cset->tasks is consumed
5123     - * first and then ->mg_tasks. After ->mg_tasks, we move onto the
5124     - * next cset.
5125     - */
5126     - next = it->task_pos->next;
5127     + if (it->task_pos) {
5128     + /*
5129     + * Advance iterator to find next entry. cset->tasks is
5130     + * consumed first and then ->mg_tasks. After ->mg_tasks,
5131     + * we move onto the next cset.
5132     + */
5133     + next = it->task_pos->next;
5134    
5135     - if (next == it->tasks_head)
5136     - next = it->mg_tasks_head->next;
5137     + if (next == it->tasks_head)
5138     + next = it->mg_tasks_head->next;
5139    
5140     - if (next == it->mg_tasks_head)
5141     + if (next == it->mg_tasks_head)
5142     + css_task_iter_advance_css_set(it);
5143     + else
5144     + it->task_pos = next;
5145     + } else {
5146     + /* called from start, proceed to the first cset */
5147     css_task_iter_advance_css_set(it);
5148     - else
5149     - it->task_pos = next;
5150     + }
5151    
5152     /* if PROCS, skip over tasks which aren't group leaders */
5153     if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
5154     @@ -4255,7 +4260,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
5155    
5156     it->cset_head = it->cset_pos;
5157    
5158     - css_task_iter_advance_css_set(it);
5159     + css_task_iter_advance(it);
5160    
5161     spin_unlock_irq(&css_set_lock);
5162     }
5163     diff --git a/kernel/panic.c b/kernel/panic.c
5164     index f6d549a29a5c..d10c340c43b0 100644
5165     --- a/kernel/panic.c
5166     +++ b/kernel/panic.c
5167     @@ -14,6 +14,7 @@
5168     #include <linux/kmsg_dump.h>
5169     #include <linux/kallsyms.h>
5170     #include <linux/notifier.h>
5171     +#include <linux/vt_kern.h>
5172     #include <linux/module.h>
5173     #include <linux/random.h>
5174     #include <linux/ftrace.h>
5175     @@ -237,7 +238,10 @@ void panic(const char *fmt, ...)
5176     if (_crash_kexec_post_notifiers)
5177     __crash_kexec(NULL);
5178    
5179     - bust_spinlocks(0);
5180     +#ifdef CONFIG_VT
5181     + unblank_screen();
5182     +#endif
5183     + console_unblank();
5184    
5185     /*
5186     * We may have ended up stopping the CPU holding the lock (in
5187     diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
5188     index c603d33d5410..5d01edf8d819 100644
5189     --- a/net/ax25/af_ax25.c
5190     +++ b/net/ax25/af_ax25.c
5191     @@ -653,15 +653,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
5192     break;
5193     }
5194    
5195     - dev = dev_get_by_name(&init_net, devname);
5196     + rtnl_lock();
5197     + dev = __dev_get_by_name(&init_net, devname);
5198     if (!dev) {
5199     + rtnl_unlock();
5200     res = -ENODEV;
5201     break;
5202     }
5203    
5204     ax25->ax25_dev = ax25_dev_ax25dev(dev);
5205     + if (!ax25->ax25_dev) {
5206     + rtnl_unlock();
5207     + res = -ENODEV;
5208     + break;
5209     + }
5210     ax25_fillin_cb(ax25, ax25->ax25_dev);
5211     - dev_put(dev);
5212     + rtnl_unlock();
5213     break;
5214    
5215     default:
5216     diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
5217     index 9a3a301e1e2f..d92195cd7834 100644
5218     --- a/net/ax25/ax25_dev.c
5219     +++ b/net/ax25/ax25_dev.c
5220     @@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_device *dev)
5221     if ((s = ax25_dev_list) == ax25_dev) {
5222     ax25_dev_list = s->next;
5223     spin_unlock_bh(&ax25_dev_lock);
5224     + dev->ax25_ptr = NULL;
5225     dev_put(dev);
5226     kfree(ax25_dev);
5227     return;
5228     @@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_device *dev)
5229     if (s->next == ax25_dev) {
5230     s->next = ax25_dev->next;
5231     spin_unlock_bh(&ax25_dev_lock);
5232     + dev->ax25_ptr = NULL;
5233     dev_put(dev);
5234     kfree(ax25_dev);
5235     return;
5236     diff --git a/net/compat.c b/net/compat.c
5237     index 47a614b370cd..d1f3a8a0b3ef 100644
5238     --- a/net/compat.c
5239     +++ b/net/compat.c
5240     @@ -467,12 +467,14 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
5241     ctv = (struct compat_timeval __user *) userstamp;
5242     err = -ENOENT;
5243     sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5244     - tv = ktime_to_timeval(sk->sk_stamp);
5245     + tv = ktime_to_timeval(sock_read_timestamp(sk));
5246     +
5247     if (tv.tv_sec == -1)
5248     return err;
5249     if (tv.tv_sec == 0) {
5250     - sk->sk_stamp = ktime_get_real();
5251     - tv = ktime_to_timeval(sk->sk_stamp);
5252     + ktime_t kt = ktime_get_real();
5253     + sock_write_timestamp(sk, kt);
5254     + tv = ktime_to_timeval(kt);
5255     }
5256     err = 0;
5257     if (put_user(tv.tv_sec, &ctv->tv_sec) ||
5258     @@ -494,12 +496,13 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
5259     ctv = (struct compat_timespec __user *) userstamp;
5260     err = -ENOENT;
5261     sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5262     - ts = ktime_to_timespec(sk->sk_stamp);
5263     + ts = ktime_to_timespec(sock_read_timestamp(sk));
5264     if (ts.tv_sec == -1)
5265     return err;
5266     if (ts.tv_sec == 0) {
5267     - sk->sk_stamp = ktime_get_real();
5268     - ts = ktime_to_timespec(sk->sk_stamp);
5269     + ktime_t kt = ktime_get_real();
5270     + sock_write_timestamp(sk, kt);
5271     + ts = ktime_to_timespec(kt);
5272     }
5273     err = 0;
5274     if (put_user(ts.tv_sec, &ctv->tv_sec) ||
5275     diff --git a/net/core/sock.c b/net/core/sock.c
5276     index 080a880a1761..98659fb6e9fb 100644
5277     --- a/net/core/sock.c
5278     +++ b/net/core/sock.c
5279     @@ -2743,6 +2743,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
5280     sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
5281    
5282     sk->sk_stamp = SK_DEFAULT_STAMP;
5283     +#if BITS_PER_LONG==32
5284     + seqlock_init(&sk->sk_stamp_seq);
5285     +#endif
5286     atomic_set(&sk->sk_zckey, 0);
5287    
5288     #ifdef CONFIG_NET_RX_BUSY_POLL
5289     @@ -2842,12 +2845,13 @@ int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
5290     struct timeval tv;
5291    
5292     sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5293     - tv = ktime_to_timeval(sk->sk_stamp);
5294     + tv = ktime_to_timeval(sock_read_timestamp(sk));
5295     if (tv.tv_sec == -1)
5296     return -ENOENT;
5297     if (tv.tv_sec == 0) {
5298     - sk->sk_stamp = ktime_get_real();
5299     - tv = ktime_to_timeval(sk->sk_stamp);
5300     + ktime_t kt = ktime_get_real();
5301     + sock_write_timestamp(sk, kt);
5302     + tv = ktime_to_timeval(kt);
5303     }
5304     return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
5305     }
5306     @@ -2858,11 +2862,12 @@ int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
5307     struct timespec ts;
5308    
5309     sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5310     - ts = ktime_to_timespec(sk->sk_stamp);
5311     + ts = ktime_to_timespec(sock_read_timestamp(sk));
5312     if (ts.tv_sec == -1)
5313     return -ENOENT;
5314     if (ts.tv_sec == 0) {
5315     - sk->sk_stamp = ktime_get_real();
5316     + ktime_t kt = ktime_get_real();
5317     + sock_write_timestamp(sk, kt);
5318     ts = ktime_to_timespec(sk->sk_stamp);
5319     }
5320     return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
5321     diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
5322     index ca53efa17be1..8bec827081cd 100644
5323     --- a/net/ieee802154/6lowpan/tx.c
5324     +++ b/net/ieee802154/6lowpan/tx.c
5325     @@ -48,6 +48,9 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
5326     const struct ipv6hdr *hdr = ipv6_hdr(skb);
5327     struct neighbour *n;
5328    
5329     + if (!daddr)
5330     + return -EINVAL;
5331     +
5332     /* TODO:
5333     * if this package isn't ipv6 one, where should it be routed?
5334     */
5335     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
5336     index 38befe829caf..0fe9419bd12b 100644
5337     --- a/net/ipv4/ip_gre.c
5338     +++ b/net/ipv4/ip_gre.c
5339     @@ -674,6 +674,9 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
5340     struct ip_tunnel *tunnel = netdev_priv(dev);
5341     const struct iphdr *tnl_params;
5342    
5343     + if (!pskb_inet_may_pull(skb))
5344     + goto free_skb;
5345     +
5346     if (tunnel->collect_md) {
5347     gre_fb_xmit(skb, dev, skb->protocol);
5348     return NETDEV_TX_OK;
5349     @@ -717,6 +720,9 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
5350     struct ip_tunnel *tunnel = netdev_priv(dev);
5351     bool truncate = false;
5352    
5353     + if (!pskb_inet_may_pull(skb))
5354     + goto free_skb;
5355     +
5356     if (tunnel->collect_md) {
5357     erspan_fb_xmit(skb, dev, skb->protocol);
5358     return NETDEV_TX_OK;
5359     @@ -760,6 +766,9 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
5360     {
5361     struct ip_tunnel *tunnel = netdev_priv(dev);
5362    
5363     + if (!pskb_inet_may_pull(skb))
5364     + goto free_skb;
5365     +
5366     if (tunnel->collect_md) {
5367     gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
5368     return NETDEV_TX_OK;
5369     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
5370     index 284a22154b4e..c4f5602308ed 100644
5371     --- a/net/ipv4/ip_tunnel.c
5372     +++ b/net/ipv4/ip_tunnel.c
5373     @@ -627,7 +627,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5374     const struct iphdr *tnl_params, u8 protocol)
5375     {
5376     struct ip_tunnel *tunnel = netdev_priv(dev);
5377     - unsigned int inner_nhdr_len = 0;
5378     const struct iphdr *inner_iph;
5379     struct flowi4 fl4;
5380     u8 tos, ttl;
5381     @@ -637,14 +636,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5382     __be32 dst;
5383     bool connected;
5384    
5385     - /* ensure we can access the inner net header, for several users below */
5386     - if (skb->protocol == htons(ETH_P_IP))
5387     - inner_nhdr_len = sizeof(struct iphdr);
5388     - else if (skb->protocol == htons(ETH_P_IPV6))
5389     - inner_nhdr_len = sizeof(struct ipv6hdr);
5390     - if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
5391     - goto tx_error;
5392     -
5393     inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
5394     connected = (tunnel->parms.iph.daddr != 0);
5395    
5396     diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
5397     index de31b302d69c..d7b43e700023 100644
5398     --- a/net/ipv4/ip_vti.c
5399     +++ b/net/ipv4/ip_vti.c
5400     @@ -241,6 +241,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
5401     struct ip_tunnel *tunnel = netdev_priv(dev);
5402     struct flowi fl;
5403    
5404     + if (!pskb_inet_may_pull(skb))
5405     + goto tx_err;
5406     +
5407     memset(&fl, 0, sizeof(fl));
5408    
5409     switch (skb->protocol) {
5410     @@ -253,15 +256,18 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
5411     memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
5412     break;
5413     default:
5414     - dev->stats.tx_errors++;
5415     - dev_kfree_skb(skb);
5416     - return NETDEV_TX_OK;
5417     + goto tx_err;
5418     }
5419    
5420     /* override mark with tunnel output key */
5421     fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
5422    
5423     return vti_xmit(skb, dev, &fl);
5424     +
5425     +tx_err:
5426     + dev->stats.tx_errors++;
5427     + kfree_skb(skb);
5428     + return NETDEV_TX_OK;
5429     }
5430    
5431     static int vti4_err(struct sk_buff *skb, u32 info)
5432     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
5433     index 515adbdba1d2..0f7d434c1eed 100644
5434     --- a/net/ipv6/ip6_gre.c
5435     +++ b/net/ipv6/ip6_gre.c
5436     @@ -879,6 +879,9 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
5437     struct net_device_stats *stats = &t->dev->stats;
5438     int ret;
5439    
5440     + if (!pskb_inet_may_pull(skb))
5441     + goto tx_err;
5442     +
5443     if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
5444     goto tx_err;
5445    
5446     @@ -921,6 +924,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5447     int nhoff;
5448     int thoff;
5449    
5450     + if (!pskb_inet_may_pull(skb))
5451     + goto tx_err;
5452     +
5453     if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
5454     goto tx_err;
5455    
5456     @@ -993,8 +999,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5457     goto tx_err;
5458     }
5459     } else {
5460     - struct ipv6hdr *ipv6h = ipv6_hdr(skb);
5461     -
5462     switch (skb->protocol) {
5463     case htons(ETH_P_IP):
5464     memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
5465     @@ -1002,7 +1006,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5466     &dsfield, &encap_limit);
5467     break;
5468     case htons(ETH_P_IPV6):
5469     - if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
5470     + if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
5471     goto tx_err;
5472     if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
5473     &dsfield, &encap_limit))
5474     diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
5475     index 99179b9c8384..0c6403cf8b52 100644
5476     --- a/net/ipv6/ip6_tunnel.c
5477     +++ b/net/ipv6/ip6_tunnel.c
5478     @@ -1243,10 +1243,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
5479     u8 tproto;
5480     int err;
5481    
5482     - /* ensure we can access the full inner ip header */
5483     - if (!pskb_may_pull(skb, sizeof(struct iphdr)))
5484     - return -1;
5485     -
5486     iph = ip_hdr(skb);
5487     memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
5488    
5489     @@ -1321,9 +1317,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
5490     u8 tproto;
5491     int err;
5492    
5493     - if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
5494     - return -1;
5495     -
5496     ipv6h = ipv6_hdr(skb);
5497     tproto = READ_ONCE(t->parms.proto);
5498     if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
5499     @@ -1405,6 +1398,9 @@ ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
5500     struct net_device_stats *stats = &t->dev->stats;
5501     int ret;
5502    
5503     + if (!pskb_inet_may_pull(skb))
5504     + goto tx_err;
5505     +
5506     switch (skb->protocol) {
5507     case htons(ETH_P_IP):
5508     ret = ip4ip6_tnl_xmit(skb, dev);
5509     diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
5510     index 706fe42e4928..8b6eefff2f7e 100644
5511     --- a/net/ipv6/ip6_vti.c
5512     +++ b/net/ipv6/ip6_vti.c
5513     @@ -522,18 +522,18 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
5514     {
5515     struct ip6_tnl *t = netdev_priv(dev);
5516     struct net_device_stats *stats = &t->dev->stats;
5517     - struct ipv6hdr *ipv6h;
5518     struct flowi fl;
5519     int ret;
5520    
5521     + if (!pskb_inet_may_pull(skb))
5522     + goto tx_err;
5523     +
5524     memset(&fl, 0, sizeof(fl));
5525    
5526     switch (skb->protocol) {
5527     case htons(ETH_P_IPV6):
5528     - ipv6h = ipv6_hdr(skb);
5529     -
5530     if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
5531     - vti6_addr_conflict(t, ipv6h))
5532     + vti6_addr_conflict(t, ipv6_hdr(skb)))
5533     goto tx_err;
5534    
5535     xfrm_decode_session(skb, &fl, AF_INET6);
5536     diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
5537     index 377a2ee5d9ad..eb3220812b56 100644
5538     --- a/net/ipv6/ip6mr.c
5539     +++ b/net/ipv6/ip6mr.c
5540     @@ -51,6 +51,7 @@
5541     #include <linux/export.h>
5542     #include <net/ip6_checksum.h>
5543     #include <linux/netconf.h>
5544     +#include <net/ip_tunnels.h>
5545    
5546     #include <linux/nospec.h>
5547    
5548     @@ -599,13 +600,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
5549     .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
5550     .flowi6_mark = skb->mark,
5551     };
5552     - int err;
5553    
5554     - err = ip6mr_fib_lookup(net, &fl6, &mrt);
5555     - if (err < 0) {
5556     - kfree_skb(skb);
5557     - return err;
5558     - }
5559     + if (!pskb_inet_may_pull(skb))
5560     + goto tx_err;
5561     +
5562     + if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
5563     + goto tx_err;
5564    
5565     read_lock(&mrt_lock);
5566     dev->stats.tx_bytes += skb->len;
5567     @@ -614,6 +614,11 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
5568     read_unlock(&mrt_lock);
5569     kfree_skb(skb);
5570     return NETDEV_TX_OK;
5571     +
5572     +tx_err:
5573     + dev->stats.tx_errors++;
5574     + kfree_skb(skb);
5575     + return NETDEV_TX_OK;
5576     }
5577    
5578     static int reg_vif_get_iflink(const struct net_device *dev)
5579     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
5580     index 51c9f75f34b9..1e03305c0549 100644
5581     --- a/net/ipv6/sit.c
5582     +++ b/net/ipv6/sit.c
5583     @@ -1021,6 +1021,9 @@ tx_error:
5584     static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
5585     struct net_device *dev)
5586     {
5587     + if (!pskb_inet_may_pull(skb))
5588     + goto tx_err;
5589     +
5590     switch (skb->protocol) {
5591     case htons(ETH_P_IP):
5592     sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
5593     diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
5594     index 03f37c4e64fe..1d3144d19903 100644
5595     --- a/net/netrom/af_netrom.c
5596     +++ b/net/netrom/af_netrom.c
5597     @@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
5598     sk_for_each(s, &nr_list)
5599     if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
5600     s->sk_state == TCP_LISTEN) {
5601     - bh_lock_sock(s);
5602     + sock_hold(s);
5603     goto found;
5604     }
5605     s = NULL;
5606     @@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
5607     struct nr_sock *nr = nr_sk(s);
5608    
5609     if (nr->my_index == index && nr->my_id == id) {
5610     - bh_lock_sock(s);
5611     + sock_hold(s);
5612     goto found;
5613     }
5614     }
5615     @@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
5616    
5617     if (nr->your_index == index && nr->your_id == id &&
5618     !ax25cmp(&nr->dest_addr, dest)) {
5619     - bh_lock_sock(s);
5620     + sock_hold(s);
5621     goto found;
5622     }
5623     }
5624     @@ -224,7 +224,7 @@ static unsigned short nr_find_next_circuit(void)
5625     if (i != 0 && j != 0) {
5626     if ((sk=nr_find_socket(i, j)) == NULL)
5627     break;
5628     - bh_unlock_sock(sk);
5629     + sock_put(sk);
5630     }
5631    
5632     id++;
5633     @@ -920,6 +920,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5634     }
5635    
5636     if (sk != NULL) {
5637     + bh_lock_sock(sk);
5638     skb_reset_transport_header(skb);
5639    
5640     if (frametype == NR_CONNACK && skb->len == 22)
5641     @@ -929,6 +930,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5642    
5643     ret = nr_process_rx_frame(sk, skb);
5644     bh_unlock_sock(sk);
5645     + sock_put(sk);
5646     return ret;
5647     }
5648    
5649     @@ -960,10 +962,12 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5650     (make = nr_make_new(sk)) == NULL) {
5651     nr_transmit_refusal(skb, 0);
5652     if (sk)
5653     - bh_unlock_sock(sk);
5654     + sock_put(sk);
5655     return 0;
5656     }
5657    
5658     + bh_lock_sock(sk);
5659     +
5660     window = skb->data[20];
5661    
5662     skb->sk = make;
5663     @@ -1016,6 +1020,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5664     sk->sk_data_ready(sk);
5665    
5666     bh_unlock_sock(sk);
5667     + sock_put(sk);
5668    
5669     nr_insert_socket(make);
5670    
5671     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5672     index 5dda263b4a0a..eedacdebcd4c 100644
5673     --- a/net/packet/af_packet.c
5674     +++ b/net/packet/af_packet.c
5675     @@ -2625,7 +2625,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
5676     sll_addr)))
5677     goto out;
5678     proto = saddr->sll_protocol;
5679     - addr = saddr->sll_addr;
5680     + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
5681     dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
5682     if (addr && dev && saddr->sll_halen < dev->addr_len)
5683     goto out;
5684     @@ -2825,7 +2825,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
5685     if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
5686     goto out;
5687     proto = saddr->sll_protocol;
5688     - addr = saddr->sll_addr;
5689     + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
5690     dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
5691     if (addr && dev && saddr->sll_halen < dev->addr_len)
5692     goto out;
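Editor's note: both packet_snd() and tpacket_snd() now treat a sockaddr_ll whose sll_halen is zero as "no destination address supplied" (addr = NULL) rather than handing a pointer with zero valid bytes to the device header code; the existing length check against dev->addr_len then only applies when an address was really provided. A hedged user-space sketch of that validation order, using struct sockaddr_ll from <linux/if_packet.h> (ll_dest_addr() is an illustrative helper, not a kernel function):

	#include <linux/if_packet.h>
	#include <stddef.h>

	/* Illustrative only: mirror the "sll_halen == 0 means no address"
	 * convention and the length check against the device address size. */
	static int ll_dest_addr(const struct sockaddr_ll *sll,
				unsigned int dev_addr_len,
				const unsigned char **addrp)
	{
		const unsigned char *addr = sll->sll_halen ? sll->sll_addr : NULL;

		if (addr && sll->sll_halen < dev_addr_len)
			return -1;	/* address given but shorter than the device needs */
		*addrp = addr;		/* NULL means no explicit destination address */
		return 0;
	}
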
5693     diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
5694     index 986f3ed7d1a2..b7e67310ec37 100644
5695     --- a/net/sunrpc/svcsock.c
5696     +++ b/net/sunrpc/svcsock.c
5697     @@ -549,7 +549,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
5698     /* Don't enable netstamp, sunrpc doesn't
5699     need that much accuracy */
5700     }
5701     - svsk->sk_sk->sk_stamp = skb->tstamp;
5702     + sock_write_timestamp(svsk->sk_sk, skb->tstamp);
5703     set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
5704    
5705     len = skb->len;
5706     diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
5707     index e65c3a8551e4..040153ffc357 100644
5708     --- a/net/tipc/bearer.c
5709     +++ b/net/tipc/bearer.c
5710     @@ -317,7 +317,6 @@ static int tipc_enable_bearer(struct net *net, const char *name,
5711     res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
5712     if (res) {
5713     bearer_disable(net, b);
5714     - kfree(b);
5715     errstr = "failed to create discoverer";
5716     goto rejected;
5717     }
5718     diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
5719     index 70e65a2ff207..8bdea5abad11 100644
5720     --- a/security/keys/keyctl_pkey.c
5721     +++ b/security/keys/keyctl_pkey.c
5722     @@ -50,6 +50,8 @@ static int keyctl_pkey_params_parse(struct kernel_pkey_params *params)
5723     if (*p == '\0' || *p == ' ' || *p == '\t')
5724     continue;
5725     token = match_token(p, param_keys, args);
5726     + if (token == Opt_err)
5727     + return -EINVAL;
5728     if (__test_and_set_bit(token, &token_mask))
5729     return -EINVAL;
5730     q = args[0].from;
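Editor's note: the keyctl_pkey_params_parse() hunk rejects unrecognised "key=value" options. match_token() returns the Opt_err sentinel for unknown keys, and without the added check that sentinel would be used as a bit index in token_mask and as a switch label further down. A generic sketch of the guard (the enum values and handle_option() are illustrative names, not the kernel's):

	enum { OPT_FOO, OPT_BAR, OPT_MAX, OPT_ERR = -1 };

	/* Illustrative: refuse unknown tokens before the token value is used
	 * as a bit index or switch label, and refuse duplicates. */
	static int handle_option(int token, unsigned long *seen_mask)
	{
		if (token == OPT_ERR)
			return -1;			/* unknown option */
		if (*seen_mask & (1UL << token))
			return -1;			/* duplicate option */
		*seen_mask |= 1UL << token;
		return token;
	}
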
5731     diff --git a/sound/core/pcm.c b/sound/core/pcm.c
5732     index fdb9b92fc8d6..01b9d62eef14 100644
5733     --- a/sound/core/pcm.c
5734     +++ b/sound/core/pcm.c
5735     @@ -25,6 +25,7 @@
5736     #include <linux/time.h>
5737     #include <linux/mutex.h>
5738     #include <linux/device.h>
5739     +#include <linux/nospec.h>
5740     #include <sound/core.h>
5741     #include <sound/minors.h>
5742     #include <sound/pcm.h>
5743     @@ -129,6 +130,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
5744     return -EFAULT;
5745     if (stream < 0 || stream > 1)
5746     return -EINVAL;
5747     + stream = array_index_nospec(stream, 2);
5748     if (get_user(subdevice, &info->subdevice))
5749     return -EFAULT;
5750     mutex_lock(&register_mutex);
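Editor's note: several sound hunks in this patch (snd_pcm_control_ioctl() here, and the emufx, hdsp and emux_hwdep changes below) apply the same Spectre-v1 hardening pattern: after the architectural bounds check, the user-controlled index is passed through array_index_nospec() so that a mispredicted branch cannot speculatively index past the array. A sketch of the pattern with a plain masking analog of the helper (nospec_clamp() is illustrative; the kernel's array_index_nospec() is built on array_index_mask_nospec() and keeps the comparison branch-free at the instruction level, which plain C does not guarantee):

	#include <stddef.h>

	/* Illustrative analog: clamp an index to [0, size) with a mask
	 * instead of a branch; out-of-range indexes become 0. */
	static size_t nospec_clamp(size_t idx, size_t size)
	{
		size_t mask = (size_t)0 - (size_t)(idx < size);	/* all ones when in range */
		return idx & mask;
	}

	/* Usage mirrors the hunks: bounds check first, then clamp the
	 * user-controlled index before it touches the array. */
	static int stream_lookup(int stream, const int table[2])
	{
		if (stream < 0 || stream > 1)
			return -1;
		return table[nospec_clamp((size_t)stream, 2)];
	}
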
5751     diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
5752     index 54cdd4ffa9ce..ac20acf48fc6 100644
5753     --- a/sound/firewire/amdtp-stream-trace.h
5754     +++ b/sound/firewire/amdtp-stream-trace.h
5755     @@ -131,7 +131,7 @@ TRACE_EVENT(in_packet_without_header,
5756     __entry->index = index;
5757     ),
5758     TP_printk(
5759     - "%02u %04u %04x %04x %02d %03u %3u %3u %02u %01u %02u",
5760     + "%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u",
5761     __entry->second,
5762     __entry->cycle,
5763     __entry->src,
5764     @@ -169,7 +169,7 @@ TRACE_EVENT(out_packet_without_header,
5765     __entry->dest = fw_parent_device(s->unit)->node_id;
5766     __entry->payload_quadlets = payload_length / 4;
5767     __entry->data_blocks = data_blocks,
5768     - __entry->data_blocks = s->data_block_counter,
5769     + __entry->data_block_counter = s->data_block_counter,
5770     __entry->packet_index = s->packet_index;
5771     __entry->irq = !!in_interrupt();
5772     __entry->index = index;
5773     diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
5774     index 9be76c808fcc..3ada55ed5381 100644
5775     --- a/sound/firewire/amdtp-stream.c
5776     +++ b/sound/firewire/amdtp-stream.c
5777     @@ -654,15 +654,17 @@ end:
5778     }
5779    
5780     static int handle_in_packet_without_header(struct amdtp_stream *s,
5781     - unsigned int payload_quadlets, unsigned int cycle,
5782     + unsigned int payload_length, unsigned int cycle,
5783     unsigned int index)
5784     {
5785     __be32 *buffer;
5786     + unsigned int payload_quadlets;
5787     unsigned int data_blocks;
5788     struct snd_pcm_substream *pcm;
5789     unsigned int pcm_frames;
5790    
5791     buffer = s->buffer.packets[s->packet_index].buffer;
5792     + payload_quadlets = payload_length / 4;
5793     data_blocks = payload_quadlets / s->data_block_quadlets;
5794    
5795     trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
5796     diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
5797     index 654a50319198..4d191172fe3f 100644
5798     --- a/sound/firewire/fireface/ff-protocol-ff400.c
5799     +++ b/sound/firewire/fireface/ff-protocol-ff400.c
5800     @@ -152,7 +152,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
5801     if (reg == NULL)
5802     return -ENOMEM;
5803    
5804     - if (enable) {
5805     + if (!enable) {
5806     /*
5807     * Each quadlet is corresponding to data channels in a data
5808     * blocks in reverse order. Precisely, quadlets for available
5809     diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
5810     index 6ebe817801ea..1f25e6d029d8 100644
5811     --- a/sound/pci/emu10k1/emufx.c
5812     +++ b/sound/pci/emu10k1/emufx.c
5813     @@ -36,6 +36,7 @@
5814     #include <linux/init.h>
5815     #include <linux/mutex.h>
5816     #include <linux/moduleparam.h>
5817     +#include <linux/nospec.h>
5818    
5819     #include <sound/core.h>
5820     #include <sound/tlv.h>
5821     @@ -1026,6 +1027,8 @@ static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu,
5822    
5823     if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
5824     return -EINVAL;
5825     + ipcm->substream = array_index_nospec(ipcm->substream,
5826     + EMU10K1_FX8010_PCM_COUNT);
5827     if (ipcm->channels > 32)
5828     return -EINVAL;
5829     pcm = &emu->fx8010.pcm[ipcm->substream];
5830     @@ -1072,6 +1075,8 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
5831    
5832     if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
5833     return -EINVAL;
5834     + ipcm->substream = array_index_nospec(ipcm->substream,
5835     + EMU10K1_FX8010_PCM_COUNT);
5836     pcm = &emu->fx8010.pcm[ipcm->substream];
5837     mutex_lock(&emu->fx8010.lock);
5838     spin_lock_irq(&emu->reg_lock);
5839     diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
5840     index dd7d4242d6d2..86841d46a8fc 100644
5841     --- a/sound/pci/hda/hda_tegra.c
5842     +++ b/sound/pci/hda/hda_tegra.c
5843     @@ -233,10 +233,12 @@ static int hda_tegra_suspend(struct device *dev)
5844     struct snd_card *card = dev_get_drvdata(dev);
5845     struct azx *chip = card->private_data;
5846     struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
5847     + struct hdac_bus *bus = azx_bus(chip);
5848    
5849     snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
5850    
5851     azx_stop_chip(chip);
5852     + synchronize_irq(bus->irq);
5853     azx_enter_link_reset(chip);
5854     hda_tegra_disable_clocks(hda);
5855    
5856     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
5857     index 950e02e71766..51cc6589443f 100644
5858     --- a/sound/pci/hda/patch_conexant.c
5859     +++ b/sound/pci/hda/patch_conexant.c
5860     @@ -923,6 +923,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
5861     SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
5862     SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
5863     SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
5864     + SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
5865     SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
5866     SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
5867     SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
5868     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5869     index 15021c839372..54fc9c0f07de 100644
5870     --- a/sound/pci/hda/patch_realtek.c
5871     +++ b/sound/pci/hda/patch_realtek.c
5872     @@ -6424,7 +6424,7 @@ static const struct hda_fixup alc269_fixups[] = {
5873     [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
5874     .type = HDA_FIXUP_PINS,
5875     .v.pins = (const struct hda_pintbl[]) {
5876     - { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
5877     + { 0x19, 0x01a1103c }, /* use as headset mic */
5878     { }
5879     },
5880     .chained = true,
5881     @@ -6573,6 +6573,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5882     SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5883     SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5884     SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5885     + SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
5886     SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5887     SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5888     SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5889     diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
5890     index 1bff4b1b39cd..ba99ff0e93e0 100644
5891     --- a/sound/pci/rme9652/hdsp.c
5892     +++ b/sound/pci/rme9652/hdsp.c
5893     @@ -30,6 +30,7 @@
5894     #include <linux/math64.h>
5895     #include <linux/vmalloc.h>
5896     #include <linux/io.h>
5897     +#include <linux/nospec.h>
5898    
5899     #include <sound/core.h>
5900     #include <sound/control.h>
5901     @@ -4092,15 +4093,16 @@ static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
5902     struct snd_pcm_channel_info *info)
5903     {
5904     struct hdsp *hdsp = snd_pcm_substream_chip(substream);
5905     - int mapped_channel;
5906     + unsigned int channel = info->channel;
5907    
5908     - if (snd_BUG_ON(info->channel >= hdsp->max_channels))
5909     + if (snd_BUG_ON(channel >= hdsp->max_channels))
5910     return -EINVAL;
5911     + channel = array_index_nospec(channel, hdsp->max_channels);
5912    
5913     - if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
5914     + if (hdsp->channel_map[channel] < 0)
5915     return -EINVAL;
5916    
5917     - info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
5918     + info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
5919     info->first = 0;
5920     info->step = 32;
5921     return 0;
5922     diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
5923     index 9d9f6e41d81c..08a5152e635a 100644
5924     --- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
5925     +++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
5926     @@ -389,6 +389,20 @@ static struct snd_soc_card snd_soc_card_cht = {
5927     };
5928    
5929     static const struct dmi_system_id cht_max98090_quirk_table[] = {
5930     + {
5931     + /* Clapper model Chromebook */
5932     + .matches = {
5933     + DMI_MATCH(DMI_PRODUCT_NAME, "Clapper"),
5934     + },
5935     + .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
5936     + },
5937     + {
5938     + /* Gnawty model Chromebook (Acer Chromebook CB3-111) */
5939     + .matches = {
5940     + DMI_MATCH(DMI_PRODUCT_NAME, "Gnawty"),
5941     + },
5942     + .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
5943     + },
5944     {
5945     /* Swanky model Chromebook (Toshiba Chromebook 2) */
5946     .matches = {
5947     diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
5948     index e557946718a9..d9fcae071b47 100644
5949     --- a/sound/synth/emux/emux_hwdep.c
5950     +++ b/sound/synth/emux/emux_hwdep.c
5951     @@ -22,9 +22,9 @@
5952     #include <sound/core.h>
5953     #include <sound/hwdep.h>
5954     #include <linux/uaccess.h>
5955     +#include <linux/nospec.h>
5956     #include "emux_voice.h"
5957    
5958     -
5959     #define TMP_CLIENT_ID 0x1001
5960    
5961     /*
5962     @@ -66,13 +66,16 @@ snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
5963     return -EFAULT;
5964     if (info.mode < 0 || info.mode >= EMUX_MD_END)
5965     return -EINVAL;
5966     + info.mode = array_index_nospec(info.mode, EMUX_MD_END);
5967    
5968     if (info.port < 0) {
5969     for (i = 0; i < emu->num_ports; i++)
5970     emu->portptrs[i]->ctrls[info.mode] = info.value;
5971     } else {
5972     - if (info.port < emu->num_ports)
5973     + if (info.port < emu->num_ports) {
5974     + info.port = array_index_nospec(info.port, emu->num_ports);
5975     emu->portptrs[info.port]->ctrls[info.mode] = info.value;
5976     + }
5977     }
5978     return 0;
5979     }
5980     diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
5981     index 3692f29fee46..70144b98141c 100644
5982     --- a/tools/lib/traceevent/event-parse.c
5983     +++ b/tools/lib/traceevent/event-parse.c
5984     @@ -4970,6 +4970,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
5985    
5986     if (arg->type == TEP_PRINT_BSTRING) {
5987     trace_seq_puts(s, arg->string.string);
5988     + arg = arg->next;
5989     break;
5990     }
5991    
5992     diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
5993     index 82657c01a3b8..5f69fd0b745a 100644
5994     --- a/tools/perf/arch/common.c
5995     +++ b/tools/perf/arch/common.c
5996     @@ -200,3 +200,13 @@ int perf_env__lookup_objdump(struct perf_env *env, const char **path)
5997    
5998     return perf_env__lookup_binutils_path(env, "objdump", path);
5999     }
6000     +
6001     +/*
6002     + * Some architectures have a single address space for kernel and user addresses,
6003     + * which makes it possible to determine if an address is in kernel space or user
6004     + * space.
6005     + */
6006     +bool perf_env__single_address_space(struct perf_env *env)
6007     +{
6008     + return strcmp(perf_env__arch(env), "sparc");
6009     +}
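Editor's note: the new perf_env__single_address_space() above returns the raw strcmp() result, so it reads as "true for every architecture string except "sparc"", the one arch here whose kernel and user addresses live in separate address spaces. A tiny standalone reminder of the idiom, since strcmp() returning 0 on a match is easy to misread:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* strcmp() is 0 only on an exact match, so a non-zero result
		 * is used directly as the boolean "not sparc". */
		printf("%d\n", !!strcmp("x86_64", "sparc"));	/* 1: single address space */
		printf("%d\n", !!strcmp("sparc", "sparc"));	/* 0: separate address spaces */
		return 0;
	}
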
6010     diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
6011     index 2167001b18c5..c298a446d1f6 100644
6012     --- a/tools/perf/arch/common.h
6013     +++ b/tools/perf/arch/common.h
6014     @@ -5,5 +5,6 @@
6015     #include "../util/env.h"
6016    
6017     int perf_env__lookup_objdump(struct perf_env *env, const char **path);
6018     +bool perf_env__single_address_space(struct perf_env *env);
6019    
6020     #endif /* ARCH_PERF_COMMON_H */
6021     diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
6022     index b5bc85bd0bbe..a7b4d3f611c5 100644
6023     --- a/tools/perf/builtin-script.c
6024     +++ b/tools/perf/builtin-script.c
6025     @@ -728,8 +728,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
6026     if (PRINT_FIELD(DSO)) {
6027     memset(&alf, 0, sizeof(alf));
6028     memset(&alt, 0, sizeof(alt));
6029     - thread__find_map(thread, sample->cpumode, from, &alf);
6030     - thread__find_map(thread, sample->cpumode, to, &alt);
6031     + thread__find_map_fb(thread, sample->cpumode, from, &alf);
6032     + thread__find_map_fb(thread, sample->cpumode, to, &alt);
6033     }
6034    
6035     printed += fprintf(fp, " 0x%"PRIx64, from);
6036     @@ -775,8 +775,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
6037     from = br->entries[i].from;
6038     to = br->entries[i].to;
6039    
6040     - thread__find_symbol(thread, sample->cpumode, from, &alf);
6041     - thread__find_symbol(thread, sample->cpumode, to, &alt);
6042     + thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
6043     + thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
6044    
6045     printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
6046     if (PRINT_FIELD(DSO)) {
6047     @@ -820,11 +820,11 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
6048     from = br->entries[i].from;
6049     to = br->entries[i].to;
6050    
6051     - if (thread__find_map(thread, sample->cpumode, from, &alf) &&
6052     + if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
6053     !alf.map->dso->adjust_symbols)
6054     from = map__map_ip(alf.map, from);
6055    
6056     - if (thread__find_map(thread, sample->cpumode, to, &alt) &&
6057     + if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
6058     !alt.map->dso->adjust_symbols)
6059     to = map__map_ip(alt.map, to);
6060    
6061     diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
6062     index 59f38c7693f8..4c23779e271a 100644
6063     --- a/tools/perf/util/env.c
6064     +++ b/tools/perf/util/env.c
6065     @@ -166,7 +166,7 @@ const char *perf_env__arch(struct perf_env *env)
6066     struct utsname uts;
6067     char *arch_name;
6068    
6069     - if (!env) { /* Assume local operation */
6070     + if (!env || !env->arch) { /* Assume local operation */
6071     if (uname(&uts) < 0)
6072     return NULL;
6073     arch_name = uts.machine;
6074     diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
6075     index e9c108a6b1c3..24493200cf80 100644
6076     --- a/tools/perf/util/event.c
6077     +++ b/tools/perf/util/event.c
6078     @@ -1577,6 +1577,24 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
6079     return al->map;
6080     }
6081    
6082     +/*
6083     + * For branch stacks or branch samples, the sample cpumode might not be correct
6084     + * because it applies only to the sample 'ip' and not necessary to 'addr' or
6085     + * branch stack addresses. If possible, use a fallback to deal with those cases.
6086     + */
6087     +struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
6088     + struct addr_location *al)
6089     +{
6090     + struct map *map = thread__find_map(thread, cpumode, addr, al);
6091     + struct machine *machine = thread->mg->machine;
6092     + u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
6093     +
6094     + if (map || addr_cpumode == cpumode)
6095     + return map;
6096     +
6097     + return thread__find_map(thread, addr_cpumode, addr, al);
6098     +}
6099     +
6100     struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
6101     u64 addr, struct addr_location *al)
6102     {
6103     @@ -1586,6 +1604,15 @@ struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
6104     return al->sym;
6105     }
6106    
6107     +struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
6108     + u64 addr, struct addr_location *al)
6109     +{
6110     + al->sym = NULL;
6111     + if (thread__find_map_fb(thread, cpumode, addr, al))
6112     + al->sym = map__find_symbol(al->map, al->addr);
6113     + return al->sym;
6114     +}
6115     +
6116     /*
6117     * Callers need to drop the reference to al->thread, obtained in
6118     * machine__findnew_thread()
6119     @@ -1679,7 +1706,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
6120     void thread__resolve(struct thread *thread, struct addr_location *al,
6121     struct perf_sample *sample)
6122     {
6123     - thread__find_map(thread, sample->cpumode, sample->addr, al);
6124     + thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
6125    
6126     al->cpu = sample->cpu;
6127     al->sym = NULL;
6128     diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
6129     index 8f36ce813bc5..9397e3f2444d 100644
6130     --- a/tools/perf/util/machine.c
6131     +++ b/tools/perf/util/machine.c
6132     @@ -2592,6 +2592,33 @@ int machine__get_kernel_start(struct machine *machine)
6133     return err;
6134     }
6135    
6136     +u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
6137     +{
6138     + u8 addr_cpumode = cpumode;
6139     + bool kernel_ip;
6140     +
6141     + if (!machine->single_address_space)
6142     + goto out;
6143     +
6144     + kernel_ip = machine__kernel_ip(machine, addr);
6145     + switch (cpumode) {
6146     + case PERF_RECORD_MISC_KERNEL:
6147     + case PERF_RECORD_MISC_USER:
6148     + addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
6149     + PERF_RECORD_MISC_USER;
6150     + break;
6151     + case PERF_RECORD_MISC_GUEST_KERNEL:
6152     + case PERF_RECORD_MISC_GUEST_USER:
6153     + addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
6154     + PERF_RECORD_MISC_GUEST_USER;
6155     + break;
6156     + default:
6157     + break;
6158     + }
6159     +out:
6160     + return addr_cpumode;
6161     +}
6162     +
6163     struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
6164     {
6165     return dsos__findnew(&machine->dsos, filename);
6166     diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
6167     index d856b85862e2..ebde3ea70225 100644
6168     --- a/tools/perf/util/machine.h
6169     +++ b/tools/perf/util/machine.h
6170     @@ -42,6 +42,7 @@ struct machine {
6171     u16 id_hdr_size;
6172     bool comm_exec;
6173     bool kptr_restrict_warned;
6174     + bool single_address_space;
6175     char *root_dir;
6176     char *mmap_name;
6177     struct threads threads[THREADS__TABLE_SIZE];
6178     @@ -99,6 +100,8 @@ static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
6179     return ip >= kernel_start;
6180     }
6181    
6182     +u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
6183     +
6184     struct thread *machine__find_thread(struct machine *machine, pid_t pid,
6185     pid_t tid);
6186     struct comm *machine__thread_exec_comm(struct machine *machine,
6187     diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
6188     index 7e49baad304d..7348eea0248f 100644
6189     --- a/tools/perf/util/pmu.c
6190     +++ b/tools/perf/util/pmu.c
6191     @@ -145,7 +145,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
6192     int fd, ret = -1;
6193     char path[PATH_MAX];
6194    
6195     - snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
6196     + scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
6197    
6198     fd = open(path, O_RDONLY);
6199     if (fd == -1)
6200     @@ -175,7 +175,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
6201     ssize_t sret;
6202     int fd;
6203    
6204     - snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
6205     + scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
6206    
6207     fd = open(path, O_RDONLY);
6208     if (fd == -1)
6209     @@ -205,7 +205,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
6210     char path[PATH_MAX];
6211     int fd;
6212    
6213     - snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
6214     + scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
6215    
6216     fd = open(path, O_RDONLY);
6217     if (fd == -1)
6218     @@ -223,7 +223,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
6219     char path[PATH_MAX];
6220     int fd;
6221    
6222     - snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
6223     + scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
6224    
6225     fd = open(path, O_RDONLY);
6226     if (fd == -1)
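Editor's note: the pmu.c hunks replace snprintf() with scnprintf() when composing the sysfs paths. The behavioural difference is the return value: snprintf() reports the length the untruncated string would have needed, which can exceed the buffer, while scnprintf() (a kernel/tools-lib helper, not ISO C) returns the number of characters actually stored, which is safe to feed into further length arithmetic. A small standalone program showing the snprintf() behaviour:

	#include <stdio.h>

	int main(void)
	{
		char buf[8];

		/* snprintf() truncates the output but still returns the length
		 * the full string would have needed (11 here), so treating the
		 * return value as "bytes in buf" overruns the buffer. */
		int would_be = snprintf(buf, sizeof(buf), "%s", "hello world");

		printf("buffer='%s' return=%d size=%zu\n", buf, would_be, sizeof(buf));
		return 0;
	}
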
6227     diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
6228     index 69aa93d4ee99..0c4b050f6fc2 100644
6229     --- a/tools/perf/util/scripting-engines/trace-event-python.c
6230     +++ b/tools/perf/util/scripting-engines/trace-event-python.c
6231     @@ -494,14 +494,14 @@ static PyObject *python_process_brstack(struct perf_sample *sample,
6232     pydict_set_item_string_decref(pyelem, "cycles",
6233     PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
6234    
6235     - thread__find_map(thread, sample->cpumode,
6236     - br->entries[i].from, &al);
6237     + thread__find_map_fb(thread, sample->cpumode,
6238     + br->entries[i].from, &al);
6239     dsoname = get_dsoname(al.map);
6240     pydict_set_item_string_decref(pyelem, "from_dsoname",
6241     _PyUnicode_FromString(dsoname));
6242    
6243     - thread__find_map(thread, sample->cpumode,
6244     - br->entries[i].to, &al);
6245     + thread__find_map_fb(thread, sample->cpumode,
6246     + br->entries[i].to, &al);
6247     dsoname = get_dsoname(al.map);
6248     pydict_set_item_string_decref(pyelem, "to_dsoname",
6249     _PyUnicode_FromString(dsoname));
6250     @@ -576,14 +576,14 @@ static PyObject *python_process_brstacksym(struct perf_sample *sample,
6251     if (!pyelem)
6252     Py_FatalError("couldn't create Python dictionary");
6253    
6254     - thread__find_symbol(thread, sample->cpumode,
6255     - br->entries[i].from, &al);
6256     + thread__find_symbol_fb(thread, sample->cpumode,
6257     + br->entries[i].from, &al);
6258     get_symoff(al.sym, &al, true, bf, sizeof(bf));
6259     pydict_set_item_string_decref(pyelem, "from",
6260     _PyUnicode_FromString(bf));
6261    
6262     - thread__find_symbol(thread, sample->cpumode,
6263     - br->entries[i].to, &al);
6264     + thread__find_symbol_fb(thread, sample->cpumode,
6265     + br->entries[i].to, &al);
6266     get_symoff(al.sym, &al, true, bf, sizeof(bf));
6267     pydict_set_item_string_decref(pyelem, "to",
6268     _PyUnicode_FromString(bf));
6269     diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
6270     index 7d2c8ce6cfad..f8eab197f35c 100644
6271     --- a/tools/perf/util/session.c
6272     +++ b/tools/perf/util/session.c
6273     @@ -24,6 +24,7 @@
6274     #include "thread.h"
6275     #include "thread-stack.h"
6276     #include "stat.h"
6277     +#include "arch/common.h"
6278    
6279     static int perf_session__deliver_event(struct perf_session *session,
6280     union perf_event *event,
6281     @@ -150,6 +151,9 @@ struct perf_session *perf_session__new(struct perf_data *data,
6282     session->machines.host.env = &perf_env;
6283     }
6284    
6285     + session->machines.host.single_address_space =
6286     + perf_env__single_address_space(session->machines.host.env);
6287     +
6288     if (!data || perf_data__is_write(data)) {
6289     /*
6290     * In O_RDONLY mode this will be performed when reading the
6291     diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
6292     index 30e2b4c165fe..5920c3bb8ffe 100644
6293     --- a/tools/perf/util/thread.h
6294     +++ b/tools/perf/util/thread.h
6295     @@ -96,9 +96,13 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
6296    
6297     struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
6298     struct addr_location *al);
6299     +struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
6300     + struct addr_location *al);
6301    
6302     struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
6303     u64 addr, struct addr_location *al);
6304     +struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
6305     + u64 addr, struct addr_location *al);
6306    
6307     void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
6308     struct addr_location *al);
6309     diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
6310     index 23774970c9df..abcd29db2d7a 100644
6311     --- a/virt/kvm/arm/arm.c
6312     +++ b/virt/kvm/arm/arm.c
6313     @@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
6314     static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
6315     static u32 kvm_next_vmid;
6316     static unsigned int kvm_vmid_bits __read_mostly;
6317     -static DEFINE_RWLOCK(kvm_vmid_lock);
6318     +static DEFINE_SPINLOCK(kvm_vmid_lock);
6319    
6320     static bool vgic_present;
6321    
6322     @@ -484,7 +484,9 @@ void force_vm_exit(const cpumask_t *mask)
6323     */
6324     static bool need_new_vmid_gen(struct kvm *kvm)
6325     {
6326     - return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
6327     + u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
6328     + smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
6329     + return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
6330     }
6331    
6332     /**
6333     @@ -499,16 +501,11 @@ static void update_vttbr(struct kvm *kvm)
6334     {
6335     phys_addr_t pgd_phys;
6336     u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
6337     - bool new_gen;
6338    
6339     - read_lock(&kvm_vmid_lock);
6340     - new_gen = need_new_vmid_gen(kvm);
6341     - read_unlock(&kvm_vmid_lock);
6342     -
6343     - if (!new_gen)
6344     + if (!need_new_vmid_gen(kvm))
6345     return;
6346    
6347     - write_lock(&kvm_vmid_lock);
6348     + spin_lock(&kvm_vmid_lock);
6349    
6350     /*
6351     * We need to re-check the vmid_gen here to ensure that if another vcpu
6352     @@ -516,7 +513,7 @@ static void update_vttbr(struct kvm *kvm)
6353     * use the same vmid.
6354     */
6355     if (!need_new_vmid_gen(kvm)) {
6356     - write_unlock(&kvm_vmid_lock);
6357     + spin_unlock(&kvm_vmid_lock);
6358     return;
6359     }
6360    
6361     @@ -539,7 +536,6 @@ static void update_vttbr(struct kvm *kvm)
6362     kvm_call_hyp(__kvm_flush_vm_context);
6363     }
6364    
6365     - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
6366     kvm->arch.vmid = kvm_next_vmid;
6367     kvm_next_vmid++;
6368     kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
6369     @@ -550,7 +546,10 @@ static void update_vttbr(struct kvm *kvm)
6370     vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
6371     kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
6372    
6373     - write_unlock(&kvm_vmid_lock);
6374     + smp_wmb();
6375     + WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
6376     +
6377     + spin_unlock(&kvm_vmid_lock);
6378     }
6379    
6380     static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
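Editor's note: the update_vttbr() rework above drops the rwlock from the fast path and orders the generation check with barriers instead: the writer stores the new vmid/vttbr, issues smp_wmb(), and only then updates kvm->arch.vmid_gen, while need_new_vmid_gen() separates its two generation reads with smp_rmb(), so a vCPU that observes a current generation also observes the vmid published with it. Roughly the same publish/observe shape in portable C11 atomics (an illustrative user-space analog, not the kernel's barrier placement):

	#include <stdatomic.h>

	struct vm {
		int vmid;				/* payload */
		_Atomic unsigned long vmid_gen;		/* publication marker */
	};

	static void publish(struct vm *vm, int new_vmid, unsigned long gen)
	{
		vm->vmid = new_vmid;			/* write the payload first */
		atomic_store_explicit(&vm->vmid_gen, gen,
				      memory_order_release);	/* then publish */
	}

	static int up_to_date(const struct vm *vm, unsigned long current_gen)
	{
		/* acquire load: seeing current_gen implies the matching vmid
		 * written before the release store is visible too */
		return atomic_load_explicit(&vm->vmid_gen,
					    memory_order_acquire) == current_gen;
	}
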
6381     diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
6382     index f56ff1cf52ec..ceeda7e04a4d 100644
6383     --- a/virt/kvm/arm/vgic/vgic-mmio.c
6384     +++ b/virt/kvm/arm/vgic/vgic-mmio.c
6385     @@ -313,36 +313,30 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
6386    
6387     spin_lock_irqsave(&irq->irq_lock, flags);
6388    
6389     - /*
6390     - * If this virtual IRQ was written into a list register, we
6391     - * have to make sure the CPU that runs the VCPU thread has
6392     - * synced back the LR state to the struct vgic_irq.
6393     - *
6394     - * As long as the conditions below are true, we know the VCPU thread
6395     - * may be on its way back from the guest (we kicked the VCPU thread in
6396     - * vgic_change_active_prepare) and still has to sync back this IRQ,
6397     - * so we release and re-acquire the spin_lock to let the other thread
6398     - * sync back the IRQ.
6399     - *
6400     - * When accessing VGIC state from user space, requester_vcpu is
6401     - * NULL, which is fine, because we guarantee that no VCPUs are running
6402     - * when accessing VGIC state from user space so irq->vcpu->cpu is
6403     - * always -1.
6404     - */
6405     - while (irq->vcpu && /* IRQ may have state in an LR somewhere */
6406     - irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
6407     - irq->vcpu->cpu != -1) /* VCPU thread is running */
6408     - cond_resched_lock(&irq->irq_lock);
6409     -
6410     if (irq->hw) {
6411     vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
6412     } else {
6413     u32 model = vcpu->kvm->arch.vgic.vgic_model;
6414     + u8 active_source;
6415    
6416     irq->active = active;
6417     +
6418     + /*
6419     + * The GICv2 architecture indicates that the source CPUID for
6420     + * an SGI should be provided during an EOI which implies that
6421     + * the active state is stored somewhere, but at the same time
6422     + * this state is not architecturally exposed anywhere and we
6423     + * have no way of knowing the right source.
6424     + *
6425     + * This may lead to a VCPU not being able to receive
6426     + * additional instances of a particular SGI after migration
6427     + * for a GICv2 VM on some GIC implementations. Oh well.
6428     + */
6429     + active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
6430     +
6431     if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
6432     active && vgic_irq_is_sgi(irq->intid))
6433     - irq->active_source = requester_vcpu->vcpu_id;
6434     + irq->active_source = active_source;
6435     }
6436    
6437     if (irq->active)
6438     @@ -368,14 +362,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
6439     */
6440     static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
6441     {
6442     - if (intid > VGIC_NR_PRIVATE_IRQS)
6443     + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
6444     + intid > VGIC_NR_PRIVATE_IRQS)
6445     kvm_arm_halt_guest(vcpu->kvm);
6446     }
6447    
6448     /* See vgic_change_active_prepare */
6449     static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
6450     {
6451     - if (intid > VGIC_NR_PRIVATE_IRQS)
6452     + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
6453     + intid > VGIC_NR_PRIVATE_IRQS)
6454     kvm_arm_resume_guest(vcpu->kvm);
6455     }
6456    
6457     diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
6458     index 7cfdfbc910e0..f884a54b2601 100644
6459     --- a/virt/kvm/arm/vgic/vgic.c
6460     +++ b/virt/kvm/arm/vgic/vgic.c
6461     @@ -103,13 +103,13 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
6462     {
6463     /* SGIs and PPIs */
6464     if (intid <= VGIC_MAX_PRIVATE) {
6465     - intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
6466     + intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
6467     return &vcpu->arch.vgic_cpu.private_irqs[intid];
6468     }
6469    
6470     /* SPIs */
6471     - if (intid <= VGIC_MAX_SPI) {
6472     - intid = array_index_nospec(intid, VGIC_MAX_SPI);
6473     + if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
6474     + intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
6475     return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
6476     }
6477