Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0113-4.19.14-all-fixes.patch



Revision 3392
Fri Aug 2 11:47:27 2019 UTC by niro
File size: 230504 bytes
-linux-4.19.14
1 niro 3392 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2     index 0c404cda531a..f5acf35c712f 100644
3     --- a/Documentation/admin-guide/kernel-parameters.txt
4     +++ b/Documentation/admin-guide/kernel-parameters.txt
5     @@ -2073,6 +2073,9 @@
6     off
7     Disables hypervisor mitigations and doesn't
8     emit any warnings.
9     + It also drops the swap size and available
10     + RAM limit restriction on both hypervisor and
11     + bare metal.
12    
13     Default is 'flush'.
14    
15     diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
16     index bae52b845de0..9f5924f81f89 100644
17     --- a/Documentation/admin-guide/l1tf.rst
18     +++ b/Documentation/admin-guide/l1tf.rst
19     @@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:
20    
21     off Disables hypervisor mitigations and doesn't emit any
22     warnings.
23     + It also drops the swap size and available RAM limit restrictions
24     + on both hypervisor and bare metal.
25     +
26     ============ =============================================================
27    
28     The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
29     @@ -576,7 +579,8 @@ Default mitigations
30     The kernel default mitigations for vulnerable processors are:
31    
32     - PTE inversion to protect against malicious user space. This is done
33     - unconditionally and cannot be controlled.
34     + unconditionally and cannot be controlled. The swap storage is limited
35     + to ~16TB.
36    
37     - L1D conditional flushing on VMENTER when EPT is enabled for
38     a guest.
39     diff --git a/Makefile b/Makefile
40     index 892ff14cbc9d..3324dd0e11a3 100644
41     --- a/Makefile
42     +++ b/Makefile
43     @@ -1,7 +1,7 @@
44     # SPDX-License-Identifier: GPL-2.0
45     VERSION = 4
46     PATCHLEVEL = 19
47     -SUBLEVEL = 13
48     +SUBLEVEL = 14
49     EXTRAVERSION =
50     NAME = "People's Front"
51    
52     diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
53     index a90c4f126050..ac69f307dcfe 100644
54     --- a/arch/arc/Kconfig
55     +++ b/arch/arc/Kconfig
56     @@ -26,6 +26,7 @@ config ARC
57     select GENERIC_IRQ_SHOW
58     select GENERIC_PCI_IOMAP
59     select GENERIC_PENDING_IRQ if SMP
60     + select GENERIC_SCHED_CLOCK
61     select GENERIC_SMP_IDLE_THREAD
62     select HAVE_ARCH_KGDB
63     select HAVE_ARCH_TRACEHOOK
64     diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
65     index 03611d50c5a9..e84544b220b9 100644
66     --- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
67     +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
68     @@ -26,8 +26,7 @@
69     "Speakers", "SPKL",
70     "Speakers", "SPKR";
71    
72     - assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
73     - <&clock CLK_MOUT_EPLL>,
74     + assigned-clocks = <&clock CLK_MOUT_EPLL>,
75     <&clock CLK_MOUT_MAU_EPLL>,
76     <&clock CLK_MOUT_USER_MAU_EPLL>,
77     <&clock_audss EXYNOS_MOUT_AUDSS>,
78     @@ -36,15 +35,13 @@
79     <&clock_audss EXYNOS_DOUT_AUD_BUS>,
80     <&clock_audss EXYNOS_DOUT_I2S>;
81    
82     - assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
83     - <&clock CLK_FOUT_EPLL>,
84     + assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
85     <&clock CLK_MOUT_EPLL>,
86     <&clock CLK_MOUT_MAU_EPLL>,
87     <&clock CLK_MAU_EPLL>,
88     <&clock_audss EXYNOS_MOUT_AUDSS>;
89    
90     assigned-clock-rates = <0>,
91     - <0>,
92     <0>,
93     <0>,
94     <0>,
95     @@ -84,4 +81,6 @@
96    
97     &i2s0 {
98     status = "okay";
99     + assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
100     + assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
101     };
102     diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
103     index 4a30cc849b00..122174ea9e0a 100644
104     --- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
105     +++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
106     @@ -33,8 +33,7 @@
107     compatible = "samsung,odroid-xu3-audio";
108     model = "Odroid-XU4";
109    
110     - assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
111     - <&clock CLK_MOUT_EPLL>,
112     + assigned-clocks = <&clock CLK_MOUT_EPLL>,
113     <&clock CLK_MOUT_MAU_EPLL>,
114     <&clock CLK_MOUT_USER_MAU_EPLL>,
115     <&clock_audss EXYNOS_MOUT_AUDSS>,
116     @@ -43,15 +42,13 @@
117     <&clock_audss EXYNOS_DOUT_AUD_BUS>,
118     <&clock_audss EXYNOS_DOUT_I2S>;
119    
120     - assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
121     - <&clock CLK_FOUT_EPLL>,
122     + assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
123     <&clock CLK_MOUT_EPLL>,
124     <&clock CLK_MOUT_MAU_EPLL>,
125     <&clock CLK_MAU_EPLL>,
126     <&clock_audss EXYNOS_MOUT_AUDSS>;
127    
128     assigned-clock-rates = <0>,
129     - <0>,
130     <0>,
131     <0>,
132     <0>,
133     @@ -79,6 +76,8 @@
134    
135     &i2s0 {
136     status = "okay";
137     + assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
138     + assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
139     };
140    
141     &pwm {
142     diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
143     index aa45df752a16..95e3fa7ded8b 100644
144     --- a/arch/arm64/include/asm/kvm_arm.h
145     +++ b/arch/arm64/include/asm/kvm_arm.h
146     @@ -104,7 +104,7 @@
147     TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
148    
149     /* VTCR_EL2 Registers bits */
150     -#define VTCR_EL2_RES1 (1 << 31)
151     +#define VTCR_EL2_RES1 (1U << 31)
152     #define VTCR_EL2_HD (1 << 22)
153     #define VTCR_EL2_HA (1 << 21)
154     #define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK
155     diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
156     index e0d0f5b856e7..d52051879ffe 100644
157     --- a/arch/arm64/include/asm/unistd.h
158     +++ b/arch/arm64/include/asm/unistd.h
159     @@ -40,8 +40,9 @@
160     * The following SVCs are ARM private.
161     */
162     #define __ARM_NR_COMPAT_BASE 0x0f0000
163     -#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
164     -#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
165     +#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2)
166     +#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
167     +#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
168    
169     #define __NR_compat_syscalls 399
170     #endif
171     diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
172     index a6109825eeb9..7be666062c7c 100644
173     --- a/arch/arm64/kernel/sys_compat.c
174     +++ b/arch/arm64/kernel/sys_compat.c
175     @@ -102,12 +102,12 @@ long compat_arm_syscall(struct pt_regs *regs)
176    
177     default:
178     /*
179     - * Calls 9f00xx..9f07ff are defined to return -ENOSYS
180     + * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
181     * if not implemented, rather than raising SIGILL. This
182     * way the calling program can gracefully determine whether
183     * a feature is supported.
184     */
185     - if ((no & 0xffff) <= 0x7ff)
186     + if (no < __ARM_NR_COMPAT_END)
187     return -ENOSYS;
188     break;
189     }
190     diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
191     index 131c7772703c..c041eab3dce0 100644
192     --- a/arch/arm64/kvm/hyp/tlb.c
193     +++ b/arch/arm64/kvm/hyp/tlb.c
194     @@ -15,14 +15,19 @@
195     * along with this program. If not, see <http://www.gnu.org/licenses/>.
196     */
197    
198     +#include <linux/irqflags.h>
199     +
200     #include <asm/kvm_hyp.h>
201     #include <asm/kvm_mmu.h>
202     #include <asm/tlbflush.h>
203    
204     -static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
205     +static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
206     + unsigned long *flags)
207     {
208     u64 val;
209    
210     + local_irq_save(*flags);
211     +
212     /*
213     * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
214     * most TLB operations target EL2/EL0. In order to affect the
215     @@ -37,7 +42,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
216     isb();
217     }
218    
219     -static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
220     +static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
221     + unsigned long *flags)
222     {
223     write_sysreg(kvm->arch.vttbr, vttbr_el2);
224     isb();
225     @@ -48,7 +54,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
226     __tlb_switch_to_guest_vhe,
227     ARM64_HAS_VIRT_HOST_EXTN);
228    
229     -static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
230     +static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
231     + unsigned long flags)
232     {
233     /*
234     * We're done with the TLB operation, let's restore the host's
235     @@ -56,9 +63,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
236     */
237     write_sysreg(0, vttbr_el2);
238     write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
239     + isb();
240     + local_irq_restore(flags);
241     }
242    
243     -static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
244     +static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
245     + unsigned long flags)
246     {
247     write_sysreg(0, vttbr_el2);
248     }
249     @@ -70,11 +80,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
250    
251     void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
252     {
253     + unsigned long flags;
254     +
255     dsb(ishst);
256    
257     /* Switch to requested VMID */
258     kvm = kern_hyp_va(kvm);
259     - __tlb_switch_to_guest()(kvm);
260     + __tlb_switch_to_guest()(kvm, &flags);
261    
262     /*
263     * We could do so much better if we had the VA as well.
264     @@ -117,36 +129,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
265     if (!has_vhe() && icache_is_vpipt())
266     __flush_icache_all();
267    
268     - __tlb_switch_to_host()(kvm);
269     + __tlb_switch_to_host()(kvm, flags);
270     }
271    
272     void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
273     {
274     + unsigned long flags;
275     +
276     dsb(ishst);
277    
278     /* Switch to requested VMID */
279     kvm = kern_hyp_va(kvm);
280     - __tlb_switch_to_guest()(kvm);
281     + __tlb_switch_to_guest()(kvm, &flags);
282    
283     __tlbi(vmalls12e1is);
284     dsb(ish);
285     isb();
286    
287     - __tlb_switch_to_host()(kvm);
288     + __tlb_switch_to_host()(kvm, flags);
289     }
290    
291     void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
292     {
293     struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
294     + unsigned long flags;
295    
296     /* Switch to requested VMID */
297     - __tlb_switch_to_guest()(kvm);
298     + __tlb_switch_to_guest()(kvm, &flags);
299    
300     __tlbi(vmalle1);
301     dsb(nsh);
302     isb();
303    
304     - __tlb_switch_to_host()(kvm);
305     + __tlb_switch_to_host()(kvm, flags);
306     }
307    
308     void __hyp_text __kvm_flush_vm_context(void)
309     diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
310     index 37fe58c19a90..542c3ede9722 100644
311     --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
312     +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
313     @@ -13,6 +13,7 @@
314     #include <stdint.h>
315     #include <stdio.h>
316     #include <stdlib.h>
317     +#include "../../../../include/linux/sizes.h"
318    
319     int main(int argc, char *argv[])
320     {
321     @@ -45,11 +46,11 @@ int main(int argc, char *argv[])
322     vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
323    
324     /*
325     - * Align with 16 bytes: "greater than that used for any standard data
326     - * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
327     + * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
328     + * which may be as large as 64KB depending on the kernel configuration.
329     */
330    
331     - vmlinuz_load_addr += (16 - vmlinux_size % 16);
332     + vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
333    
334     printf("0x%llx\n", vmlinuz_load_addr);
335    
336     diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
337     index 6c79e8a16a26..3ddbb98dff84 100644
338     --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
339     +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
340     @@ -286,7 +286,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
341     case 3:
342     return CVMX_HELPER_INTERFACE_MODE_LOOP;
343     case 4:
344     - return CVMX_HELPER_INTERFACE_MODE_RGMII;
345     + /* TODO: Implement support for AGL (RGMII). */
346     + return CVMX_HELPER_INTERFACE_MODE_DISABLED;
347     default:
348     return CVMX_HELPER_INTERFACE_MODE_DISABLED;
349     }
350     diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
351     index d4ea7a5b60cf..9e805317847d 100644
352     --- a/arch/mips/include/asm/atomic.h
353     +++ b/arch/mips/include/asm/atomic.h
354     @@ -306,7 +306,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
355     { \
356     long result; \
357     \
358     - if (kernel_uses_llsc && R10000_LLSC_WAR) { \
359     + if (kernel_uses_llsc) { \
360     long temp; \
361     \
362     __asm__ __volatile__( \
363     diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
364     index a41059d47d31..ed7ffe4e63a3 100644
365     --- a/arch/mips/include/asm/cpu-info.h
366     +++ b/arch/mips/include/asm/cpu-info.h
367     @@ -50,7 +50,7 @@ struct guest_info {
368     #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
369    
370     struct cpuinfo_mips {
371     - unsigned long asid_cache;
372     + u64 asid_cache;
373     #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
374     unsigned long asid_mask;
375     #endif
376     diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
377     index c9f7e231e66b..59c8b11c090e 100644
378     --- a/arch/mips/include/asm/mach-loongson64/mmzone.h
379     +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
380     @@ -21,6 +21,7 @@
381     #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL
382    
383     #define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
384     +#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
385    
386     #define LEVELS_PER_SLICE 128
387    
388     diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
389     index 0740be7d5d4a..24d6b42345fb 100644
390     --- a/arch/mips/include/asm/mmu.h
391     +++ b/arch/mips/include/asm/mmu.h
392     @@ -7,7 +7,7 @@
393     #include <linux/wait.h>
394    
395     typedef struct {
396     - unsigned long asid[NR_CPUS];
397     + u64 asid[NR_CPUS];
398     void *vdso;
399     atomic_t fp_mode_switching;
400    
401     diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
402     index 94414561de0e..a589585be21b 100644
403     --- a/arch/mips/include/asm/mmu_context.h
404     +++ b/arch/mips/include/asm/mmu_context.h
405     @@ -76,14 +76,14 @@ extern unsigned long pgd_current[];
406     * All unused by hardware upper bits will be considered
407     * as a software asid extension.
408     */
409     -static unsigned long asid_version_mask(unsigned int cpu)
410     +static inline u64 asid_version_mask(unsigned int cpu)
411     {
412     unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
413    
414     - return ~(asid_mask | (asid_mask - 1));
415     + return ~(u64)(asid_mask | (asid_mask - 1));
416     }
417    
418     -static unsigned long asid_first_version(unsigned int cpu)
419     +static inline u64 asid_first_version(unsigned int cpu)
420     {
421     return ~asid_version_mask(cpu) + 1;
422     }
423     @@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
424     static inline void
425     get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
426     {
427     - unsigned long asid = asid_cache(cpu);
428     + u64 asid = asid_cache(cpu);
429    
430     if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
431     if (cpu_has_vtag_icache)
432     flush_icache_all();
433     local_flush_tlb_all(); /* start new asid cycle */
434     - if (!asid) /* fix version if needed */
435     - asid = asid_first_version(cpu);
436     }
437    
438     cpu_context(cpu, mm) = asid_cache(cpu) = asid;
439     diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
440     index f085fba41da5..b826b8473e95 100644
441     --- a/arch/mips/include/asm/mmzone.h
442     +++ b/arch/mips/include/asm/mmzone.h
443     @@ -7,7 +7,18 @@
444     #define _ASM_MMZONE_H_
445    
446     #include <asm/page.h>
447     -#include <mmzone.h>
448     +
449     +#ifdef CONFIG_NEED_MULTIPLE_NODES
450     +# include <mmzone.h>
451     +#endif
452     +
453     +#ifndef pa_to_nid
454     +#define pa_to_nid(addr) 0
455     +#endif
456     +
457     +#ifndef nid_to_addrbase
458     +#define nid_to_addrbase(nid) 0
459     +#endif
460    
461     #ifdef CONFIG_DISCONTIGMEM
462    
463     diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
464     index 0036ea0c7173..93a9dce31f25 100644
465     --- a/arch/mips/include/asm/pgtable-64.h
466     +++ b/arch/mips/include/asm/pgtable-64.h
467     @@ -265,6 +265,11 @@ static inline int pmd_bad(pmd_t pmd)
468    
469     static inline int pmd_present(pmd_t pmd)
470     {
471     +#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
472     + if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
473     + return pmd_val(pmd) & _PAGE_PRESENT;
474     +#endif
475     +
476     return pmd_val(pmd) != (unsigned long) invalid_pte_table;
477     }
478    
479     diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
480     index 7f12d7e27c94..e5190126080e 100644
481     --- a/arch/mips/include/asm/r4kcache.h
482     +++ b/arch/mips/include/asm/r4kcache.h
483     @@ -20,6 +20,7 @@
484     #include <asm/cpu-features.h>
485     #include <asm/cpu-type.h>
486     #include <asm/mipsmtregs.h>
487     +#include <asm/mmzone.h>
488     #include <linux/uaccess.h> /* for uaccess_kernel() */
489    
490     extern void (*r4k_blast_dcache)(void);
491     @@ -747,4 +748,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
492     __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
493     __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
494    
495     +/* Currently, this is very specific to Loongson-3 */
496     +#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
497     +static inline void blast_##pfx##cache##lsize##_node(long node) \
498     +{ \
499     + unsigned long start = CAC_BASE | nid_to_addrbase(node); \
500     + unsigned long end = start + current_cpu_data.desc.waysize; \
501     + unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
502     + unsigned long ws_end = current_cpu_data.desc.ways << \
503     + current_cpu_data.desc.waybit; \
504     + unsigned long ws, addr; \
505     + \
506     + for (ws = 0; ws < ws_end; ws += ws_inc) \
507     + for (addr = start; addr < end; addr += lsize * 32) \
508     + cache##lsize##_unroll32(addr|ws, indexop); \
509     +}
510     +
511     +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
512     +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
513     +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
514     +__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
515     +
516     #endif /* _ASM_R4KCACHE_H */
517     diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
518     index 48a9c6b90e07..9df3ebdc7b0f 100644
519     --- a/arch/mips/kernel/vdso.c
520     +++ b/arch/mips/kernel/vdso.c
521     @@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
522    
523     /* Map delay slot emulation page */
524     base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
525     - VM_READ|VM_WRITE|VM_EXEC|
526     - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
527     + VM_READ | VM_EXEC |
528     + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
529     0, NULL);
530     if (IS_ERR_VALUE(base)) {
531     ret = base;
532     diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
533     index 5450f4d1c920..e2d46cb93ca9 100644
534     --- a/arch/mips/math-emu/dsemul.c
535     +++ b/arch/mips/math-emu/dsemul.c
536     @@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
537     {
538     int isa16 = get_isa16_mode(regs->cp0_epc);
539     mips_instruction break_math;
540     - struct emuframe __user *fr;
541     - int err, fr_idx;
542     + unsigned long fr_uaddr;
543     + struct emuframe fr;
544     + int fr_idx, ret;
545    
546     /* NOP is easy */
547     if (ir == 0)
548     @@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
549     fr_idx = alloc_emuframe();
550     if (fr_idx == BD_EMUFRAME_NONE)
551     return SIGBUS;
552     - fr = &dsemul_page()[fr_idx];
553    
554     /* Retrieve the appropriately encoded break instruction */
555     break_math = BREAK_MATH(isa16);
556    
557     /* Write the instructions to the frame */
558     if (isa16) {
559     - err = __put_user(ir >> 16,
560     - (u16 __user *)(&fr->emul));
561     - err |= __put_user(ir & 0xffff,
562     - (u16 __user *)((long)(&fr->emul) + 2));
563     - err |= __put_user(break_math >> 16,
564     - (u16 __user *)(&fr->badinst));
565     - err |= __put_user(break_math & 0xffff,
566     - (u16 __user *)((long)(&fr->badinst) + 2));
567     + union mips_instruction _emul = {
568     + .halfword = { ir >> 16, ir }
569     + };
570     + union mips_instruction _badinst = {
571     + .halfword = { break_math >> 16, break_math }
572     + };
573     +
574     + fr.emul = _emul.word;
575     + fr.badinst = _badinst.word;
576     } else {
577     - err = __put_user(ir, &fr->emul);
578     - err |= __put_user(break_math, &fr->badinst);
579     + fr.emul = ir;
580     + fr.badinst = break_math;
581     }
582    
583     - if (unlikely(err)) {
584     + /* Write the frame to user memory */
585     + fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
586     + ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
587     + FOLL_FORCE | FOLL_WRITE);
588     + if (unlikely(ret != sizeof(fr))) {
589     MIPS_FPU_EMU_INC_STATS(errors);
590     free_emuframe(fr_idx, current->mm);
591     return SIGBUS;
592     @@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
593     atomic_set(&current->thread.bd_emu_frame, fr_idx);
594    
595     /* Change user register context to execute the frame */
596     - regs->cp0_epc = (unsigned long)&fr->emul | isa16;
597     -
598     - /* Ensure the icache observes our newly written frame */
599     - flush_cache_sigtramp((unsigned long)&fr->emul);
600     + regs->cp0_epc = fr_uaddr | isa16;
601    
602     return 0;
603     }
604     diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
605     index 3466fcdae0ca..01848cdf2074 100644
606     --- a/arch/mips/mm/c-r3k.c
607     +++ b/arch/mips/mm/c-r3k.c
608     @@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
609     pmd_t *pmdp;
610     pte_t *ptep;
611    
612     - pr_debug("cpage[%08lx,%08lx]\n",
613     + pr_debug("cpage[%08llx,%08lx]\n",
614     cpu_context(smp_processor_id(), mm), addr);
615    
616     /* No ASID => no such page in the cache. */
617     diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
618     index a9ef057c79fe..05a539d3a597 100644
619     --- a/arch/mips/mm/c-r4k.c
620     +++ b/arch/mips/mm/c-r4k.c
621     @@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void)
622     r4k_blast_scache = blast_scache128;
623     }
624    
625     +static void (*r4k_blast_scache_node)(long node);
626     +
627     +static void r4k_blast_scache_node_setup(void)
628     +{
629     + unsigned long sc_lsize = cpu_scache_line_size();
630     +
631     + if (current_cpu_type() != CPU_LOONGSON3)
632     + r4k_blast_scache_node = (void *)cache_noop;
633     + else if (sc_lsize == 16)
634     + r4k_blast_scache_node = blast_scache16_node;
635     + else if (sc_lsize == 32)
636     + r4k_blast_scache_node = blast_scache32_node;
637     + else if (sc_lsize == 64)
638     + r4k_blast_scache_node = blast_scache64_node;
639     + else if (sc_lsize == 128)
640     + r4k_blast_scache_node = blast_scache128_node;
641     +}
642     +
643     static inline void local_r4k___flush_cache_all(void * args)
644     {
645     switch (current_cpu_type()) {
646     case CPU_LOONGSON2:
647     - case CPU_LOONGSON3:
648     case CPU_R4000SC:
649     case CPU_R4000MC:
650     case CPU_R4400SC:
651     @@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args)
652     r4k_blast_scache();
653     break;
654    
655     + case CPU_LOONGSON3:
656     + /* Use get_ebase_cpunum() for both NUMA=y/n */
657     + r4k_blast_scache_node(get_ebase_cpunum() >> 2);
658     + break;
659     +
660     case CPU_BMIPS5000:
661     r4k_blast_scache();
662     __sync();
663     @@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
664    
665     preempt_disable();
666     if (cpu_has_inclusive_pcaches) {
667     - if (size >= scache_size)
668     - r4k_blast_scache();
669     - else
670     + if (size >= scache_size) {
671     + if (current_cpu_type() != CPU_LOONGSON3)
672     + r4k_blast_scache();
673     + else
674     + r4k_blast_scache_node(pa_to_nid(addr));
675     + } else {
676     blast_scache_range(addr, addr + size);
677     + }
678     preempt_enable();
679     __sync();
680     return;
681     @@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
682    
683     preempt_disable();
684     if (cpu_has_inclusive_pcaches) {
685     - if (size >= scache_size)
686     - r4k_blast_scache();
687     - else {
688     + if (size >= scache_size) {
689     + if (current_cpu_type() != CPU_LOONGSON3)
690     + r4k_blast_scache();
691     + else
692     + r4k_blast_scache_node(pa_to_nid(addr));
693     + } else {
694     /*
695     * There is no clearly documented alignment requirement
696     * for the cache instruction on MIPS processors and
697     @@ -1918,6 +1947,7 @@ void r4k_cache_init(void)
698     r4k_blast_scache_page_setup();
699     r4k_blast_scache_page_indexed_setup();
700     r4k_blast_scache_setup();
701     + r4k_blast_scache_node_setup();
702     #ifdef CONFIG_EVA
703     r4k_blast_dcache_user_page_setup();
704     r4k_blast_icache_user_page_setup();
705     diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
706     index f6f469fc4073..1b395b85132b 100644
707     --- a/arch/powerpc/kernel/security.c
708     +++ b/arch/powerpc/kernel/security.c
709     @@ -22,7 +22,7 @@ enum count_cache_flush_type {
710     COUNT_CACHE_FLUSH_SW = 0x2,
711     COUNT_CACHE_FLUSH_HW = 0x4,
712     };
713     -static enum count_cache_flush_type count_cache_flush_type;
714     +static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
715    
716     bool barrier_nospec_enabled;
717     static bool no_nospec;
718     diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
719     index e6474a45cef5..6327fd79b0fb 100644
720     --- a/arch/powerpc/kernel/signal_32.c
721     +++ b/arch/powerpc/kernel/signal_32.c
722     @@ -1140,11 +1140,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
723     {
724     struct rt_sigframe __user *rt_sf;
725     struct pt_regs *regs = current_pt_regs();
726     + int tm_restore = 0;
727     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
728     struct ucontext __user *uc_transact;
729     unsigned long msr_hi;
730     unsigned long tmp;
731     - int tm_restore = 0;
732     #endif
733     /* Always make any pending restarted system calls return -EINTR */
734     current->restart_block.fn = do_no_restart_syscall;
735     @@ -1192,11 +1192,19 @@ SYSCALL_DEFINE0(rt_sigreturn)
736     goto bad;
737     }
738     }
739     - if (!tm_restore)
740     - /* Fall through, for non-TM restore */
741     + if (!tm_restore) {
742     + /*
743     + * Unset regs->msr because ucontext MSR TS is not
744     + * set, and recheckpoint was not called. This avoid
745     + * hitting a TM Bad thing at RFID
746     + */
747     + regs->msr &= ~MSR_TS_MASK;
748     + }
749     + /* Fall through, for non-TM restore */
750     #endif
751     - if (do_setcontext(&rt_sf->uc, regs, 1))
752     - goto bad;
753     + if (!tm_restore)
754     + if (do_setcontext(&rt_sf->uc, regs, 1))
755     + goto bad;
756    
757     /*
758     * It's not clear whether or why it is desirable to save the
759     diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
760     index 83d51bf586c7..daa28cb72272 100644
761     --- a/arch/powerpc/kernel/signal_64.c
762     +++ b/arch/powerpc/kernel/signal_64.c
763     @@ -740,11 +740,23 @@ SYSCALL_DEFINE0(rt_sigreturn)
764     &uc_transact->uc_mcontext))
765     goto badframe;
766     }
767     - else
768     - /* Fall through, for non-TM restore */
769     #endif
770     - if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
771     - goto badframe;
772     + /* Fall through, for non-TM restore */
773     + if (!MSR_TM_ACTIVE(msr)) {
774     + /*
775     + * Unset MSR[TS] on the thread regs since MSR from user
776     + * context does not have MSR active, and recheckpoint was
777     + * not called since restore_tm_sigcontexts() was not called
778     + * also.
779     + *
780     + * If not unsetting it, the code can RFID to userspace with
781     + * MSR[TS] set, but without CPU in the proper state,
782     + * causing a TM bad thing.
783     + */
784     + current->thread.regs->msr &= ~MSR_TS_MASK;
785     + if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
786     + goto badframe;
787     + }
788    
789     if (restore_altstack(&uc->uc_stack))
790     goto badframe;
791     diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
792     index 19b2d2a9b43d..eeb7450db18c 100644
793     --- a/arch/s390/pci/pci_clp.c
794     +++ b/arch/s390/pci/pci_clp.c
795     @@ -436,7 +436,7 @@ int clp_get_state(u32 fid, enum zpci_state *state)
796     struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
797     int rc;
798    
799     - rrb = clp_alloc_block(GFP_KERNEL);
800     + rrb = clp_alloc_block(GFP_ATOMIC);
801     if (!rrb)
802     return -ENOMEM;
803    
804     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
805     index 022845ee0c88..728dc661ebb6 100644
806     --- a/arch/x86/include/asm/kvm_host.h
807     +++ b/arch/x86/include/asm/kvm_host.h
808     @@ -1441,7 +1441,7 @@ asmlinkage void kvm_spurious_fault(void);
809     "cmpb $0, kvm_rebooting \n\t" \
810     "jne 668b \n\t" \
811     __ASM_SIZE(push) " $666b \n\t" \
812     - "call kvm_spurious_fault \n\t" \
813     + "jmp kvm_spurious_fault \n\t" \
814     ".popsection \n\t" \
815     _ASM_EXTABLE(666b, 667b)
816    
817     diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
818     index 78928f56cf72..abb92c341693 100644
819     --- a/arch/x86/kernel/cpu/bugs.c
820     +++ b/arch/x86/kernel/cpu/bugs.c
821     @@ -1000,7 +1000,8 @@ static void __init l1tf_select_mitigation(void)
822     #endif
823    
824     half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
825     - if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
826     + if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
827     + e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
828     pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
829     pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
830     half_pa);
831     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
832     index 33ffb6d17e73..841740045554 100644
833     --- a/arch/x86/kvm/vmx.c
834     +++ b/arch/x86/kvm/vmx.c
835     @@ -8011,13 +8011,16 @@ static __init int hardware_setup(void)
836    
837     kvm_mce_cap_supported |= MCG_LMCE_P;
838    
839     - return alloc_kvm_area();
840     + r = alloc_kvm_area();
841     + if (r)
842     + goto out;
843     + return 0;
844    
845     out:
846     for (i = 0; i < VMX_BITMAP_NR; i++)
847     free_page((unsigned long)vmx_bitmap[i]);
848    
849     - return r;
850     + return r;
851     }
852    
853     static __exit void hardware_unsetup(void)
854     diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
855     index faca978ebf9d..d883869437b5 100644
856     --- a/arch/x86/mm/init.c
857     +++ b/arch/x86/mm/init.c
858     @@ -932,7 +932,7 @@ unsigned long max_swapfile_size(void)
859    
860     pages = generic_max_swapfile_size();
861    
862     - if (boot_cpu_has_bug(X86_BUG_L1TF)) {
863     + if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
864     /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
865     unsigned long long l1tf_limit = l1tf_pfn_limit();
866     /*
867     diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
868     index dd519f372169..a3e9c6ee3cf2 100644
869     --- a/arch/x86/mm/init_64.c
870     +++ b/arch/x86/mm/init_64.c
871     @@ -585,7 +585,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
872     paddr_end,
873     page_size_mask,
874     prot);
875     - __flush_tlb_all();
876     continue;
877     }
878     /*
879     @@ -628,7 +627,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
880     pud_populate(&init_mm, pud, pmd);
881     spin_unlock(&init_mm.page_table_lock);
882     }
883     - __flush_tlb_all();
884    
885     update_page_count(PG_LEVEL_1G, pages);
886    
887     @@ -669,7 +667,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
888     paddr_last = phys_pud_init(pud, paddr,
889     paddr_end,
890     page_size_mask);
891     - __flush_tlb_all();
892     continue;
893     }
894    
895     @@ -681,7 +678,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
896     p4d_populate(&init_mm, p4d, pud);
897     spin_unlock(&init_mm.page_table_lock);
898     }
899     - __flush_tlb_all();
900    
901     return paddr_last;
902     }
903     @@ -734,8 +730,6 @@ kernel_physical_mapping_init(unsigned long paddr_start,
904     if (pgd_changed)
905     sync_global_pgds(vaddr_start, vaddr_end - 1);
906    
907     - __flush_tlb_all();
908     -
909     return paddr_last;
910     }
911    
912     diff --git a/crypto/cfb.c b/crypto/cfb.c
913     index 20987d0e09d8..e81e45673498 100644
914     --- a/crypto/cfb.c
915     +++ b/crypto/cfb.c
916     @@ -144,7 +144,7 @@ static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
917    
918     do {
919     crypto_cfb_encrypt_one(tfm, iv, dst);
920     - crypto_xor(dst, iv, bsize);
921     + crypto_xor(dst, src, bsize);
922     iv = src;
923    
924     src += bsize;
925     diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
926     index 6e0a054bb61d..d332988eb8de 100644
927     --- a/crypto/tcrypt.c
928     +++ b/crypto/tcrypt.c
929     @@ -1736,6 +1736,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
930     ret += tcrypt_test("xts(aes)");
931     ret += tcrypt_test("ctr(aes)");
932     ret += tcrypt_test("rfc3686(ctr(aes))");
933     + ret += tcrypt_test("cfb(aes)");
934     break;
935    
936     case 11:
937     @@ -2062,6 +2063,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
938     speed_template_16_24_32);
939     test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
940     speed_template_16_24_32);
941     + test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
942     + speed_template_16_24_32);
943     + test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
944     + speed_template_16_24_32);
945     break;
946    
947     case 201:
948     diff --git a/crypto/testmgr.c b/crypto/testmgr.c
949     index 1c9bf38e59ea..54d882ffe438 100644
950     --- a/crypto/testmgr.c
951     +++ b/crypto/testmgr.c
952     @@ -2684,6 +2684,13 @@ static const struct alg_test_desc alg_test_descs[] = {
953     .dec = __VECS(aes_ccm_dec_tv_template)
954     }
955     }
956     + }, {
957     + .alg = "cfb(aes)",
958     + .test = alg_test_skcipher,
959     + .fips_allowed = 1,
960     + .suite = {
961     + .cipher = __VECS(aes_cfb_tv_template)
962     + },
963     }, {
964     .alg = "chacha20",
965     .test = alg_test_skcipher,
966     diff --git a/crypto/testmgr.h b/crypto/testmgr.h
967     index 0b3d7cadbe93..11e6f17fe724 100644
968     --- a/crypto/testmgr.h
969     +++ b/crypto/testmgr.h
970     @@ -11343,6 +11343,82 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
971     },
972     };
973    
974     +static const struct cipher_testvec aes_cfb_tv_template[] = {
975     + { /* From NIST SP800-38A */
976     + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
977     + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
978     + .klen = 16,
979     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
980     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
981     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
982     + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
983     + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
984     + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
985     + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
986     + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
987     + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
988     + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
989     + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
990     + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
991     + "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
992     + "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
993     + "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
994     + "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
995     + "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
996     + "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
997     + .len = 64,
998     + }, {
999     + .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
1000     + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
1001     + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
1002     + .klen = 24,
1003     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
1004     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1005     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
1006     + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
1007     + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
1008     + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
1009     + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
1010     + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
1011     + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
1012     + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
1013     + .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
1014     + "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
1015     + "\x67\xce\x7f\x7f\x81\x17\x36\x21"
1016     + "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
1017     + "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
1018     + "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
1019     + "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
1020     + "\x42\xae\x8f\xba\x58\x4b\x09\xff",
1021     + .len = 64,
1022     + }, {
1023     + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
1024     + "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
1025     + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
1026     + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
1027     + .klen = 32,
1028     + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
1029     + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1030     + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
1031     + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
1032     + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
1033     + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
1034     + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
1035     + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
1036     + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
1037     + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
1038     + .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
1039     + "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
1040     + "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
1041     + "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
1042     + "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
1043     + "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
1044     + "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
1045     + "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
1046     + .len = 64,
1047     + },
1048     +};
1049     +
1050     static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
1051     { /* Input data from RFC 2410 Case 1 */
1052     #ifdef __LITTLE_ENDIAN
1053     diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
1054     index 60d6cc618f1c..6d54905c6263 100644
1055     --- a/drivers/base/platform-msi.c
1056     +++ b/drivers/base/platform-msi.c
1057     @@ -366,14 +366,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
1058     unsigned int nvec)
1059     {
1060     struct platform_msi_priv_data *data = domain->host_data;
1061     - struct msi_desc *desc;
1062     - for_each_msi_entry(desc, data->dev) {
1063     + struct msi_desc *desc, *tmp;
1064     + for_each_msi_entry_safe(desc, tmp, data->dev) {
1065     if (WARN_ON(!desc->irq || desc->nvec_used != 1))
1066     return;
1067     if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
1068     continue;
1069    
1070     irq_domain_free_irqs_common(domain, desc->irq, 1);
1071     + list_del(&desc->list);
1072     + free_msi_entry(desc);
1073     }
1074     }
1075    
1076     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1077     index 7d958ff426e0..1010cb79dcc6 100644
1078     --- a/drivers/char/tpm/tpm-interface.c
1079     +++ b/drivers/char/tpm/tpm-interface.c
1080     @@ -477,13 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
1081    
1082     if (need_locality) {
1083     rc = tpm_request_locality(chip, flags);
1084     - if (rc < 0)
1085     - goto out_no_locality;
1086     + if (rc < 0) {
1087     + need_locality = false;
1088     + goto out_locality;
1089     + }
1090     }
1091    
1092     rc = tpm_cmd_ready(chip, flags);
1093     if (rc)
1094     - goto out;
1095     + goto out_locality;
1096    
1097     rc = tpm2_prepare_space(chip, space, ordinal, buf);
1098     if (rc)
1099     @@ -547,14 +549,13 @@ out_recv:
1100     dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
1101    
1102     out:
1103     - rc = tpm_go_idle(chip, flags);
1104     - if (rc)
1105     - goto out;
1106     + /* may fail but do not override previous error value in rc */
1107     + tpm_go_idle(chip, flags);
1108    
1109     +out_locality:
1110     if (need_locality)
1111     tpm_relinquish_locality(chip, flags);
1112    
1113     -out_no_locality:
1114     if (chip->ops->clk_enable != NULL)
1115     chip->ops->clk_enable(chip, false);
1116    
1117     diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
1118     index caa86b19c76d..f74f451baf6a 100644
1119     --- a/drivers/char/tpm/tpm_i2c_nuvoton.c
1120     +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
1121     @@ -369,6 +369,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
1122     struct device *dev = chip->dev.parent;
1123     struct i2c_client *client = to_i2c_client(dev);
1124     u32 ordinal;
1125     + unsigned long duration;
1126     size_t count = 0;
1127     int burst_count, bytes2write, retries, rc = -EIO;
1128    
1129     @@ -455,10 +456,12 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
1130     return rc;
1131     }
1132     ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
1133     - rc = i2c_nuvoton_wait_for_data_avail(chip,
1134     - tpm_calc_ordinal_duration(chip,
1135     - ordinal),
1136     - &priv->read_queue);
1137     + if (chip->flags & TPM_CHIP_FLAG_TPM2)
1138     + duration = tpm2_calc_ordinal_duration(chip, ordinal);
1139     + else
1140     + duration = tpm_calc_ordinal_duration(chip, ordinal);
1141     +
1142     + rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
1143     if (rc) {
1144     dev_err(dev, "%s() timeout command duration\n", __func__);
1145     i2c_nuvoton_ready(chip);
1146     diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
1147     index 67e73fd71f09..69fb3afc970f 100644
1148     --- a/drivers/clk/rockchip/clk-rk3188.c
1149     +++ b/drivers/clk/rockchip/clk-rk3188.c
1150     @@ -382,7 +382,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
1151     COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
1152     RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
1153     RK2928_CLKGATE_CON(0), 13, GFLAGS),
1154     - COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT,
1155     + COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
1156     RK2928_CLKSEL_CON(9), 0,
1157     RK2928_CLKGATE_CON(0), 14, GFLAGS,
1158     &common_spdif_fracmux),
1159     diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
1160     index 4e2073307f34..9e3944f868ff 100644
1161     --- a/drivers/clk/sunxi-ng/ccu_nm.c
1162     +++ b/drivers/clk/sunxi-ng/ccu_nm.c
1163     @@ -19,6 +19,17 @@ struct _ccu_nm {
1164     unsigned long m, min_m, max_m;
1165     };
1166    
1167     +static unsigned long ccu_nm_calc_rate(unsigned long parent,
1168     + unsigned long n, unsigned long m)
1169     +{
1170     + u64 rate = parent;
1171     +
1172     + rate *= n;
1173     + do_div(rate, m);
1174     +
1175     + return rate;
1176     +}
1177     +
1178     static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
1179     struct _ccu_nm *nm)
1180     {
1181     @@ -28,7 +39,8 @@ static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
1182    
1183     for (_n = nm->min_n; _n <= nm->max_n; _n++) {
1184     for (_m = nm->min_m; _m <= nm->max_m; _m++) {
1185     - unsigned long tmp_rate = parent * _n / _m;
1186     + unsigned long tmp_rate = ccu_nm_calc_rate(parent,
1187     + _n, _m);
1188    
1189     if (tmp_rate > rate)
1190     continue;
1191     @@ -100,7 +112,7 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
1192     if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
1193     rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
1194     else
1195     - rate = parent_rate * n / m;
1196     + rate = ccu_nm_calc_rate(parent_rate, n, m);
1197    
1198     if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
1199     rate /= nm->fixed_post_div;
1200     @@ -142,7 +154,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
1201     _nm.max_m = nm->m.max ?: 1 << nm->m.width;
1202    
1203     ccu_nm_find_best(*parent_rate, rate, &_nm);
1204     - rate = *parent_rate * _nm.n / _nm.m;
1205     + rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
1206    
1207     if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
1208     rate /= nm->fixed_post_div;
1209     diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
1210     index a11f4ba98b05..316d48d7be72 100644
1211     --- a/drivers/clocksource/Kconfig
1212     +++ b/drivers/clocksource/Kconfig
1213     @@ -290,6 +290,7 @@ config CLKSRC_MPS2
1214    
1215     config ARC_TIMERS
1216     bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
1217     + depends on GENERIC_SCHED_CLOCK
1218     select TIMER_OF
1219     help
1220     These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
1221     diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
1222     index 20da9b1d7f7d..b28970ca4a7a 100644
1223     --- a/drivers/clocksource/arc_timer.c
1224     +++ b/drivers/clocksource/arc_timer.c
1225     @@ -23,6 +23,7 @@
1226     #include <linux/cpu.h>
1227     #include <linux/of.h>
1228     #include <linux/of_irq.h>
1229     +#include <linux/sched_clock.h>
1230    
1231     #include <soc/arc/timers.h>
1232     #include <soc/arc/mcip.h>
1233     @@ -88,6 +89,11 @@ static u64 arc_read_gfrc(struct clocksource *cs)
1234     return (((u64)h) << 32) | l;
1235     }
1236    
1237     +static notrace u64 arc_gfrc_clock_read(void)
1238     +{
1239     + return arc_read_gfrc(NULL);
1240     +}
1241     +
1242     static struct clocksource arc_counter_gfrc = {
1243     .name = "ARConnect GFRC",
1244     .rating = 400,
1245     @@ -111,6 +117,8 @@ static int __init arc_cs_setup_gfrc(struct device_node *node)
1246     if (ret)
1247     return ret;
1248    
1249     + sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
1250     +
1251     return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
1252     }
1253     TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
1254     @@ -139,6 +147,11 @@ static u64 arc_read_rtc(struct clocksource *cs)
1255     return (((u64)h) << 32) | l;
1256     }
1257    
1258     +static notrace u64 arc_rtc_clock_read(void)
1259     +{
1260     + return arc_read_rtc(NULL);
1261     +}
1262     +
1263     static struct clocksource arc_counter_rtc = {
1264     .name = "ARCv2 RTC",
1265     .rating = 350,
1266     @@ -170,6 +183,8 @@ static int __init arc_cs_setup_rtc(struct device_node *node)
1267    
1268     write_aux_reg(AUX_RTC_CTRL, 1);
1269    
1270     + sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
1271     +
1272     return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
1273     }
1274     TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
1275     @@ -185,6 +200,11 @@ static u64 arc_read_timer1(struct clocksource *cs)
1276     return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
1277     }
1278    
1279     +static notrace u64 arc_timer1_clock_read(void)
1280     +{
1281     + return arc_read_timer1(NULL);
1282     +}
1283     +
1284     static struct clocksource arc_counter_timer1 = {
1285     .name = "ARC Timer1",
1286     .rating = 300,
1287     @@ -209,6 +229,8 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
1288     write_aux_reg(ARC_REG_TIMER1_CNT, 0);
1289     write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
1290    
1291     + sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
1292     +
1293     return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
1294     }
1295    
1296     diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
1297     index 2ae6124e5da6..5d54ebc20cb3 100644
1298     --- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
1299     +++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
1300     @@ -73,7 +73,7 @@ static int flexi_aes_keylen(int keylen)
1301     static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
1302     {
1303     struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
1304     - void *fctx;
1305     + struct crypto_ctx_hdr *chdr;
1306    
1307     /* get the first device */
1308     nctx->ndev = nitrox_get_first_device();
1309     @@ -81,12 +81,14 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
1310     return -ENODEV;
1311    
1312     /* allocate nitrox crypto context */
1313     - fctx = crypto_alloc_context(nctx->ndev);
1314     - if (!fctx) {
1315     + chdr = crypto_alloc_context(nctx->ndev);
1316     + if (!chdr) {
1317     nitrox_put_device(nctx->ndev);
1318     return -ENOMEM;
1319     }
1320     - nctx->u.ctx_handle = (uintptr_t)fctx;
1321     + nctx->chdr = chdr;
1322     + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
1323     + sizeof(struct ctx_hdr));
1324     crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
1325     sizeof(struct nitrox_kcrypt_request));
1326     return 0;
1327     @@ -102,7 +104,7 @@ static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
1328    
1329     memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
1330     memset(&fctx->auth, 0, sizeof(struct auth_keys));
1331     - crypto_free_context((void *)fctx);
1332     + crypto_free_context((void *)nctx->chdr);
1333     }
1334     nitrox_put_device(nctx->ndev);
1335    
1336     diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
1337     index 4d31df07777f..28baf1a19d0a 100644
1338     --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
1339     +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
1340     @@ -146,12 +146,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
1341     void *crypto_alloc_context(struct nitrox_device *ndev)
1342     {
1343     struct ctx_hdr *ctx;
1344     + struct crypto_ctx_hdr *chdr;
1345     void *vaddr;
1346     dma_addr_t dma;
1347    
1348     + chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
1349     + if (!chdr)
1350     + return NULL;
1351     +
1352     vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
1353     - if (!vaddr)
1354     + if (!vaddr) {
1355     + kfree(chdr);
1356     return NULL;
1357     + }
1358    
1359     /* fill meta data */
1360     ctx = vaddr;
1361     @@ -159,7 +166,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
1362     ctx->dma = dma;
1363     ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
1364    
1365     - return ((u8 *)vaddr + sizeof(struct ctx_hdr));
1366     + chdr->pool = ndev->ctx_pool;
1367     + chdr->dma = dma;
1368     + chdr->vaddr = vaddr;
1369     +
1370     + return chdr;
1371     }
1372    
1373     /**
1374     @@ -168,13 +179,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
1375     */
1376     void crypto_free_context(void *ctx)
1377     {
1378     - struct ctx_hdr *ctxp;
1379     + struct crypto_ctx_hdr *ctxp;
1380    
1381     if (!ctx)
1382     return;
1383    
1384     - ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
1385     - dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
1386     + ctxp = ctx;
1387     + dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
1388     + kfree(ctxp);
1389     }
1390    
1391     /**
1392     diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
1393     index d091b6f5f5dd..19f0a20e3bb3 100644
1394     --- a/drivers/crypto/cavium/nitrox/nitrox_req.h
1395     +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
1396     @@ -181,12 +181,19 @@ struct flexi_crypto_context {
1397     struct auth_keys auth;
1398     };
1399    
1400     +struct crypto_ctx_hdr {
1401     + struct dma_pool *pool;
1402     + dma_addr_t dma;
1403     + void *vaddr;
1404     +};
1405     +
1406     struct nitrox_crypto_ctx {
1407     struct nitrox_device *ndev;
1408     union {
1409     u64 ctx_handle;
1410     struct flexi_crypto_context *fctx;
1411     } u;
1412     + struct crypto_ctx_hdr *chdr;
1413     };
1414    
1415     struct nitrox_kcrypt_request {
1416     diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
1417     index 461b97e2f1fd..1ff8738631a3 100644
1418     --- a/drivers/crypto/chelsio/chcr_ipsec.c
1419     +++ b/drivers/crypto/chelsio/chcr_ipsec.c
1420     @@ -303,7 +303,10 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
1421    
1422     static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
1423     {
1424     - int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
1425     + int hdrlen;
1426     +
1427     + hdrlen = sizeof(struct fw_ulptx_wr) +
1428     + sizeof(struct chcr_ipsec_req) + kctx_len;
1429    
1430     hdrlen += sizeof(struct cpl_tx_pkt);
1431     if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
1432     diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
1433     index f455f095a146..1b014d92855b 100644
1434     --- a/drivers/gpu/drm/udl/udl_main.c
1435     +++ b/drivers/gpu/drm/udl/udl_main.c
1436     @@ -350,15 +350,10 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
1437     if (ret)
1438     goto err;
1439    
1440     - ret = drm_vblank_init(dev, 1);
1441     - if (ret)
1442     - goto err_fb;
1443     -
1444     drm_kms_helper_poll_init(dev);
1445    
1446     return 0;
1447     -err_fb:
1448     - udl_fbdev_cleanup(dev);
1449     +
1450     err:
1451     if (udl->urbs.count)
1452     udl_free_urb_list(dev);
1453     diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
1454     index 4db62c545748..26470c77eb6e 100644
1455     --- a/drivers/gpu/drm/v3d/v3d_debugfs.c
1456     +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
1457     @@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
1458     V3D_READ(v3d_hub_reg_defs[i].reg));
1459     }
1460    
1461     - for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
1462     - seq_printf(m, "%s (0x%04x): 0x%08x\n",
1463     - v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
1464     - V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
1465     + if (v3d->ver < 41) {
1466     + for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
1467     + seq_printf(m, "%s (0x%04x): 0x%08x\n",
1468     + v3d_gca_reg_defs[i].name,
1469     + v3d_gca_reg_defs[i].reg,
1470     + V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
1471     + }
1472     }
1473    
1474     for (core = 0; core < v3d->cores; core++) {
1475     diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
1476     index 3dfb4cf2f8c9..48692adbe811 100644
1477     --- a/drivers/infiniband/hw/hfi1/verbs.c
1478     +++ b/drivers/infiniband/hw/hfi1/verbs.c
1479     @@ -1141,6 +1141,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
1480    
1481     if (slen > len)
1482     slen = len;
1483     + if (slen > ss->sge.sge_length)
1484     + slen = ss->sge.sge_length;
1485     rvt_update_sge(ss, slen, false);
1486     seg_pio_copy_mid(pbuf, addr, slen);
1487     len -= slen;
1488     diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1489     index a94b6494e71a..f322a1768fbb 100644
1490     --- a/drivers/input/mouse/elan_i2c_core.c
1491     +++ b/drivers/input/mouse/elan_i2c_core.c
1492     @@ -1336,6 +1336,7 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
1493     static const struct acpi_device_id elan_acpi_id[] = {
1494     { "ELAN0000", 0 },
1495     { "ELAN0100", 0 },
1496     + { "ELAN0501", 0 },
1497     { "ELAN0600", 0 },
1498     { "ELAN0602", 0 },
1499     { "ELAN0605", 0 },
1500     diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
1501     index 3232af5dcf89..a7ace07e179e 100644
1502     --- a/drivers/input/touchscreen/atmel_mxt_ts.c
1503     +++ b/drivers/input/touchscreen/atmel_mxt_ts.c
1504     @@ -1586,10 +1586,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
1505     /* T7 config may have changed */
1506     mxt_init_t7_power_cfg(data);
1507    
1508     -release_raw:
1509     - kfree(cfg.raw);
1510     release_mem:
1511     kfree(cfg.mem);
1512     +release_raw:
1513     + kfree(cfg.raw);
1514     return ret;
1515     }
1516    
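
The label swap above matters because C labels fall through in source order: after the reorder, an error taken before cfg.mem exists can jump to release_raw and free only cfg.raw, while paths that jump to release_mem free cfg.mem first and then fall into release_raw. A small standalone sketch of that unwind ordering (demo_two_step and its argument are invented for illustration):

#include <stdlib.h>

/* Unwind with gotos: labels run in reverse order of acquisition, so a jump
 * taken before the second resource exists must land below the label that
 * frees that second resource. */
static int demo_two_step(int fail_late)
{
	int ret = -1;
	char *raw, *mem;

	raw = malloc(16);            /* acquired first (like cfg.raw) */
	if (!raw)
		return ret;

	mem = malloc(32);            /* acquired second (like cfg.mem) */
	if (!mem)
		goto release_raw;    /* only raw exists at this point */

	if (fail_late)
		goto release_mem;    /* both exist: free mem, then fall into raw */

	ret = 0;
release_mem:
	free(mem);
release_raw:
	free(raw);
	return ret;
}

int main(void)
{
	(void)demo_two_step(0);      /* success path: frees both */
	(void)demo_two_step(1);      /* late failure: frees both */
	return 0;
}
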
1517     diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
1518     index 5059d09f3202..3e02aace38b1 100644
1519     --- a/drivers/iommu/arm-smmu-v3.c
1520     +++ b/drivers/iommu/arm-smmu-v3.c
1521     @@ -837,7 +837,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
1522     cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
1523     cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
1524     cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
1525     - cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
1526     + /*
1527     + * Commands are written little-endian, but we want the SMMU to
1528     + * receive MSIData, and thus write it back to memory, in CPU
1529     + * byte order, so big-endian needs an extra byteswap here.
1530     + */
1531     + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
1532     + cpu_to_le32(ent->sync.msidata));
1533     cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
1534     break;
1535     default:
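
The new comment explains the trick: the command word is stored little-endian later on, so pre-swapping MSIData with cpu_to_le32() means the two byteswaps cancel on big-endian and the value lands in memory in CPU byte order. A quick userspace check of that double-swap identity, using glibc's htole32() as a stand-in for cpu_to_le32() (assumption: a glibc-style endian.h is available):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msidata = 0x12345678u;

	/* Pre-swap once (what the hunk does), then swap again when the
	 * command is written out little-endian: the result is the original
	 * CPU-order value on both little- and big-endian hosts. */
	uint32_t stored = htole32(htole32(msidata));

	printf("msidata=0x%08x stored=0x%08x\n", msidata, stored);
	return stored == msidata ? 0 : 1;
}
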
1536     diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
1537     index 0ff517d3c98f..a4ceb61c5b60 100644
1538     --- a/drivers/isdn/capi/kcapi.c
1539     +++ b/drivers/isdn/capi/kcapi.c
1540     @@ -852,7 +852,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
1541     u16 ret;
1542    
1543     if (contr == 0) {
1544     - strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
1545     + strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
1546     return CAPI_NOERROR;
1547     }
1548    
1549     @@ -860,7 +860,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
1550    
1551     ctr = get_capi_ctr_by_nr(contr);
1552     if (ctr && ctr->state == CAPI_CTR_RUNNING) {
1553     - strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
1554     + strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
1555     ret = CAPI_NOERROR;
1556     } else
1557     ret = CAPI_REGNOTINSTALLED;
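
One property that distinguishes the two calls, and plausibly motivates the revert above, is that strncpy() zero-fills the destination after the copied string, whereas strlcpy() leaves the tail untouched; that matters whenever the whole fixed-size buffer is later copied out in full (for example to user space). A tiny demonstration of that padding behaviour; the buffer size and contents are arbitrary:

#include <stdio.h>
#include <string.h>

#define LEN 16

int main(void)
{
	char buf[LEN];
	size_t i;

	memset(buf, 0xAA, sizeof(buf));    /* simulate stale bytes in the buffer */

	/* strncpy() copies "AVM\0" and then pads the remaining bytes with
	 * '\0', so nothing stale survives in the fixed-size destination. */
	strncpy(buf, "AVM", LEN);

	for (i = 0; i < LEN; i++)
		printf("%02x ", (unsigned char)buf[i]);
	putchar('\n');
	return 0;
}
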
1558     diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
1559     index a537e518384b..a7ea27d2aa8e 100644
1560     --- a/drivers/media/cec/cec-adap.c
1561     +++ b/drivers/media/cec/cec-adap.c
1562     @@ -442,7 +442,7 @@ int cec_thread_func(void *_adap)
1563     (adap->needs_hpd &&
1564     (!adap->is_configured && !adap->is_configuring)) ||
1565     kthread_should_stop() ||
1566     - (!adap->transmitting &&
1567     + (!adap->transmit_in_progress &&
1568     !list_empty(&adap->transmit_queue)),
1569     msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
1570     timeout = err == 0;
1571     @@ -450,7 +450,7 @@ int cec_thread_func(void *_adap)
1572     /* Otherwise we just wait for something to happen. */
1573     wait_event_interruptible(adap->kthread_waitq,
1574     kthread_should_stop() ||
1575     - (!adap->transmitting &&
1576     + (!adap->transmit_in_progress &&
1577     !list_empty(&adap->transmit_queue)));
1578     }
1579    
1580     @@ -475,6 +475,7 @@ int cec_thread_func(void *_adap)
1581     pr_warn("cec-%s: message %*ph timed out\n", adap->name,
1582     adap->transmitting->msg.len,
1583     adap->transmitting->msg.msg);
1584     + adap->transmit_in_progress = false;
1585     adap->tx_timeouts++;
1586     /* Just give up on this. */
1587     cec_data_cancel(adap->transmitting,
1588     @@ -486,7 +487,7 @@ int cec_thread_func(void *_adap)
1589     * If we are still transmitting, or there is nothing new to
1590     * transmit, then just continue waiting.
1591     */
1592     - if (adap->transmitting || list_empty(&adap->transmit_queue))
1593     + if (adap->transmit_in_progress || list_empty(&adap->transmit_queue))
1594     goto unlock;
1595    
1596     /* Get a new message to transmit */
1597     @@ -532,6 +533,8 @@ int cec_thread_func(void *_adap)
1598     if (adap->ops->adap_transmit(adap, data->attempts,
1599     signal_free_time, &data->msg))
1600     cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
1601     + else
1602     + adap->transmit_in_progress = true;
1603    
1604     unlock:
1605     mutex_unlock(&adap->lock);
1606     @@ -562,14 +565,17 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
1607     data = adap->transmitting;
1608     if (!data) {
1609     /*
1610     - * This can happen if a transmit was issued and the cable is
1611     + * This might happen if a transmit was issued and the cable is
1612     * unplugged while the transmit is ongoing. Ignore this
1613     * transmit in that case.
1614     */
1615     - dprintk(1, "%s was called without an ongoing transmit!\n",
1616     - __func__);
1617     - goto unlock;
1618     + if (!adap->transmit_in_progress)
1619     + dprintk(1, "%s was called without an ongoing transmit!\n",
1620     + __func__);
1621     + adap->transmit_in_progress = false;
1622     + goto wake_thread;
1623     }
1624     + adap->transmit_in_progress = false;
1625    
1626     msg = &data->msg;
1627    
1628     @@ -635,7 +641,6 @@ wake_thread:
1629     * for transmitting or to retry the current message.
1630     */
1631     wake_up_interruptible(&adap->kthread_waitq);
1632     -unlock:
1633     mutex_unlock(&adap->lock);
1634     }
1635     EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
1636     @@ -1483,8 +1488,11 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1637     if (adap->monitor_all_cnt)
1638     WARN_ON(call_op(adap, adap_monitor_all_enable, false));
1639     mutex_lock(&adap->devnode.lock);
1640     - if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
1641     + if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
1642     WARN_ON(adap->ops->adap_enable(adap, false));
1643     + adap->transmit_in_progress = false;
1644     + wake_up_interruptible(&adap->kthread_waitq);
1645     + }
1646     mutex_unlock(&adap->devnode.lock);
1647     if (phys_addr == CEC_PHYS_ADDR_INVALID)
1648     return;
1649     @@ -1492,6 +1500,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1650    
1651     mutex_lock(&adap->devnode.lock);
1652     adap->last_initiator = 0xff;
1653     + adap->transmit_in_progress = false;
1654    
1655     if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
1656     adap->ops->adap_enable(adap, true)) {
1657     diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
1658     index 6e311424f0dc..0496d93b2b8f 100644
1659     --- a/drivers/media/cec/cec-pin.c
1660     +++ b/drivers/media/cec/cec-pin.c
1661     @@ -601,8 +601,9 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
1662     break;
1663     /* Was the message ACKed? */
1664     ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
1665     - if (!ack && !pin->tx_ignore_nack_until_eom &&
1666     - pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) {
1667     + if (!ack && (!pin->tx_ignore_nack_until_eom ||
1668     + pin->tx_bit / 10 == pin->tx_msg.len - 1) &&
1669     + !pin->tx_post_eom) {
1670     /*
1671     * Note: the CEC spec is ambiguous regarding
1672     * what action to take when a NACK appears
1673     diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1674     index f40ab5704bf0..2036b94269af 100644
1675     --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1676     +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
1677     @@ -1738,7 +1738,7 @@ typedef struct { u16 __; u8 _; } __packed x24;
1678     unsigned s; \
1679     \
1680     for (s = 0; s < len; s++) { \
1681     - u8 chr = font8x16[text[s] * 16 + line]; \
1682     + u8 chr = font8x16[(u8)text[s] * 16 + line]; \
1683     \
1684     if (hdiv == 2 && tpg->hflip) { \
1685     pos[3] = (chr & (0x01 << 6) ? fg : bg); \
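
The cast to u8 above protects the font-table index: on targets where plain char is signed, a character code above 0x7F is sign-extended to a negative int and the lookup would read before the start of font8x16. A short standalone illustration (glyph_offset is an invented helper; 16 matches the 8x16 glyph stride used above):

#include <stdio.h>

/* Without the cast, a char holding a value above 0x7F becomes negative on
 * signed-char targets and the computed table offset goes below zero. */
static int glyph_offset(char c)
{
	return (unsigned char)c * 16;    /* matches font8x16[(u8)text[s] * 16 + line] */
}

int main(void)
{
	char c = (char)0xD0;

	printf("raw index: %d, cast index: %d\n", c * 16, glyph_offset(c));
	return 0;
}
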
1686     diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
1687     index 16c7b20cbf61..1faa64abc74f 100644
1688     --- a/drivers/media/common/videobuf2/videobuf2-core.c
1689     +++ b/drivers/media/common/videobuf2/videobuf2-core.c
1690     @@ -800,6 +800,9 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
1691     memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
1692     q->memory = memory;
1693     q->waiting_for_buffers = !q->is_output;
1694     + } else if (q->memory != memory) {
1695     + dprintk(1, "memory model mismatch\n");
1696     + return -EINVAL;
1697     }
1698    
1699     num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
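
The added branch rejects a request whose memory model differs from the one an earlier request already configured on the queue; only the first caller gets to choose it. A minimal sketch of that first-caller-wins guard (the demo_* names and the enum values are illustrative only):

#include <errno.h>
#include <stdio.h>

enum demo_memory { DEMO_MEM_NONE = 0, DEMO_MEM_MMAP, DEMO_MEM_USERPTR };

static enum demo_memory configured = DEMO_MEM_NONE;

/* First request picks the memory model; later requests must match it. */
static int demo_request_buffers(enum demo_memory memory)
{
	if (configured == DEMO_MEM_NONE)
		configured = memory;
	else if (configured != memory)
		return -EINVAL;           /* "memory model mismatch" */
	return 0;
}

int main(void)
{
	int a = demo_request_buffers(DEMO_MEM_MMAP);      /* configures MMAP */
	int b = demo_request_buffers(DEMO_MEM_MMAP);      /* ok, same model */
	int c = demo_request_buffers(DEMO_MEM_USERPTR);   /* rejected */

	printf("%d %d %d\n", a, b, c);
	return 0;
}
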
1700     diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
1701     index f8c70f1a34fe..8cc3bdb7f608 100644
1702     --- a/drivers/media/i2c/imx274.c
1703     +++ b/drivers/media/i2c/imx274.c
1704     @@ -636,16 +636,19 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
1705    
1706     static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
1707     {
1708     + unsigned int uint_val;
1709     int err;
1710    
1711     - err = regmap_read(priv->regmap, addr, (unsigned int *)val);
1712     + err = regmap_read(priv->regmap, addr, &uint_val);
1713     if (err)
1714     dev_err(&priv->client->dev,
1715     "%s : i2c read failed, addr = %x\n", __func__, addr);
1716     else
1717     dev_dbg(&priv->client->dev,
1718     "%s : addr 0x%x, val=0x%x\n", __func__,
1719     - addr, *val);
1720     + addr, uint_val);
1721     +
1722     + *val = uint_val;
1723     return err;
1724     }
1725    
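
The rewritten helper stops casting a u8 pointer to unsigned int * before handing it to regmap_read(): writing 32 bits through such a cast would clobber the three bytes following val and can fault on strict-alignment CPUs, so the value is read into a correctly sized temporary and narrowed afterwards. A userspace sketch of the same pattern (read_reg32 is a made-up stand-in for regmap_read):

#include <stdio.h>

/* Stand-in for a register-read helper that always writes 32 bits. */
static void read_reg32(unsigned int *out)
{
	*out = 0x000000ABu;
}

int main(void)
{
	struct { unsigned char val; unsigned char neighbour[3]; } s =
		{ 0, { 0x11, 0x22, 0x33 } };
	unsigned int tmp;

	/* Wrong: read_reg32((unsigned int *)&s.val) would overwrite
	 * s.neighbour as well, and may fault on strict-alignment CPUs.
	 * Safe pattern from the hunk: read into a full-width temporary,
	 * then assign the low byte. */
	read_reg32(&tmp);
	s.val = (unsigned char)tmp;

	printf("val=0x%02x neighbour=%02x %02x %02x\n",
	       s.val, s.neighbour[0], s.neighbour[1], s.neighbour[2]);
	return 0;
}
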
1726     diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
1727     index 1599159f2574..baa7c83ee6e0 100644
1728     --- a/drivers/media/platform/vivid/vivid-vid-cap.c
1729     +++ b/drivers/media/platform/vivid/vivid-vid-cap.c
1730     @@ -438,6 +438,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
1731     tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
1732     break;
1733     }
1734     + vfree(dev->bitmap_cap);
1735     + dev->bitmap_cap = NULL;
1736     vivid_update_quality(dev);
1737     tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
1738     dev->crop_cap = dev->src_rect;
1739     diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
1740     index ca68e1d2b2f9..8b2c16dd58bd 100644
1741     --- a/drivers/media/rc/rc-main.c
1742     +++ b/drivers/media/rc/rc-main.c
1743     @@ -707,7 +707,8 @@ void rc_repeat(struct rc_dev *dev)
1744     (dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0)
1745     };
1746    
1747     - ir_lirc_scancode_event(dev, &sc);
1748     + if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
1749     + ir_lirc_scancode_event(dev, &sc);
1750    
1751     spin_lock_irqsave(&dev->keylock, flags);
1752    
1753     @@ -747,7 +748,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
1754     .keycode = keycode
1755     };
1756    
1757     - ir_lirc_scancode_event(dev, &sc);
1758     + if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
1759     + ir_lirc_scancode_event(dev, &sc);
1760    
1761     if (new_event && dev->keypressed)
1762     ir_do_keyup(dev, false);
1763     diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
1764     index 024c751eb165..2ad2ddeaff51 100644
1765     --- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
1766     +++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
1767     @@ -155,7 +155,6 @@ static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream)
1768     stream->props.u.bulk.buffersize,
1769     usb_urb_complete, stream);
1770    
1771     - stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER;
1772     stream->urbs_initialized++;
1773     }
1774     return 0;
1775     @@ -186,7 +185,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
1776     urb->complete = usb_urb_complete;
1777     urb->pipe = usb_rcvisocpipe(stream->udev,
1778     stream->props.endpoint);
1779     - urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER;
1780     + urb->transfer_flags = URB_ISO_ASAP;
1781     urb->interval = stream->props.u.isoc.interval;
1782     urb->number_of_packets = stream->props.u.isoc.framesperurb;
1783     urb->transfer_buffer_length = stream->props.u.isoc.framesize *
1784     @@ -210,7 +209,7 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream)
1785     if (stream->state & USB_STATE_URB_BUF) {
1786     while (stream->buf_num) {
1787     stream->buf_num--;
1788     - stream->buf_list[stream->buf_num] = NULL;
1789     + kfree(stream->buf_list[stream->buf_num]);
1790     }
1791     }
1792    
1793     diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
1794     index 57a6bb1fd3c9..8f2c5d8bd2ee 100644
1795     --- a/drivers/misc/ocxl/config.c
1796     +++ b/drivers/misc/ocxl/config.c
1797     @@ -318,7 +318,7 @@ static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn,
1798     if (rc)
1799     return rc;
1800     ptr = (u32 *) &afu->name[i];
1801     - *ptr = val;
1802     + *ptr = le32_to_cpu((__force __le32) val);
1803     }
1804     afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
1805     return 0;
1806     diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
1807     index 31695a078485..646d16450066 100644
1808     --- a/drivers/misc/ocxl/link.c
1809     +++ b/drivers/misc/ocxl/link.c
1810     @@ -566,7 +566,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
1811    
1812     mutex_lock(&spa->spa_lock);
1813    
1814     - pe->tid = tid;
1815     + pe->tid = cpu_to_be32(tid);
1816    
1817     /*
1818     * The barrier makes sure the PE is updated
1819     diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
1820     index c7573ccdbacd..9c90695a885f 100644
1821     --- a/drivers/mtd/nand/raw/marvell_nand.c
1822     +++ b/drivers/mtd/nand/raw/marvell_nand.c
1823     @@ -444,9 +444,14 @@ static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
1824     writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
1825     }
1826    
1827     -static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
1828     +static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
1829     {
1830     + u32 reg;
1831     +
1832     + reg = readl_relaxed(nfc->regs + NDSR);
1833     writel_relaxed(int_mask, nfc->regs + NDSR);
1834     +
1835     + return reg & int_mask;
1836     }
1837    
1838     static void marvell_nfc_force_byte_access(struct nand_chip *chip,
1839     @@ -613,6 +618,7 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
1840     static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
1841     {
1842     struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1843     + u32 pending;
1844     int ret;
1845    
1846     /* Timeout is expressed in ms */
1847     @@ -625,8 +631,13 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
1848     ret = wait_for_completion_timeout(&nfc->complete,
1849     msecs_to_jiffies(timeout_ms));
1850     marvell_nfc_disable_int(nfc, NDCR_RDYM);
1851     - marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
1852     - if (!ret) {
1853     + pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
1854     +
1855     + /*
1856     + * In case the interrupt was not served in the required time frame,
1857     + * check if the ISR was not served or if something went actually wrong.
1858     + */
1859     + if (ret && !pending) {
1860     dev_err(nfc->dev, "Timeout waiting for RB signal\n");
1861     return -ETIMEDOUT;
1862     }
1863     diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
1864     index 4546ac0bed4a..b1683d7a7e04 100644
1865     --- a/drivers/mtd/nand/raw/omap2.c
1866     +++ b/drivers/mtd/nand/raw/omap2.c
1867     @@ -1938,7 +1938,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
1868     case NAND_OMAP_PREFETCH_DMA:
1869     dma_cap_zero(mask);
1870     dma_cap_set(DMA_SLAVE, mask);
1871     - info->dma = dma_request_chan(dev, "rxtx");
1872     + info->dma = dma_request_chan(dev->parent, "rxtx");
1873    
1874     if (IS_ERR(info->dma)) {
1875     dev_err(dev, "DMA engine request failed\n");
1876     diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
1877     index 6cc9c929ff57..37775fc09e09 100644
1878     --- a/drivers/mtd/spi-nor/Kconfig
1879     +++ b/drivers/mtd/spi-nor/Kconfig
1880     @@ -41,7 +41,7 @@ config SPI_ASPEED_SMC
1881    
1882     config SPI_ATMEL_QUADSPI
1883     tristate "Atmel Quad SPI Controller"
1884     - depends on ARCH_AT91 || (ARM && COMPILE_TEST)
1885     + depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
1886     depends on OF && HAS_IOMEM
1887     help
1888     This enables support for the Quad SPI controller in master mode.
1889     diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1890     index 58b9744c4058..240fd36b5074 100644
1891     --- a/drivers/net/ethernet/cadence/macb_main.c
1892     +++ b/drivers/net/ethernet/cadence/macb_main.c
1893     @@ -61,7 +61,8 @@
1894     #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
1895     | MACB_BIT(ISR_RLE) \
1896     | MACB_BIT(TXERR))
1897     -#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
1898     +#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \
1899     + | MACB_BIT(TXUBR))
1900    
1901     /* Max length of transmit frame must be a multiple of 8 bytes */
1902     #define MACB_TX_LEN_ALIGN 8
1903     @@ -1313,6 +1314,21 @@ static void macb_hresp_error_task(unsigned long data)
1904     netif_tx_start_all_queues(dev);
1905     }
1906    
1907     +static void macb_tx_restart(struct macb_queue *queue)
1908     +{
1909     + unsigned int head = queue->tx_head;
1910     + unsigned int tail = queue->tx_tail;
1911     + struct macb *bp = queue->bp;
1912     +
1913     + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1914     + queue_writel(queue, ISR, MACB_BIT(TXUBR));
1915     +
1916     + if (head == tail)
1917     + return;
1918     +
1919     + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1920     +}
1921     +
1922     static irqreturn_t macb_interrupt(int irq, void *dev_id)
1923     {
1924     struct macb_queue *queue = dev_id;
1925     @@ -1370,6 +1386,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1926     if (status & MACB_BIT(TCOMP))
1927     macb_tx_interrupt(queue);
1928    
1929     + if (status & MACB_BIT(TXUBR))
1930     + macb_tx_restart(queue);
1931     +
1932     /* Link change detection isn't possible with RMII, so we'll
1933     * add that if/when we get our hands on a full-blown MII PHY.
1934     */
1935     diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
1936     index 525d8b89187b..91f48c078073 100644
1937     --- a/drivers/net/ethernet/ibm/ibmveth.c
1938     +++ b/drivers/net/ethernet/ibm/ibmveth.c
1939     @@ -1172,11 +1172,15 @@ out:
1940    
1941     map_failed_frags:
1942     last = i+1;
1943     - for (i = 0; i < last; i++)
1944     + for (i = 1; i < last; i++)
1945     dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1946     descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1947     DMA_TO_DEVICE);
1948    
1949     + dma_unmap_single(&adapter->vdev->dev,
1950     + descs[0].fields.address,
1951     + descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1952     + DMA_TO_DEVICE);
1953     map_failed:
1954     if (!firmware_has_feature(FW_FEATURE_CMO))
1955     netdev_err(netdev, "tx: unable to map xmit buffer\n");
1956     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
1957     index b4ed7d394d07..a78a39244b79 100644
1958     --- a/drivers/net/ethernet/marvell/mvneta.c
1959     +++ b/drivers/net/ethernet/marvell/mvneta.c
1960     @@ -406,7 +406,6 @@ struct mvneta_port {
1961     struct mvneta_pcpu_stats __percpu *stats;
1962    
1963     int pkt_size;
1964     - unsigned int frag_size;
1965     void __iomem *base;
1966     struct mvneta_rx_queue *rxqs;
1967     struct mvneta_tx_queue *txqs;
1968     @@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
1969     if (!pp->bm_priv) {
1970     /* Set Offset */
1971     mvneta_rxq_offset_set(pp, rxq, 0);
1972     - mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
1973     + mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
1974     + PAGE_SIZE :
1975     + MVNETA_RX_BUF_SIZE(pp->pkt_size));
1976     mvneta_rxq_bm_disable(pp, rxq);
1977     mvneta_rxq_fill(pp, rxq, rxq->size);
1978     } else {
1979     @@ -3749,7 +3750,6 @@ static int mvneta_open(struct net_device *dev)
1980     int ret;
1981    
1982     pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
1983     - pp->frag_size = PAGE_SIZE;
1984    
1985     ret = mvneta_setup_rxqs(pp);
1986     if (ret)
1987     diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1988     index 6320e080b831..f8e4808a8317 100644
1989     --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1990     +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1991     @@ -4292,12 +4292,15 @@ static void mvpp2_phylink_validate(struct net_device *dev,
1992     case PHY_INTERFACE_MODE_10GKR:
1993     case PHY_INTERFACE_MODE_XAUI:
1994     case PHY_INTERFACE_MODE_NA:
1995     - phylink_set(mask, 10000baseCR_Full);
1996     - phylink_set(mask, 10000baseSR_Full);
1997     - phylink_set(mask, 10000baseLR_Full);
1998     - phylink_set(mask, 10000baseLRM_Full);
1999     - phylink_set(mask, 10000baseER_Full);
2000     - phylink_set(mask, 10000baseKR_Full);
2001     + if (port->gop_id == 0) {
2002     + phylink_set(mask, 10000baseT_Full);
2003     + phylink_set(mask, 10000baseCR_Full);
2004     + phylink_set(mask, 10000baseSR_Full);
2005     + phylink_set(mask, 10000baseLR_Full);
2006     + phylink_set(mask, 10000baseLRM_Full);
2007     + phylink_set(mask, 10000baseER_Full);
2008     + phylink_set(mask, 10000baseKR_Full);
2009     + }
2010     /* Fall-through */
2011     case PHY_INTERFACE_MODE_RGMII:
2012     case PHY_INTERFACE_MODE_RGMII_ID:
2013     @@ -4308,7 +4311,6 @@ static void mvpp2_phylink_validate(struct net_device *dev,
2014     phylink_set(mask, 10baseT_Full);
2015     phylink_set(mask, 100baseT_Half);
2016     phylink_set(mask, 100baseT_Full);
2017     - phylink_set(mask, 10000baseT_Full);
2018     /* Fall-through */
2019     case PHY_INTERFACE_MODE_1000BASEX:
2020     case PHY_INTERFACE_MODE_2500BASEX:
2021     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2022     index 98dd3e0ada72..5e5423076b03 100644
2023     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2024     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2025     @@ -1101,11 +1101,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
2026     struct ethtool_ts_info *info)
2027     {
2028     struct mlx5_core_dev *mdev = priv->mdev;
2029     - int ret;
2030     -
2031     - ret = ethtool_op_get_ts_info(priv->netdev, info);
2032     - if (ret)
2033     - return ret;
2034    
2035     info->phc_index = mlx5_clock_get_ptp_index(mdev);
2036    
2037     @@ -1113,9 +1108,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
2038     info->phc_index == -1)
2039     return 0;
2040    
2041     - info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
2042     - SOF_TIMESTAMPING_RX_HARDWARE |
2043     - SOF_TIMESTAMPING_RAW_HARDWARE;
2044     + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
2045     + SOF_TIMESTAMPING_RX_HARDWARE |
2046     + SOF_TIMESTAMPING_RAW_HARDWARE;
2047    
2048     info->tx_types = BIT(HWTSTAMP_TX_OFF) |
2049     BIT(HWTSTAMP_TX_ON);
2050     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2051     index faa84b45e20a..7365899c3ac9 100644
2052     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2053     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2054     @@ -128,6 +128,8 @@ static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
2055     return !params->lro_en && frag_sz <= PAGE_SIZE;
2056     }
2057    
2058     +#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
2059     + MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
2060     static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
2061     struct mlx5e_params *params)
2062     {
2063     @@ -138,6 +140,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
2064     if (!mlx5e_rx_is_linear_skb(mdev, params))
2065     return false;
2066    
2067     + if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
2068     + return false;
2069     +
2070     if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
2071     return true;
2072    
2073     @@ -1383,6 +1388,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
2074     struct mlx5_core_dev *mdev = c->mdev;
2075     struct mlx5_rate_limit rl = {0};
2076    
2077     + cancel_work_sync(&sq->dim.work);
2078     mlx5e_destroy_sq(mdev, sq->sqn);
2079     if (sq->rate_limit) {
2080     rl.rate = sq->rate_limit;
2081     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2082     index d543a5cff049..8262f093fec4 100644
2083     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2084     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2085     @@ -1150,7 +1150,7 @@ mpwrq_cqe_out:
2086     int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2087     {
2088     struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
2089     - struct mlx5e_xdpsq *xdpsq;
2090     + struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
2091     struct mlx5_cqe64 *cqe;
2092     int work_done = 0;
2093    
2094     @@ -1161,10 +1161,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2095     work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
2096    
2097     cqe = mlx5_cqwq_get_cqe(&cq->wq);
2098     - if (!cqe)
2099     + if (!cqe) {
2100     + if (unlikely(work_done))
2101     + goto out;
2102     return 0;
2103     -
2104     - xdpsq = &rq->xdpsq;
2105     + }
2106    
2107     do {
2108     if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2109     @@ -1179,6 +1180,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2110     rq->handle_rx_cqe(rq, cqe);
2111     } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
2112    
2113     +out:
2114     if (xdpsq->doorbell) {
2115     mlx5e_xmit_xdp_doorbell(xdpsq);
2116     xdpsq->doorbell = false;
2117     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2118     index d57d51c4e658..7047cc293545 100644
2119     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2120     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2121     @@ -73,7 +73,6 @@ static const struct counter_desc sw_stats_desc[] = {
2122     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
2123     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
2124     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
2125     - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
2126     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
2127     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
2128     { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
2129     @@ -194,7 +193,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
2130     s->tx_nop += sq_stats->nop;
2131     s->tx_queue_stopped += sq_stats->stopped;
2132     s->tx_queue_wake += sq_stats->wake;
2133     - s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
2134     s->tx_queue_dropped += sq_stats->dropped;
2135     s->tx_cqe_err += sq_stats->cqe_err;
2136     s->tx_recover += sq_stats->recover;
2137     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
2138     index c1064af9d54c..0ad7a165443a 100644
2139     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
2140     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
2141     @@ -86,7 +86,6 @@ struct mlx5e_sw_stats {
2142     u64 tx_recover;
2143     u64 tx_cqes;
2144     u64 tx_queue_wake;
2145     - u64 tx_udp_seg_rem;
2146     u64 tx_cqe_err;
2147     u64 tx_xdp_xmit;
2148     u64 tx_xdp_full;
2149     @@ -217,7 +216,6 @@ struct mlx5e_sq_stats {
2150     u64 csum_partial_inner;
2151     u64 added_vlan_packets;
2152     u64 nop;
2153     - u64 udp_seg_rem;
2154     #ifdef CONFIG_MLX5_EN_TLS
2155     u64 tls_ooo;
2156     u64 tls_resync_bytes;
2157     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2158     index 37d114c668b7..d181645fd968 100644
2159     --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2160     +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2161     @@ -432,7 +432,7 @@ static void del_sw_hw_rule(struct fs_node *node)
2162    
2163     if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
2164     --fte->dests_size) {
2165     - modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
2166     + modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
2167     update_fte = true;
2168     }
2169     out:
2170     diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
2171     index 30f751e69698..f7154f358f27 100644
2172     --- a/drivers/net/ethernet/mellanox/mlxsw/core.c
2173     +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
2174     @@ -81,6 +81,7 @@ struct mlxsw_core {
2175     struct mlxsw_core_port *ports;
2176     unsigned int max_ports;
2177     bool reload_fail;
2178     + bool fw_flash_in_progress;
2179     unsigned long driver_priv[0];
2180     /* driver_priv has to be always the last item */
2181     };
2182     @@ -428,12 +429,16 @@ struct mlxsw_reg_trans {
2183     struct rcu_head rcu;
2184     };
2185    
2186     -#define MLXSW_EMAD_TIMEOUT_MS 200
2187     +#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
2188     +#define MLXSW_EMAD_TIMEOUT_MS 200
2189    
2190     static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
2191     {
2192     unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
2193    
2194     + if (trans->core->fw_flash_in_progress)
2195     + timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
2196     +
2197     queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
2198     }
2199    
2200     @@ -1854,6 +1859,18 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
2201     }
2202     EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
2203    
2204     +void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
2205     +{
2206     + mlxsw_core->fw_flash_in_progress = true;
2207     +}
2208     +EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
2209     +
2210     +void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
2211     +{
2212     + mlxsw_core->fw_flash_in_progress = false;
2213     +}
2214     +EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
2215     +
2216     static int __init mlxsw_core_module_init(void)
2217     {
2218     int err;
2219     diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
2220     index c35be477856f..c4e4971764e5 100644
2221     --- a/drivers/net/ethernet/mellanox/mlxsw/core.h
2222     +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
2223     @@ -292,6 +292,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
2224     u64 *p_single_size, u64 *p_double_size,
2225     u64 *p_linear_size);
2226    
2227     +void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
2228     +void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
2229     +
2230     bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
2231     enum mlxsw_res_id res_id);
2232    
2233     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2234     index ada644d90029..3d1159f8a53f 100644
2235     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2236     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2237     @@ -308,8 +308,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
2238     },
2239     .mlxsw_sp = mlxsw_sp
2240     };
2241     + int err;
2242     +
2243     + mlxsw_core_fw_flash_start(mlxsw_sp->core);
2244     + err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
2245     + mlxsw_core_fw_flash_end(mlxsw_sp->core);
2246    
2247     - return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
2248     + return err;
2249     }
2250    
2251     static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
2252     diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
2253     index ed4e298cd823..0bdd3c400c92 100644
2254     --- a/drivers/net/ethernet/mscc/ocelot.c
2255     +++ b/drivers/net/ethernet/mscc/ocelot.c
2256     @@ -733,7 +733,7 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2257     }
2258    
2259     return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
2260     - ENTRYTYPE_NORMAL);
2261     + ENTRYTYPE_LOCKED);
2262     }
2263    
2264     static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
2265     diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
2266     index bd19624f10cf..90148dbb261b 100644
2267     --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
2268     +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
2269     @@ -375,13 +375,29 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
2270     !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
2271     return -EOPNOTSUPP;
2272    
2273     - /* We need to store TCP flags in the IPv4 key space, thus
2274     - * we need to ensure we include a IPv4 key layer if we have
2275     - * not done so already.
2276     + /* We need to store TCP flags in the either the IPv4 or IPv6 key
2277     + * space, thus we need to ensure we include a IPv4/IPv6 key
2278     + * layer if we have not done so already.
2279     */
2280     - if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
2281     - key_layer |= NFP_FLOWER_LAYER_IPV4;
2282     - key_size += sizeof(struct nfp_flower_ipv4);
2283     + if (!key_basic)
2284     + return -EOPNOTSUPP;
2285     +
2286     + if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
2287     + !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
2288     + switch (key_basic->n_proto) {
2289     + case cpu_to_be16(ETH_P_IP):
2290     + key_layer |= NFP_FLOWER_LAYER_IPV4;
2291     + key_size += sizeof(struct nfp_flower_ipv4);
2292     + break;
2293     +
2294     + case cpu_to_be16(ETH_P_IPV6):
2295     + key_layer |= NFP_FLOWER_LAYER_IPV6;
2296     + key_size += sizeof(struct nfp_flower_ipv6);
2297     + break;
2298     +
2299     + default:
2300     + return -EOPNOTSUPP;
2301     + }
2302     }
2303     }
2304    
2305     diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
2306     index a71382687ef2..bed8f48e029a 100644
2307     --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
2308     +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
2309     @@ -12669,8 +12669,9 @@ enum MFW_DRV_MSG_TYPE {
2310     MFW_DRV_MSG_BW_UPDATE10,
2311     MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
2312     MFW_DRV_MSG_BW_UPDATE11,
2313     - MFW_DRV_MSG_OEM_CFG_UPDATE,
2314     + MFW_DRV_MSG_RESERVED,
2315     MFW_DRV_MSG_GET_TLV_REQ,
2316     + MFW_DRV_MSG_OEM_CFG_UPDATE,
2317     MFW_DRV_MSG_MAX
2318     };
2319    
2320     diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2321     index 4930e0375c1d..9fc8a2bc0ff1 100644
2322     --- a/drivers/net/ethernet/realtek/r8169.c
2323     +++ b/drivers/net/ethernet/realtek/r8169.c
2324     @@ -1528,6 +1528,8 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
2325     }
2326    
2327     RTL_W8(tp, Cfg9346, Cfg9346_Lock);
2328     +
2329     + device_set_wakeup_enable(tp_to_dev(tp), wolopts);
2330     }
2331    
2332     static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2333     @@ -1549,8 +1551,6 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2334    
2335     rtl_unlock_work(tp);
2336    
2337     - device_set_wakeup_enable(d, tp->saved_wolopts);
2338     -
2339     pm_runtime_put_noidle(d);
2340    
2341     return 0;
2342     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2343     index 99ea5c4ce29c..2103b865726a 100644
2344     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2345     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2346     @@ -4247,6 +4247,7 @@ int stmmac_dvr_probe(struct device *device,
2347     priv->wq = create_singlethread_workqueue("stmmac_wq");
2348     if (!priv->wq) {
2349     dev_err(priv->device, "failed to create workqueue\n");
2350     + ret = -ENOMEM;
2351     goto error_wq;
2352     }
2353    
2354     diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
2355     index d79a69dd2146..54e63ec04907 100644
2356     --- a/drivers/net/hamradio/6pack.c
2357     +++ b/drivers/net/hamradio/6pack.c
2358     @@ -524,10 +524,7 @@ static void resync_tnc(struct timer_list *t)
2359    
2360    
2361     /* Start resync timer again -- the TNC might be still absent */
2362     -
2363     - del_timer(&sp->resync_t);
2364     - sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
2365     - add_timer(&sp->resync_t);
2366     + mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
2367     }
2368    
2369     static inline int tnc_init(struct sixpack *sp)
2370     @@ -538,9 +535,7 @@ static inline int tnc_init(struct sixpack *sp)
2371    
2372     sp->tty->ops->write(sp->tty, &inbyte, 1);
2373    
2374     - del_timer(&sp->resync_t);
2375     - sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
2376     - add_timer(&sp->resync_t);
2377     + mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
2378    
2379     return 0;
2380     }
2381     @@ -918,11 +913,8 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
2382     /* if the state byte has been received, the TNC is present,
2383     so the resync timer can be reset. */
2384    
2385     - if (sp->tnc_state == TNC_IN_SYNC) {
2386     - del_timer(&sp->resync_t);
2387     - sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT;
2388     - add_timer(&sp->resync_t);
2389     - }
2390     + if (sp->tnc_state == TNC_IN_SYNC)
2391     + mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT);
2392    
2393     sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
2394     }
2395     diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
2396     index 20d1be2b070b..2c32c795f5dd 100644
2397     --- a/drivers/net/phy/phy_device.c
2398     +++ b/drivers/net/phy/phy_device.c
2399     @@ -164,11 +164,8 @@ static int mdio_bus_phy_restore(struct device *dev)
2400     if (ret < 0)
2401     return ret;
2402    
2403     - /* The PHY needs to renegotiate. */
2404     - phydev->link = 0;
2405     - phydev->state = PHY_UP;
2406     -
2407     - phy_start_machine(phydev);
2408     + if (phydev->attached_dev && phydev->adjust_link)
2409     + phy_start_machine(phydev);
2410    
2411     return 0;
2412     }
2413     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2414     index 72a55b6b4211..f5bac5075386 100644
2415     --- a/drivers/net/usb/qmi_wwan.c
2416     +++ b/drivers/net/usb/qmi_wwan.c
2417     @@ -1117,6 +1117,7 @@ static const struct usb_device_id products[] = {
2418     {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
2419     {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
2420     {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
2421     + {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
2422     {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
2423     {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
2424     {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
2425     @@ -1229,6 +1230,7 @@ static const struct usb_device_id products[] = {
2426     {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
2427     {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
2428     {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
2429     + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
2430     {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
2431     {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
2432     {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
2433     @@ -1263,6 +1265,7 @@ static const struct usb_device_id products[] = {
2434     {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
2435     {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
2436     {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
2437     + {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
2438    
2439     /* 4. Gobi 1000 devices */
2440     {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
2441     diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
2442     index 74c06a5f586f..4f25c2d8fff0 100644
2443     --- a/drivers/net/wan/x25_asy.c
2444     +++ b/drivers/net/wan/x25_asy.c
2445     @@ -486,8 +486,10 @@ static int x25_asy_open(struct net_device *dev)
2446    
2447     /* Cleanup */
2448     kfree(sl->xbuff);
2449     + sl->xbuff = NULL;
2450     noxbuff:
2451     kfree(sl->rbuff);
2452     + sl->rbuff = NULL;
2453     norbuff:
2454     return -ENOMEM;
2455     }
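
Clearing sl->xbuff and sl->rbuff right after kfree() means any later cleanup that walks the same fields sees NULL, and kfree(NULL) is a no-op, so the buffers cannot be freed twice. A small standalone version of that idiom (struct chan_demo and free_buffers are invented names; free() plays the role of kfree()):

#include <stdlib.h>

struct chan_demo {
	char *rbuff;
	char *xbuff;
};

/* Free a buffer and clear the pointer so a later cleanup pass over the same
 * fields sees NULL instead of a dangling pointer (no double free). */
static void free_buffers(struct chan_demo *ch)
{
	free(ch->xbuff);
	ch->xbuff = NULL;
	free(ch->rbuff);
	ch->rbuff = NULL;
}

int main(void)
{
	struct chan_demo ch = { malloc(64), malloc(64) };

	free_buffers(&ch);
	free_buffers(&ch);   /* harmless second pass: free(NULL) is a no-op */
	return 0;
}
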
2456     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2457     index 64a794be7fcb..6f3faaf1b1cb 100644
2458     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2459     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2460     @@ -5188,10 +5188,17 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
2461     .del_pmk = brcmf_cfg80211_del_pmk,
2462     };
2463    
2464     -struct cfg80211_ops *brcmf_cfg80211_get_ops(void)
2465     +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
2466     {
2467     - return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
2468     + struct cfg80211_ops *ops;
2469     +
2470     + ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
2471     GFP_KERNEL);
2472     +
2473     + if (ops && settings->roamoff)
2474     + ops->update_connect_params = NULL;
2475     +
2476     + return ops;
2477     }
2478    
2479     struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
2480     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
2481     index a4aec0004e4f..9a6287f084a9 100644
2482     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
2483     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
2484     @@ -404,7 +404,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
2485     void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
2486     s32 brcmf_cfg80211_up(struct net_device *ndev);
2487     s32 brcmf_cfg80211_down(struct net_device *ndev);
2488     -struct cfg80211_ops *brcmf_cfg80211_get_ops(void);
2489     +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings);
2490     enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
2491    
2492     struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
2493     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
2494     index b1f702faff4f..860a4372cb56 100644
2495     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
2496     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
2497     @@ -1130,7 +1130,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
2498    
2499     brcmf_dbg(TRACE, "Enter\n");
2500    
2501     - ops = brcmf_cfg80211_get_ops();
2502     + ops = brcmf_cfg80211_get_ops(settings);
2503     if (!ops)
2504     return -ENOMEM;
2505    
2506     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2507     index 9095b830ae4d..9927079a9ace 100644
2508     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2509     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
2510     @@ -641,8 +641,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
2511     struct brcmf_fw_request *fwreq;
2512     char chipname[12];
2513     const char *mp_path;
2514     + size_t mp_path_len;
2515     u32 i, j;
2516     - char end;
2517     + char end = '\0';
2518     size_t reqsz;
2519    
2520     for (i = 0; i < table_size; i++) {
2521     @@ -667,7 +668,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
2522     mapping_table[i].fw_base, chipname);
2523    
2524     mp_path = brcmf_mp_global.firmware_path;
2525     - end = mp_path[strlen(mp_path) - 1];
2526     + mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);
2527     + if (mp_path_len)
2528     + end = mp_path[mp_path_len - 1];
2529     +
2530     fwreq->n_items = n_fwnames;
2531    
2532     for (j = 0; j < n_fwnames; j++) {
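
The strnlen() guard matters because the firmware path module parameter may be empty: on an empty string, strlen(mp_path) - 1 wraps around to SIZE_MAX and the index reads far out of bounds, so the last character is only fetched when the bounded length is non-zero. A tiny reproduction of the safe variant (last_char is illustrative, and ALTPATH_LEN stands in for BRCMF_FW_ALTPATH_LEN):

#include <stdio.h>
#include <string.h>

#define ALTPATH_LEN 256   /* illustrative bound */

/* Return the last character of a possibly empty, bounded string, or '\0'. */
static char last_char(const char *path)
{
	size_t len = strnlen(path, ALTPATH_LEN);

	/* path[strlen(path) - 1] on "" would index path[(size_t)-1]. */
	return len ? path[len - 1] : '\0';
}

int main(void)
{
	printf("0x%02x 0x%02x\n",
	       (unsigned char)last_char("/lib/firmware/"),
	       (unsigned char)last_char(""));
	return 0;
}
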
2533     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2534     index f17f602e6171..5b97cc946d70 100644
2535     --- a/drivers/net/xen-netfront.c
2536     +++ b/drivers/net/xen-netfront.c
2537     @@ -905,7 +905,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
2538     if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
2539     unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
2540    
2541     - BUG_ON(pull_to <= skb_headlen(skb));
2542     + BUG_ON(pull_to < skb_headlen(skb));
2543     __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
2544     }
2545     if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
2546     diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
2547     index ad03e2f12f5d..5808a1e4c2e9 100644
2548     --- a/drivers/rtc/rtc-m41t80.c
2549     +++ b/drivers/rtc/rtc-m41t80.c
2550     @@ -393,7 +393,7 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
2551     alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
2552     alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
2553     alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
2554     - alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f);
2555     + alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1;
2556    
2557     alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
2558     alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
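
The subtraction fixes an off-by-one between two conventions: the RTC alarm register stores the month as BCD 1-12, while struct rtc_time (like struct tm) counts months from 0. A worked example of the conversion, with bcd2bin_demo standing in for the kernel's bcd2bin():

#include <stdio.h>

/* BCD to binary, as used for RTC register fields. */
static unsigned int bcd2bin_demo(unsigned char bcd)
{
	return (bcd >> 4) * 10 + (bcd & 0x0f);
}

int main(void)
{
	unsigned char alarm_month_reg = 0x12;   /* December, stored as BCD 1-12 */

	/* struct rtc_time / struct tm count months from 0, so subtract 1. */
	int tm_mon = bcd2bin_demo(alarm_month_reg & 0x3f) - 1;

	printf("register 0x%02x -> tm_mon %d\n", alarm_month_reg, tm_mon);
	return 0;
}
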
2559     diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
2560     index f35cc10772f6..25abf2d1732a 100644
2561     --- a/drivers/spi/spi-bcm2835.c
2562     +++ b/drivers/spi/spi-bcm2835.c
2563     @@ -88,7 +88,7 @@ struct bcm2835_spi {
2564     u8 *rx_buf;
2565     int tx_len;
2566     int rx_len;
2567     - bool dma_pending;
2568     + unsigned int dma_pending;
2569     };
2570    
2571     static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
2572     @@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
2573     /* Write as many bytes as possible to FIFO */
2574     bcm2835_wr_fifo(bs);
2575    
2576     - /* based on flags decide if we can finish the transfer */
2577     - if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
2578     + if (!bs->rx_len) {
2579     /* Transfer complete - reset SPI HW */
2580     bcm2835_spi_reset_hw(master);
2581     /* wake up the framework */
2582     @@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data)
2583     * is called the tx-dma must have finished - can't get to this
2584     * situation otherwise...
2585     */
2586     - dmaengine_terminate_all(master->dma_tx);
2587     -
2588     - /* mark as no longer pending */
2589     - bs->dma_pending = 0;
2590     + if (cmpxchg(&bs->dma_pending, true, false)) {
2591     + dmaengine_terminate_all(master->dma_tx);
2592     + }
2593    
2594     /* and mark as completed */;
2595     complete(&master->xfer_completion);
2596     @@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
2597     if (ret) {
2598     /* need to reset on errors */
2599     dmaengine_terminate_all(master->dma_tx);
2600     + bs->dma_pending = false;
2601     bcm2835_spi_reset_hw(master);
2602     return ret;
2603     }
2604     @@ -617,10 +616,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master,
2605     struct bcm2835_spi *bs = spi_master_get_devdata(master);
2606    
2607     /* if an error occurred and we have an active dma, then terminate */
2608     - if (bs->dma_pending) {
2609     + if (cmpxchg(&bs->dma_pending, true, false)) {
2610     dmaengine_terminate_all(master->dma_tx);
2611     dmaengine_terminate_all(master->dma_rx);
2612     - bs->dma_pending = 0;
2613     }
2614     /* and reset */
2615     bcm2835_spi_reset_hw(master);
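
Turning dma_pending into a word-sized flag claimed with cmpxchg() guarantees that whichever path runs first, the DMA completion callback or the error handler, performs the terminate exactly once. A userspace sketch of that claim-once pattern using C11 atomics in place of the kernel's cmpxchg() (finish_dma and the caller strings are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dma_pending = 1;   /* set when the DMA was submitted */

/* Claim the flag; only the first caller observes the old value 1 and does
 * the single-shot termination work, everyone else backs off. */
static void finish_dma(const char *who)
{
	unsigned int expected = 1;

	if (atomic_compare_exchange_strong(&dma_pending, &expected, 0))
		printf("%s terminates the DMA\n", who);
	else
		printf("%s: nothing to do, already terminated\n", who);
}

int main(void)
{
	finish_dma("completion callback");
	finish_dma("error handler");      /* second caller is a no-op */
	return 0;
}
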
2616     diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
2617     index b2080d8b801f..e52c3bdeaf04 100644
2618     --- a/drivers/staging/wilc1000/wilc_sdio.c
2619     +++ b/drivers/staging/wilc1000/wilc_sdio.c
2620     @@ -831,6 +831,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status)
2621     if (!g_sdio.irq_gpio) {
2622     int i;
2623    
2624     + cmd.read_write = 0;
2625     cmd.function = 1;
2626     cmd.address = 0x04;
2627     cmd.data = 0;
2628     diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
2629     index a48f19b1b88f..6ed74735b58c 100644
2630     --- a/drivers/tty/serial/xilinx_uartps.c
2631     +++ b/drivers/tty/serial/xilinx_uartps.c
2632     @@ -125,7 +125,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
2633     #define CDNS_UART_IXR_RXTRIG 0x00000001 /* RX FIFO trigger interrupt */
2634     #define CDNS_UART_IXR_RXFULL 0x00000004 /* RX FIFO full interrupt. */
2635     #define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
2636     -#define CDNS_UART_IXR_MASK 0x00001FFF /* Valid bit mask */
2637     +#define CDNS_UART_IXR_RXMASK 0x000021e7 /* Valid RX bit mask */
2638    
2639     /*
2640     * Do not enable parity error interrupt for the following
2641     @@ -362,7 +362,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
2642     cdns_uart_handle_tx(dev_id);
2643     isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
2644     }
2645     - if (isrstatus & CDNS_UART_IXR_MASK)
2646     + if (isrstatus & CDNS_UART_IXR_RXMASK)
2647     cdns_uart_handle_rx(dev_id, isrstatus);
2648    
2649     spin_unlock(&port->lock);
2650     diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
2651     index 987fc5ba6321..70e6c956c23c 100644
2652     --- a/drivers/usb/Kconfig
2653     +++ b/drivers/usb/Kconfig
2654     @@ -205,8 +205,4 @@ config USB_ULPI_BUS
2655     To compile this driver as a module, choose M here: the module will
2656     be called ulpi.
2657    
2658     -config USB_ROLE_SWITCH
2659     - tristate
2660     - select USB_COMMON
2661     -
2662     endif # USB_SUPPORT
2663     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2664     index 40c64c7ab5e4..2886b7b477c7 100644
2665     --- a/drivers/usb/class/cdc-acm.c
2666     +++ b/drivers/usb/class/cdc-acm.c
2667     @@ -581,6 +581,13 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
2668     if (retval)
2669     goto error_init_termios;
2670    
2671     + /*
2672     + * Suppress initial echoing for some devices which might send data
2673     + * immediately after acm driver has been installed.
2674     + */
2675     + if (acm->quirks & DISABLE_ECHO)
2676     + tty->termios.c_lflag &= ~ECHO;
2677     +
2678     tty->driver_data = acm;
2679    
2680     return 0;
2681     @@ -1672,6 +1679,9 @@ static const struct usb_device_id acm_ids[] = {
2682     { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
2683     .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
2684     },
2685     + { USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
2686     + .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
2687     + },
2688     { USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
2689     .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
2690     },
2691     diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
2692     index ca06b20d7af9..515aad0847ee 100644
2693     --- a/drivers/usb/class/cdc-acm.h
2694     +++ b/drivers/usb/class/cdc-acm.h
2695     @@ -140,3 +140,4 @@ struct acm {
2696     #define QUIRK_CONTROL_LINE_STATE BIT(6)
2697     #define CLEAR_HALT_CONDITIONS BIT(7)
2698     #define SEND_ZERO_PACKET BIT(8)
2699     +#define DISABLE_ECHO BIT(9)
2700     diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
2701     index fb4d5ef4165c..0a7c45e85481 100644
2702     --- a/drivers/usb/common/Makefile
2703     +++ b/drivers/usb/common/Makefile
2704     @@ -9,4 +9,3 @@ usb-common-$(CONFIG_USB_LED_TRIG) += led.o
2705    
2706     obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o
2707     obj-$(CONFIG_USB_ULPI_BUS) += ulpi.o
2708     -obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
2709     diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
2710     deleted file mode 100644
2711     index 99116af07f1d..000000000000
2712     --- a/drivers/usb/common/roles.c
2713     +++ /dev/null
2714     @@ -1,314 +0,0 @@
2715     -// SPDX-License-Identifier: GPL-2.0
2716     -/*
2717     - * USB Role Switch Support
2718     - *
2719     - * Copyright (C) 2018 Intel Corporation
2720     - * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
2721     - * Hans de Goede <hdegoede@redhat.com>
2722     - */
2723     -
2724     -#include <linux/usb/role.h>
2725     -#include <linux/device.h>
2726     -#include <linux/module.h>
2727     -#include <linux/mutex.h>
2728     -#include <linux/slab.h>
2729     -
2730     -static struct class *role_class;
2731     -
2732     -struct usb_role_switch {
2733     - struct device dev;
2734     - struct mutex lock; /* device lock*/
2735     - enum usb_role role;
2736     -
2737     - /* From descriptor */
2738     - struct device *usb2_port;
2739     - struct device *usb3_port;
2740     - struct device *udc;
2741     - usb_role_switch_set_t set;
2742     - usb_role_switch_get_t get;
2743     - bool allow_userspace_control;
2744     -};
2745     -
2746     -#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
2747     -
2748     -/**
2749     - * usb_role_switch_set_role - Set USB role for a switch
2750     - * @sw: USB role switch
2751     - * @role: USB role to be switched to
2752     - *
2753     - * Set USB role @role for @sw.
2754     - */
2755     -int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
2756     -{
2757     - int ret;
2758     -
2759     - if (IS_ERR_OR_NULL(sw))
2760     - return 0;
2761     -
2762     - mutex_lock(&sw->lock);
2763     -
2764     - ret = sw->set(sw->dev.parent, role);
2765     - if (!ret)
2766     - sw->role = role;
2767     -
2768     - mutex_unlock(&sw->lock);
2769     -
2770     - return ret;
2771     -}
2772     -EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
2773     -
2774     -/**
2775     - * usb_role_switch_get_role - Get the USB role for a switch
2776     - * @sw: USB role switch
2777     - *
2778     - * Depending on the role-switch-driver this function returns either a cached
2779     - * value of the last set role, or reads back the actual value from the hardware.
2780     - */
2781     -enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
2782     -{
2783     - enum usb_role role;
2784     -
2785     - if (IS_ERR_OR_NULL(sw))
2786     - return USB_ROLE_NONE;
2787     -
2788     - mutex_lock(&sw->lock);
2789     -
2790     - if (sw->get)
2791     - role = sw->get(sw->dev.parent);
2792     - else
2793     - role = sw->role;
2794     -
2795     - mutex_unlock(&sw->lock);
2796     -
2797     - return role;
2798     -}
2799     -EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
2800     -
2801     -static int __switch_match(struct device *dev, const void *name)
2802     -{
2803     - return !strcmp((const char *)name, dev_name(dev));
2804     -}
2805     -
2806     -static void *usb_role_switch_match(struct device_connection *con, int ep,
2807     - void *data)
2808     -{
2809     - struct device *dev;
2810     -
2811     - dev = class_find_device(role_class, NULL, con->endpoint[ep],
2812     - __switch_match);
2813     -
2814     - return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
2815     -}
2816     -
2817     -/**
2818     - * usb_role_switch_get - Find USB role switch linked with the caller
2819     - * @dev: The caller device
2820     - *
2821     - * Finds and returns role switch linked with @dev. The reference count for the
2822     - * found switch is incremented.
2823     - */
2824     -struct usb_role_switch *usb_role_switch_get(struct device *dev)
2825     -{
2826     - struct usb_role_switch *sw;
2827     -
2828     - sw = device_connection_find_match(dev, "usb-role-switch", NULL,
2829     - usb_role_switch_match);
2830     -
2831     - if (!IS_ERR_OR_NULL(sw))
2832     - WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
2833     -
2834     - return sw;
2835     -}
2836     -EXPORT_SYMBOL_GPL(usb_role_switch_get);
2837     -
2838     -/**
2839     - * usb_role_switch_put - Release handle to a switch
2840     - * @sw: USB Role Switch
2841     - *
2842     - * Decrement reference count for @sw.
2843     - */
2844     -void usb_role_switch_put(struct usb_role_switch *sw)
2845     -{
2846     - if (!IS_ERR_OR_NULL(sw)) {
2847     - put_device(&sw->dev);
2848     - module_put(sw->dev.parent->driver->owner);
2849     - }
2850     -}
2851     -EXPORT_SYMBOL_GPL(usb_role_switch_put);
2852     -
2853     -static umode_t
2854     -usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
2855     -{
2856     - struct device *dev = container_of(kobj, typeof(*dev), kobj);
2857     - struct usb_role_switch *sw = to_role_switch(dev);
2858     -
2859     - if (sw->allow_userspace_control)
2860     - return attr->mode;
2861     -
2862     - return 0;
2863     -}
2864     -
2865     -static const char * const usb_roles[] = {
2866     - [USB_ROLE_NONE] = "none",
2867     - [USB_ROLE_HOST] = "host",
2868     - [USB_ROLE_DEVICE] = "device",
2869     -};
2870     -
2871     -static ssize_t
2872     -role_show(struct device *dev, struct device_attribute *attr, char *buf)
2873     -{
2874     - struct usb_role_switch *sw = to_role_switch(dev);
2875     - enum usb_role role = usb_role_switch_get_role(sw);
2876     -
2877     - return sprintf(buf, "%s\n", usb_roles[role]);
2878     -}
2879     -
2880     -static ssize_t role_store(struct device *dev, struct device_attribute *attr,
2881     - const char *buf, size_t size)
2882     -{
2883     - struct usb_role_switch *sw = to_role_switch(dev);
2884     - int ret;
2885     -
2886     - ret = sysfs_match_string(usb_roles, buf);
2887     - if (ret < 0) {
2888     - bool res;
2889     -
2890     - /* Extra check if the user wants to disable the switch */
2891     - ret = kstrtobool(buf, &res);
2892     - if (ret || res)
2893     - return -EINVAL;
2894     - }
2895     -
2896     - ret = usb_role_switch_set_role(sw, ret);
2897     - if (ret)
2898     - return ret;
2899     -
2900     - return size;
2901     -}
2902     -static DEVICE_ATTR_RW(role);
2903     -
2904     -static struct attribute *usb_role_switch_attrs[] = {
2905     - &dev_attr_role.attr,
2906     - NULL,
2907     -};
2908     -
2909     -static const struct attribute_group usb_role_switch_group = {
2910     - .is_visible = usb_role_switch_is_visible,
2911     - .attrs = usb_role_switch_attrs,
2912     -};
2913     -
2914     -static const struct attribute_group *usb_role_switch_groups[] = {
2915     - &usb_role_switch_group,
2916     - NULL,
2917     -};
2918     -
2919     -static int
2920     -usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2921     -{
2922     - int ret;
2923     -
2924     - ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
2925     - if (ret)
2926     - dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
2927     -
2928     - return ret;
2929     -}
2930     -
2931     -static void usb_role_switch_release(struct device *dev)
2932     -{
2933     - struct usb_role_switch *sw = to_role_switch(dev);
2934     -
2935     - kfree(sw);
2936     -}
2937     -
2938     -static const struct device_type usb_role_dev_type = {
2939     - .name = "usb_role_switch",
2940     - .groups = usb_role_switch_groups,
2941     - .uevent = usb_role_switch_uevent,
2942     - .release = usb_role_switch_release,
2943     -};
2944     -
2945     -/**
2946     - * usb_role_switch_register - Register USB Role Switch
2947     - * @parent: Parent device for the switch
2948     - * @desc: Description of the switch
2949     - *
2950     - * USB Role Switch is a device capable or choosing the role for USB connector.
2951     - * On platforms where the USB controller is dual-role capable, the controller
2952     - * driver will need to register the switch. On platforms where the USB host and
2953     - * USB device controllers behind the connector are separate, there will be a
2954     - * mux, and the driver for that mux will need to register the switch.
2955     - *
2956     - * Returns handle to a new role switch or ERR_PTR. The content of @desc is
2957     - * copied.
2958     - */
2959     -struct usb_role_switch *
2960     -usb_role_switch_register(struct device *parent,
2961     - const struct usb_role_switch_desc *desc)
2962     -{
2963     - struct usb_role_switch *sw;
2964     - int ret;
2965     -
2966     - if (!desc || !desc->set)
2967     - return ERR_PTR(-EINVAL);
2968     -
2969     - sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2970     - if (!sw)
2971     - return ERR_PTR(-ENOMEM);
2972     -
2973     - mutex_init(&sw->lock);
2974     -
2975     - sw->allow_userspace_control = desc->allow_userspace_control;
2976     - sw->usb2_port = desc->usb2_port;
2977     - sw->usb3_port = desc->usb3_port;
2978     - sw->udc = desc->udc;
2979     - sw->set = desc->set;
2980     - sw->get = desc->get;
2981     -
2982     - sw->dev.parent = parent;
2983     - sw->dev.class = role_class;
2984     - sw->dev.type = &usb_role_dev_type;
2985     - dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
2986     -
2987     - ret = device_register(&sw->dev);
2988     - if (ret) {
2989     - put_device(&sw->dev);
2990     - return ERR_PTR(ret);
2991     - }
2992     -
2993     - /* TODO: Symlinks for the host port and the device controller. */
2994     -
2995     - return sw;
2996     -}
2997     -EXPORT_SYMBOL_GPL(usb_role_switch_register);
2998     -
2999     -/**
3000     - * usb_role_switch_unregister - Unregsiter USB Role Switch
3001     - * @sw: USB Role Switch
3002     - *
3003     - * Unregister switch that was registered with usb_role_switch_register().
3004     - */
3005     -void usb_role_switch_unregister(struct usb_role_switch *sw)
3006     -{
3007     - if (!IS_ERR_OR_NULL(sw))
3008     - device_unregister(&sw->dev);
3009     -}
3010     -EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
3011     -
3012     -static int __init usb_roles_init(void)
3013     -{
3014     - role_class = class_create(THIS_MODULE, "usb_role");
3015     - return PTR_ERR_OR_ZERO(role_class);
3016     -}
3017     -subsys_initcall(usb_roles_init);
3018     -
3019     -static void __exit usb_roles_exit(void)
3020     -{
3021     - class_destroy(role_class);
3022     -}
3023     -module_exit(usb_roles_exit);
3024     -
3025     -MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
3026     -MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
3027     -MODULE_LICENSE("GPL v2");
3028     -MODULE_DESCRIPTION("USB Role Class");
3029     diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
3030     index 3f9bccc95add..c089ffa1f0a8 100644
3031     --- a/drivers/usb/dwc2/hcd.h
3032     +++ b/drivers/usb/dwc2/hcd.h
3033     @@ -366,7 +366,7 @@ struct dwc2_qh {
3034     u32 desc_list_sz;
3035     u32 *n_bytes;
3036     struct timer_list unreserve_timer;
3037     - struct timer_list wait_timer;
3038     + struct hrtimer wait_timer;
3039     struct dwc2_tt *dwc_tt;
3040     int ttport;
3041     unsigned tt_buffer_dirty:1;
3042     diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
3043     index 40839591d2ec..ea3aa640c15c 100644
3044     --- a/drivers/usb/dwc2/hcd_queue.c
3045     +++ b/drivers/usb/dwc2/hcd_queue.c
3046     @@ -59,7 +59,7 @@
3047     #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
3048    
3049     /* If we get a NAK, wait this long before retrying */
3050     -#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1))
3051     +#define DWC2_RETRY_WAIT_DELAY 1*1E6L
3052    
3053     /**
3054     * dwc2_periodic_channel_available() - Checks that a channel is available for a
3055     @@ -1464,10 +1464,12 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
3056     * qh back to the "inactive" list, then queues transactions.
3057     *
3058     * @t: Pointer to wait_timer in a qh.
3059     + *
3060     + * Return: HRTIMER_NORESTART to not automatically restart this timer.
3061     */
3062     -static void dwc2_wait_timer_fn(struct timer_list *t)
3063     +static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
3064     {
3065     - struct dwc2_qh *qh = from_timer(qh, t, wait_timer);
3066     + struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
3067     struct dwc2_hsotg *hsotg = qh->hsotg;
3068     unsigned long flags;
3069    
3070     @@ -1491,6 +1493,7 @@ static void dwc2_wait_timer_fn(struct timer_list *t)
3071     }
3072    
3073     spin_unlock_irqrestore(&hsotg->lock, flags);
3074     + return HRTIMER_NORESTART;
3075     }
3076    
3077     /**
3078     @@ -1521,7 +1524,8 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
3079     /* Initialize QH */
3080     qh->hsotg = hsotg;
3081     timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
3082     - timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0);
3083     + hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3084     + qh->wait_timer.function = &dwc2_wait_timer_fn;
3085     qh->ep_type = ep_type;
3086     qh->ep_is_in = ep_is_in;
3087    
3088     @@ -1690,7 +1694,7 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
3089     * won't do anything anyway, but we want it to finish before we free
3090     * memory.
3091     */
3092     - del_timer_sync(&qh->wait_timer);
3093     + hrtimer_cancel(&qh->wait_timer);
3094    
3095     dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
3096    
3097     @@ -1716,6 +1720,7 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
3098     {
3099     int status;
3100     u32 intr_mask;
3101     + ktime_t delay;
3102    
3103     if (dbg_qh(qh))
3104     dev_vdbg(hsotg->dev, "%s()\n", __func__);
3105     @@ -1734,8 +1739,8 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
3106     list_add_tail(&qh->qh_list_entry,
3107     &hsotg->non_periodic_sched_waiting);
3108     qh->wait_timer_cancel = false;
3109     - mod_timer(&qh->wait_timer,
3110     - jiffies + DWC2_RETRY_WAIT_DELAY + 1);
3111     + delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
3112     + hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
3113     } else {
3114     list_add_tail(&qh->qh_list_entry,
3115     &hsotg->non_periodic_sched_inactive);
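
The dwc2 change above replaces a jiffies-based timer with an hrtimer because the intended NAK retry delay is about 1 ms, which coarse-HZ systems can round up to many milliseconds; DWC2_RETRY_WAIT_DELAY is now expressed directly as 1,000,000 ns. The userspace sketch below arms a one-shot 1 ms timer on CLOCK_MONOTONIC via timerfd purely to show nanosecond-granularity arming; it is not the in-kernel hrtimer API.

/* One-shot 1 ms (1,000,000 ns) timer on CLOCK_MONOTONIC, Linux userspace. */
#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <unistd.h>

int main(void)
{
    int tfd = timerfd_create(CLOCK_MONOTONIC, 0);
    if (tfd < 0) {
        perror("timerfd_create");
        return 1;
    }

    struct itimerspec its = {
        .it_value = { .tv_sec = 0, .tv_nsec = 1 * 1000000L }, /* 1 ms */
    };
    if (timerfd_settime(tfd, 0, &its, NULL) < 0) {
        perror("timerfd_settime");
        return 1;
    }

    uint64_t expirations;
    if (read(tfd, &expirations, sizeof(expirations)) == sizeof(expirations))
        printf("timer fired %llu time(s)\n", (unsigned long long)expirations);

    close(tfd);
    return 0;
}
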
3116     diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
3117     index bf7052e037d6..ef2c199e6059 100644
3118     --- a/drivers/usb/dwc2/params.c
3119     +++ b/drivers/usb/dwc2/params.c
3120     @@ -110,6 +110,7 @@ static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
3121     p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
3122     p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
3123     GAHBCFG_HBSTLEN_SHIFT;
3124     + p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
3125     }
3126    
3127     static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
3128     diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
3129     index 842795856bf4..fdc6e4e403e8 100644
3130     --- a/drivers/usb/dwc3/dwc3-pci.c
3131     +++ b/drivers/usb/dwc3/dwc3-pci.c
3132     @@ -170,20 +170,20 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
3133     * put the gpio descriptors again here because the phy driver
3134     * might want to grab them, too.
3135     */
3136     - gpio = devm_gpiod_get_optional(&pdev->dev, "cs",
3137     - GPIOD_OUT_LOW);
3138     + gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
3139     if (IS_ERR(gpio))
3140     return PTR_ERR(gpio);
3141    
3142     gpiod_set_value_cansleep(gpio, 1);
3143     + gpiod_put(gpio);
3144    
3145     - gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
3146     - GPIOD_OUT_LOW);
3147     + gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
3148     if (IS_ERR(gpio))
3149     return PTR_ERR(gpio);
3150    
3151     if (gpio) {
3152     gpiod_set_value_cansleep(gpio, 1);
3153     + gpiod_put(gpio);
3154     usleep_range(10000, 11000);
3155     }
3156     }
3157     diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
3158     index 984892dd72f5..42668aeca57c 100644
3159     --- a/drivers/usb/host/r8a66597-hcd.c
3160     +++ b/drivers/usb/host/r8a66597-hcd.c
3161     @@ -1979,6 +1979,8 @@ static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
3162    
3163     static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
3164     struct usb_host_endpoint *hep)
3165     +__acquires(r8a66597->lock)
3166     +__releases(r8a66597->lock)
3167     {
3168     struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
3169     struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
3170     @@ -1991,13 +1993,14 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
3171     return;
3172     pipenum = pipe->info.pipenum;
3173    
3174     + spin_lock_irqsave(&r8a66597->lock, flags);
3175     if (pipenum == 0) {
3176     kfree(hep->hcpriv);
3177     hep->hcpriv = NULL;
3178     + spin_unlock_irqrestore(&r8a66597->lock, flags);
3179     return;
3180     }
3181    
3182     - spin_lock_irqsave(&r8a66597->lock, flags);
3183     pipe_stop(r8a66597, pipe);
3184     pipe_irq_disable(r8a66597, pipenum);
3185     disable_irq_empty(r8a66597, pipenum);
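
The r8a66597 hunk widens the lock scope so hep->hcpriv is freed and the endpoint torn down entirely under r8a66597->lock, instead of freeing it before the lock is taken. A loose userspace analogue of freeing and clearing a shared pointer under the same lock its users take; a pthread mutex stands in for the IRQ-safe spinlock and all names are invented.

/* Free-and-clear a shared pointer under the lock its users also take. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared;                      /* plays the role of hep->hcpriv */

static void endpoint_disable(void)
{
    pthread_mutex_lock(&lock);           /* take the lock before freeing */
    free(shared);
    shared = NULL;                       /* others see "gone", not a stale pointer */
    pthread_mutex_unlock(&lock);
}

static void endpoint_use(void)
{
    pthread_mutex_lock(&lock);
    if (shared)
        printf("using value %d\n", *shared);
    else
        printf("endpoint already torn down\n");
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    shared = malloc(sizeof(*shared));
    if (!shared)
        return 1;
    *shared = 7;

    endpoint_use();
    endpoint_disable();
    endpoint_use();                      /* safely observes NULL */
    return 0;
}
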
3186     diff --git a/drivers/usb/roles/Kconfig b/drivers/usb/roles/Kconfig
3187     index f5a5e6f79f1b..e4194ac94510 100644
3188     --- a/drivers/usb/roles/Kconfig
3189     +++ b/drivers/usb/roles/Kconfig
3190     @@ -1,3 +1,16 @@
3191     +config USB_ROLE_SWITCH
3192     + tristate "USB Role Switch Support"
3193     + help
3194     + USB Role Switch is a device that can select the USB role - host or
3195     + device - for a USB port (connector). In most cases dual-role capable
3196     + USB controller will also represent the switch, but on some platforms
3197     + multiplexer/demultiplexer switch is used to route the data lines on
3198     + the USB connector between separate USB host and device controllers.
3199     +
3200     + Say Y here if your USB connectors support both device and host roles.
3201     + To compile the driver as module, choose M here: the module will be
3202     + called roles.ko.
3203     +
3204     if USB_ROLE_SWITCH
3205    
3206     config USB_ROLES_INTEL_XHCI
3207     diff --git a/drivers/usb/roles/Makefile b/drivers/usb/roles/Makefile
3208     index e44b179ba275..c02873206fc1 100644
3209     --- a/drivers/usb/roles/Makefile
3210     +++ b/drivers/usb/roles/Makefile
3211     @@ -1 +1,3 @@
3212     -obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
3213     +obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
3214     +roles-y := class.o
3215     +obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
3216     diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
3217     new file mode 100644
3218     index 000000000000..99116af07f1d
3219     --- /dev/null
3220     +++ b/drivers/usb/roles/class.c
3221     @@ -0,0 +1,314 @@
3222     +// SPDX-License-Identifier: GPL-2.0
3223     +/*
3224     + * USB Role Switch Support
3225     + *
3226     + * Copyright (C) 2018 Intel Corporation
3227     + * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
3228     + * Hans de Goede <hdegoede@redhat.com>
3229     + */
3230     +
3231     +#include <linux/usb/role.h>
3232     +#include <linux/device.h>
3233     +#include <linux/module.h>
3234     +#include <linux/mutex.h>
3235     +#include <linux/slab.h>
3236     +
3237     +static struct class *role_class;
3238     +
3239     +struct usb_role_switch {
3240     + struct device dev;
3241     + struct mutex lock; /* device lock*/
3242     + enum usb_role role;
3243     +
3244     + /* From descriptor */
3245     + struct device *usb2_port;
3246     + struct device *usb3_port;
3247     + struct device *udc;
3248     + usb_role_switch_set_t set;
3249     + usb_role_switch_get_t get;
3250     + bool allow_userspace_control;
3251     +};
3252     +
3253     +#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
3254     +
3255     +/**
3256     + * usb_role_switch_set_role - Set USB role for a switch
3257     + * @sw: USB role switch
3258     + * @role: USB role to be switched to
3259     + *
3260     + * Set USB role @role for @sw.
3261     + */
3262     +int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
3263     +{
3264     + int ret;
3265     +
3266     + if (IS_ERR_OR_NULL(sw))
3267     + return 0;
3268     +
3269     + mutex_lock(&sw->lock);
3270     +
3271     + ret = sw->set(sw->dev.parent, role);
3272     + if (!ret)
3273     + sw->role = role;
3274     +
3275     + mutex_unlock(&sw->lock);
3276     +
3277     + return ret;
3278     +}
3279     +EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
3280     +
3281     +/**
3282     + * usb_role_switch_get_role - Get the USB role for a switch
3283     + * @sw: USB role switch
3284     + *
3285     + * Depending on the role-switch-driver this function returns either a cached
3286     + * value of the last set role, or reads back the actual value from the hardware.
3287     + */
3288     +enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
3289     +{
3290     + enum usb_role role;
3291     +
3292     + if (IS_ERR_OR_NULL(sw))
3293     + return USB_ROLE_NONE;
3294     +
3295     + mutex_lock(&sw->lock);
3296     +
3297     + if (sw->get)
3298     + role = sw->get(sw->dev.parent);
3299     + else
3300     + role = sw->role;
3301     +
3302     + mutex_unlock(&sw->lock);
3303     +
3304     + return role;
3305     +}
3306     +EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
3307     +
3308     +static int __switch_match(struct device *dev, const void *name)
3309     +{
3310     + return !strcmp((const char *)name, dev_name(dev));
3311     +}
3312     +
3313     +static void *usb_role_switch_match(struct device_connection *con, int ep,
3314     + void *data)
3315     +{
3316     + struct device *dev;
3317     +
3318     + dev = class_find_device(role_class, NULL, con->endpoint[ep],
3319     + __switch_match);
3320     +
3321     + return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
3322     +}
3323     +
3324     +/**
3325     + * usb_role_switch_get - Find USB role switch linked with the caller
3326     + * @dev: The caller device
3327     + *
3328     + * Finds and returns role switch linked with @dev. The reference count for the
3329     + * found switch is incremented.
3330     + */
3331     +struct usb_role_switch *usb_role_switch_get(struct device *dev)
3332     +{
3333     + struct usb_role_switch *sw;
3334     +
3335     + sw = device_connection_find_match(dev, "usb-role-switch", NULL,
3336     + usb_role_switch_match);
3337     +
3338     + if (!IS_ERR_OR_NULL(sw))
3339     + WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
3340     +
3341     + return sw;
3342     +}
3343     +EXPORT_SYMBOL_GPL(usb_role_switch_get);
3344     +
3345     +/**
3346     + * usb_role_switch_put - Release handle to a switch
3347     + * @sw: USB Role Switch
3348     + *
3349     + * Decrement reference count for @sw.
3350     + */
3351     +void usb_role_switch_put(struct usb_role_switch *sw)
3352     +{
3353     + if (!IS_ERR_OR_NULL(sw)) {
3354     + put_device(&sw->dev);
3355     + module_put(sw->dev.parent->driver->owner);
3356     + }
3357     +}
3358     +EXPORT_SYMBOL_GPL(usb_role_switch_put);
3359     +
3360     +static umode_t
3361     +usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
3362     +{
3363     + struct device *dev = container_of(kobj, typeof(*dev), kobj);
3364     + struct usb_role_switch *sw = to_role_switch(dev);
3365     +
3366     + if (sw->allow_userspace_control)
3367     + return attr->mode;
3368     +
3369     + return 0;
3370     +}
3371     +
3372     +static const char * const usb_roles[] = {
3373     + [USB_ROLE_NONE] = "none",
3374     + [USB_ROLE_HOST] = "host",
3375     + [USB_ROLE_DEVICE] = "device",
3376     +};
3377     +
3378     +static ssize_t
3379     +role_show(struct device *dev, struct device_attribute *attr, char *buf)
3380     +{
3381     + struct usb_role_switch *sw = to_role_switch(dev);
3382     + enum usb_role role = usb_role_switch_get_role(sw);
3383     +
3384     + return sprintf(buf, "%s\n", usb_roles[role]);
3385     +}
3386     +
3387     +static ssize_t role_store(struct device *dev, struct device_attribute *attr,
3388     + const char *buf, size_t size)
3389     +{
3390     + struct usb_role_switch *sw = to_role_switch(dev);
3391     + int ret;
3392     +
3393     + ret = sysfs_match_string(usb_roles, buf);
3394     + if (ret < 0) {
3395     + bool res;
3396     +
3397     + /* Extra check if the user wants to disable the switch */
3398     + ret = kstrtobool(buf, &res);
3399     + if (ret || res)
3400     + return -EINVAL;
3401     + }
3402     +
3403     + ret = usb_role_switch_set_role(sw, ret);
3404     + if (ret)
3405     + return ret;
3406     +
3407     + return size;
3408     +}
3409     +static DEVICE_ATTR_RW(role);
3410     +
3411     +static struct attribute *usb_role_switch_attrs[] = {
3412     + &dev_attr_role.attr,
3413     + NULL,
3414     +};
3415     +
3416     +static const struct attribute_group usb_role_switch_group = {
3417     + .is_visible = usb_role_switch_is_visible,
3418     + .attrs = usb_role_switch_attrs,
3419     +};
3420     +
3421     +static const struct attribute_group *usb_role_switch_groups[] = {
3422     + &usb_role_switch_group,
3423     + NULL,
3424     +};
3425     +
3426     +static int
3427     +usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
3428     +{
3429     + int ret;
3430     +
3431     + ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
3432     + if (ret)
3433     + dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
3434     +
3435     + return ret;
3436     +}
3437     +
3438     +static void usb_role_switch_release(struct device *dev)
3439     +{
3440     + struct usb_role_switch *sw = to_role_switch(dev);
3441     +
3442     + kfree(sw);
3443     +}
3444     +
3445     +static const struct device_type usb_role_dev_type = {
3446     + .name = "usb_role_switch",
3447     + .groups = usb_role_switch_groups,
3448     + .uevent = usb_role_switch_uevent,
3449     + .release = usb_role_switch_release,
3450     +};
3451     +
3452     +/**
3453     + * usb_role_switch_register - Register USB Role Switch
3454     + * @parent: Parent device for the switch
3455     + * @desc: Description of the switch
3456     + *
3457     + * USB Role Switch is a device capable or choosing the role for USB connector.
3458     + * On platforms where the USB controller is dual-role capable, the controller
3459     + * driver will need to register the switch. On platforms where the USB host and
3460     + * USB device controllers behind the connector are separate, there will be a
3461     + * mux, and the driver for that mux will need to register the switch.
3462     + *
3463     + * Returns handle to a new role switch or ERR_PTR. The content of @desc is
3464     + * copied.
3465     + */
3466     +struct usb_role_switch *
3467     +usb_role_switch_register(struct device *parent,
3468     + const struct usb_role_switch_desc *desc)
3469     +{
3470     + struct usb_role_switch *sw;
3471     + int ret;
3472     +
3473     + if (!desc || !desc->set)
3474     + return ERR_PTR(-EINVAL);
3475     +
3476     + sw = kzalloc(sizeof(*sw), GFP_KERNEL);
3477     + if (!sw)
3478     + return ERR_PTR(-ENOMEM);
3479     +
3480     + mutex_init(&sw->lock);
3481     +
3482     + sw->allow_userspace_control = desc->allow_userspace_control;
3483     + sw->usb2_port = desc->usb2_port;
3484     + sw->usb3_port = desc->usb3_port;
3485     + sw->udc = desc->udc;
3486     + sw->set = desc->set;
3487     + sw->get = desc->get;
3488     +
3489     + sw->dev.parent = parent;
3490     + sw->dev.class = role_class;
3491     + sw->dev.type = &usb_role_dev_type;
3492     + dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
3493     +
3494     + ret = device_register(&sw->dev);
3495     + if (ret) {
3496     + put_device(&sw->dev);
3497     + return ERR_PTR(ret);
3498     + }
3499     +
3500     + /* TODO: Symlinks for the host port and the device controller. */
3501     +
3502     + return sw;
3503     +}
3504     +EXPORT_SYMBOL_GPL(usb_role_switch_register);
3505     +
3506     +/**
3507     + * usb_role_switch_unregister - Unregsiter USB Role Switch
3508     + * @sw: USB Role Switch
3509     + *
3510     + * Unregister switch that was registered with usb_role_switch_register().
3511     + */
3512     +void usb_role_switch_unregister(struct usb_role_switch *sw)
3513     +{
3514     + if (!IS_ERR_OR_NULL(sw))
3515     + device_unregister(&sw->dev);
3516     +}
3517     +EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
3518     +
3519     +static int __init usb_roles_init(void)
3520     +{
3521     + role_class = class_create(THIS_MODULE, "usb_role");
3522     + return PTR_ERR_OR_ZERO(role_class);
3523     +}
3524     +subsys_initcall(usb_roles_init);
3525     +
3526     +static void __exit usb_roles_exit(void)
3527     +{
3528     + class_destroy(role_class);
3529     +}
3530     +module_exit(usb_roles_exit);
3531     +
3532     +MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
3533     +MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
3534     +MODULE_LICENSE("GPL v2");
3535     +MODULE_DESCRIPTION("USB Role Class");
3536     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3537     index 17787dc349f8..fb544340888b 100644
3538     --- a/drivers/usb/serial/option.c
3539     +++ b/drivers/usb/serial/option.c
3540     @@ -1955,6 +1955,10 @@ static const struct usb_device_id option_ids[] = {
3541     { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
3542     { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
3543     .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
3544     + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
3545     + .driver_info = RSVD(4) | RSVD(5) },
3546     + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
3547     + .driver_info = RSVD(6) },
3548     { } /* Terminating entry */
3549     };
3550     MODULE_DEVICE_TABLE(usb, option_ids);
3551     diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
3552     index e41f725ac7aa..d5b38f096698 100644
3553     --- a/drivers/usb/serial/pl2303.c
3554     +++ b/drivers/usb/serial/pl2303.c
3555     @@ -91,9 +91,14 @@ static const struct usb_device_id id_table[] = {
3556     { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
3557     { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
3558     { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
3559     + { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
3560     { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
3561     + { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
3562     { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
3563     { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
3564     + { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
3565     + { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
3566     + { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
3567     { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
3568     { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
3569     { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
3570     diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
3571     index 26965cc23c17..4e2554d55362 100644
3572     --- a/drivers/usb/serial/pl2303.h
3573     +++ b/drivers/usb/serial/pl2303.h
3574     @@ -119,10 +119,15 @@
3575    
3576     /* Hewlett-Packard POS Pole Displays */
3577     #define HP_VENDOR_ID 0x03f0
3578     +#define HP_LM920_PRODUCT_ID 0x026b
3579     +#define HP_TD620_PRODUCT_ID 0x0956
3580     #define HP_LD960_PRODUCT_ID 0x0b39
3581     #define HP_LCM220_PRODUCT_ID 0x3139
3582     #define HP_LCM960_PRODUCT_ID 0x3239
3583     #define HP_LD220_PRODUCT_ID 0x3524
3584     +#define HP_LD220TA_PRODUCT_ID 0x4349
3585     +#define HP_LD960TA_PRODUCT_ID 0x4439
3586     +#define HP_LM940_PRODUCT_ID 0x5039
3587    
3588     /* Cressi Edy (diving computer) PC interface */
3589     #define CRESSI_VENDOR_ID 0x04b8
3590     diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
3591     index eb95daa3e5a5..55e5aa662ad5 100644
3592     --- a/drivers/vhost/vhost.c
3593     +++ b/drivers/vhost/vhost.c
3594     @@ -2233,6 +2233,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
3595     return -EFAULT;
3596     }
3597     if (unlikely(vq->log_used)) {
3598     + /* Make sure used idx is seen before log. */
3599     + smp_wmb();
3600     /* Log used index update. */
3601     log_write(vq->log_base,
3602     vq->log_addr + offsetof(struct vring_used, idx),
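
The vhost hunk inserts a write barrier so the used-index store is visible before the corresponding log write, keeping a reader of the dirty log from observing the log update ahead of the index it describes. A hedged C11 analogue of that store ordering, using a release fence on the writer paired with an acquire fence on the reader; smp_wmb() itself is a kernel primitive and is only approximated here.

/* Message-passing sketch: data store ordered before the flag store. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int used_idx = 0;
static atomic_int log_flag = 0;

static void *producer(void *arg)
{
    (void)arg;
    atomic_store_explicit(&used_idx, 42, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);        /* roughly smp_wmb() */
    atomic_store_explicit(&log_flag, 1, memory_order_relaxed);
    return NULL;
}

static void *consumer(void *arg)
{
    (void)arg;
    while (!atomic_load_explicit(&log_flag, memory_order_relaxed))
        ;                                             /* spin until logged */
    atomic_thread_fence(memory_order_acquire);        /* pairs with the release fence */
    printf("used_idx seen as %d\n",
           atomic_load_explicit(&used_idx, memory_order_relaxed));
    return NULL;
}

int main(void)
{
    pthread_t p, c;
    pthread_create(&c, NULL, consumer, NULL);
    pthread_create(&p, NULL, producer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    return 0;
}
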
3603     diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
3604     index 1343ac57b438..7177d1d33584 100644
3605     --- a/fs/btrfs/btrfs_inode.h
3606     +++ b/fs/btrfs/btrfs_inode.h
3607     @@ -146,6 +146,12 @@ struct btrfs_inode {
3608     */
3609     u64 last_unlink_trans;
3610    
3611     + /*
3612     + * Track the transaction id of the last transaction used to create a
3613     + * hard link for the inode. This is used by the log tree (fsync).
3614     + */
3615     + u64 last_link_trans;
3616     +
3617     /*
3618     * Number of bytes outstanding that are going to need csums. This is
3619     * used in ENOSPC accounting.
3620     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3621     index 089b46c4d97f..fa18520529f3 100644
3622     --- a/fs/btrfs/ctree.c
3623     +++ b/fs/btrfs/ctree.c
3624     @@ -2624,14 +2624,27 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
3625     root_lock = BTRFS_READ_LOCK;
3626    
3627     if (p->search_commit_root) {
3628     - /* The commit roots are read only so we always do read locks */
3629     - if (p->need_commit_sem)
3630     + /*
3631     + * The commit roots are read only so we always do read locks,
3632     + * and we always must hold the commit_root_sem when doing
3633     + * searches on them, the only exception is send where we don't
3634     + * want to block transaction commits for a long time, so
3635     + * we need to clone the commit root in order to avoid races
3636     + * with transaction commits that create a snapshot of one of
3637     + * the roots used by a send operation.
3638     + */
3639     + if (p->need_commit_sem) {
3640     down_read(&fs_info->commit_root_sem);
3641     - b = root->commit_root;
3642     - extent_buffer_get(b);
3643     - level = btrfs_header_level(b);
3644     - if (p->need_commit_sem)
3645     + b = btrfs_clone_extent_buffer(root->commit_root);
3646     up_read(&fs_info->commit_root_sem);
3647     + if (!b)
3648     + return ERR_PTR(-ENOMEM);
3649     +
3650     + } else {
3651     + b = root->commit_root;
3652     + extent_buffer_get(b);
3653     + }
3654     + level = btrfs_header_level(b);
3655     /*
3656     * Ensure that all callers have set skip_locking when
3657     * p->search_commit_root = 1.
3658     @@ -2757,6 +2770,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3659     again:
3660     prev_cmp = -1;
3661     b = btrfs_search_slot_get_root(root, p, write_lock_level);
3662     + if (IS_ERR(b)) {
3663     + ret = PTR_ERR(b);
3664     + goto done;
3665     + }
3666    
3667     while (b) {
3668     level = btrfs_header_level(b);
3669     diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
3670     index 981434764bb9..b2b283e48439 100644
3671     --- a/fs/btrfs/dev-replace.c
3672     +++ b/fs/btrfs/dev-replace.c
3673     @@ -887,6 +887,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
3674     "cannot continue dev_replace, tgtdev is missing");
3675     btrfs_info(fs_info,
3676     "you may cancel the operation after 'mount -o degraded'");
3677     + dev_replace->replace_state =
3678     + BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
3679     btrfs_dev_replace_write_unlock(dev_replace);
3680     return 0;
3681     }
3682     @@ -898,6 +900,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
3683     * dev-replace to start anyway.
3684     */
3685     if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
3686     + btrfs_dev_replace_write_lock(dev_replace);
3687     + dev_replace->replace_state =
3688     + BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
3689     + btrfs_dev_replace_write_unlock(dev_replace);
3690     btrfs_info(fs_info,
3691     "cannot resume dev-replace, other exclusive operation running");
3692     return 0;
3693     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3694     index 51e41e53d4ae..a16760b410b1 100644
3695     --- a/fs/btrfs/extent-tree.c
3696     +++ b/fs/btrfs/extent-tree.c
3697     @@ -8911,6 +8911,10 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
3698     goto out_free;
3699     }
3700    
3701     + err = btrfs_run_delayed_items(trans);
3702     + if (err)
3703     + goto out_end_trans;
3704     +
3705     if (block_rsv)
3706     trans->block_rsv = block_rsv;
3707    
3708     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3709     index 7158b5b77c9d..14c85e61134d 100644
3710     --- a/fs/btrfs/inode.c
3711     +++ b/fs/btrfs/inode.c
3712     @@ -1373,7 +1373,8 @@ next_slot:
3713     * Do the same check as in btrfs_cross_ref_exist but
3714     * without the unnecessary search.
3715     */
3716     - if (btrfs_file_extent_generation(leaf, fi) <=
3717     + if (!nolock &&
3718     + btrfs_file_extent_generation(leaf, fi) <=
3719     btrfs_root_last_snapshot(&root->root_item))
3720     goto out_check;
3721     if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
3722     @@ -3688,6 +3689,21 @@ cache_index:
3723     * inode is not a directory, logging its parent unnecessarily.
3724     */
3725     BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3726     + /*
3727     + * Similar reasoning for last_link_trans, needs to be set otherwise
3728     + * for a case like the following:
3729     + *
3730     + * mkdir A
3731     + * touch foo
3732     + * ln foo A/bar
3733     + * echo 2 > /proc/sys/vm/drop_caches
3734     + * fsync foo
3735     + * <power failure>
3736     + *
3737     + * Would result in link bar and directory A not existing after the power
3738     + * failure.
3739     + */
3740     + BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
3741    
3742     path->slots[0]++;
3743     if (inode->i_nlink != 1 ||
3744     @@ -6646,6 +6662,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3745     if (err)
3746     goto fail;
3747     }
3748     + BTRFS_I(inode)->last_link_trans = trans->transid;
3749     d_instantiate(dentry, inode);
3750     ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
3751     true, NULL);
3752     @@ -9174,6 +9191,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
3753     ei->index_cnt = (u64)-1;
3754     ei->dir_index = 0;
3755     ei->last_unlink_trans = 0;
3756     + ei->last_link_trans = 0;
3757     ei->last_log_commit = 0;
3758    
3759     spin_lock_init(&ei->lock);
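
The last_link_trans comment in the hunk above spells out the sequence that could lose a freshly created hard link (and its parent directory) across a power failure. A minimal POSIX C reproduction of that exact syscall order, with illustrative paths relative to the current directory:

/* mkdir A; touch foo; ln foo A/bar; fsync foo -- as syscalls. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    int fd;

    if (mkdir("A", 0755) != 0 && errno != EEXIST) {          /* mkdir A      */
        perror("mkdir");
        return 1;
    }

    fd = open("foo", O_CREAT | O_WRONLY, 0644);              /* touch foo    */
    if (fd < 0) {
        perror("open foo");
        return 1;
    }

    if (link("foo", "A/bar") != 0 && errno != EEXIST) {      /* ln foo A/bar */
        perror("link");
        return 1;
    }

    if (fsync(fd) != 0)                                      /* fsync foo    */
        perror("fsync");

    /* A power failure right after the fsync is the window in which, before
     * this fix, log replay could leave A/bar (and A) missing. */
    close(fd);
    return 0;
}
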
3760     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3761     index 16ecb76fa53c..0805f8c5e72d 100644
3762     --- a/fs/btrfs/tree-log.c
3763     +++ b/fs/btrfs/tree-log.c
3764     @@ -5781,6 +5781,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3765     goto end_trans;
3766     }
3767    
3768     + /*
3769     + * If a new hard link was added to the inode in the current transaction
3770     + * and its link count is now greater than 1, we need to fallback to a
3771     + * transaction commit, otherwise we can end up not logging all its new
3772     + * parents for all the hard links. Here just from the dentry used to
3773     + * fsync, we can not visit the ancestor inodes for all the other hard
3774     + * links to figure out if any is new, so we fallback to a transaction
3775     + * commit (instead of adding a lot of complexity of scanning a btree,
3776     + * since this scenario is not a common use case).
3777     + */
3778     + if (inode->vfs_inode.i_nlink > 1 &&
3779     + inode->last_link_trans > last_committed) {
3780     + ret = -EMLINK;
3781     + goto end_trans;
3782     + }
3783     +
3784     while (1) {
3785     if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
3786     break;
3787     diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
3788     index 20a2d304c603..c3ae8c1d6089 100644
3789     --- a/fs/cifs/smb2maperror.c
3790     +++ b/fs/cifs/smb2maperror.c
3791     @@ -379,8 +379,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
3792     {STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
3793     {STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
3794     {STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
3795     - {STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
3796     - {STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
3797     + {STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
3798     + {STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
3799     {STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
3800     {STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
3801     "STATUS_CTL_FILE_NOT_SUPPORTED"},
3802     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3803     index 812da3e56a22..f44bb4a304e9 100644
3804     --- a/fs/cifs/smb2ops.c
3805     +++ b/fs/cifs/smb2ops.c
3806     @@ -3184,8 +3184,10 @@ smb3_receive_transform(struct TCP_Server_Info *server,
3807     }
3808    
3809     /* TODO: add support for compounds containing READ. */
3810     - if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
3811     + if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
3812     + *num_mids = 1;
3813     return receive_encrypted_read(server, &mids[0]);
3814     + }
3815    
3816     return receive_encrypted_standard(server, mids, bufs, num_mids);
3817     }
3818     diff --git a/fs/dax.c b/fs/dax.c
3819     index 3a2682a6c832..09fa70683c41 100644
3820     --- a/fs/dax.c
3821     +++ b/fs/dax.c
3822     @@ -229,8 +229,8 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
3823     *
3824     * Must be called with the i_pages lock held.
3825     */
3826     -static void *__get_unlocked_mapping_entry(struct address_space *mapping,
3827     - pgoff_t index, void ***slotp, bool (*wait_fn)(void))
3828     +static void *get_unlocked_mapping_entry(struct address_space *mapping,
3829     + pgoff_t index, void ***slotp)
3830     {
3831     void *entry, **slot;
3832     struct wait_exceptional_entry_queue ewait;
3833     @@ -240,8 +240,6 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
3834     ewait.wait.func = wake_exceptional_entry_func;
3835    
3836     for (;;) {
3837     - bool revalidate;
3838     -
3839     entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
3840     &slot);
3841     if (!entry ||
3842     @@ -256,30 +254,37 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
3843     prepare_to_wait_exclusive(wq, &ewait.wait,
3844     TASK_UNINTERRUPTIBLE);
3845     xa_unlock_irq(&mapping->i_pages);
3846     - revalidate = wait_fn();
3847     + schedule();
3848     finish_wait(wq, &ewait.wait);
3849     xa_lock_irq(&mapping->i_pages);
3850     - if (revalidate) {
3851     - put_unlocked_mapping_entry(mapping, index, entry);
3852     - return ERR_PTR(-EAGAIN);
3853     - }
3854     }
3855     }
3856    
3857     -static bool entry_wait(void)
3858     +/*
3859     + * The only thing keeping the address space around is the i_pages lock
3860     + * (it's cycled in clear_inode() after removing the entries from i_pages)
3861     + * After we call xas_unlock_irq(), we cannot touch xas->xa.
3862     + */
3863     +static void wait_entry_unlocked(struct address_space *mapping, pgoff_t index,
3864     + void ***slotp, void *entry)
3865     {
3866     - schedule();
3867     + struct wait_exceptional_entry_queue ewait;
3868     + wait_queue_head_t *wq;
3869     +
3870     + init_wait(&ewait.wait);
3871     + ewait.wait.func = wake_exceptional_entry_func;
3872     +
3873     + wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
3874     /*
3875     - * Never return an ERR_PTR() from
3876     - * __get_unlocked_mapping_entry(), just keep looping.
3877     + * Unlike get_unlocked_entry() there is no guarantee that this
3878     + * path ever successfully retrieves an unlocked entry before an
3879     + * inode dies. Perform a non-exclusive wait in case this path
3880     + * never successfully performs its own wake up.
3881     */
3882     - return false;
3883     -}
3884     -
3885     -static void *get_unlocked_mapping_entry(struct address_space *mapping,
3886     - pgoff_t index, void ***slotp)
3887     -{
3888     - return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
3889     + prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
3890     + xa_unlock_irq(&mapping->i_pages);
3891     + schedule();
3892     + finish_wait(wq, &ewait.wait);
3893     }
3894    
3895     static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
3896     @@ -398,19 +403,6 @@ static struct page *dax_busy_page(void *entry)
3897     return NULL;
3898     }
3899    
3900     -static bool entry_wait_revalidate(void)
3901     -{
3902     - rcu_read_unlock();
3903     - schedule();
3904     - rcu_read_lock();
3905     -
3906     - /*
3907     - * Tell __get_unlocked_mapping_entry() to take a break, we need
3908     - * to revalidate page->mapping after dropping locks
3909     - */
3910     - return true;
3911     -}
3912     -
3913     bool dax_lock_mapping_entry(struct page *page)
3914     {
3915     pgoff_t index;
3916     @@ -446,14 +438,15 @@ bool dax_lock_mapping_entry(struct page *page)
3917     }
3918     index = page->index;
3919    
3920     - entry = __get_unlocked_mapping_entry(mapping, index, &slot,
3921     - entry_wait_revalidate);
3922     + entry = __radix_tree_lookup(&mapping->i_pages, index,
3923     + NULL, &slot);
3924     if (!entry) {
3925     xa_unlock_irq(&mapping->i_pages);
3926     break;
3927     - } else if (IS_ERR(entry)) {
3928     - xa_unlock_irq(&mapping->i_pages);
3929     - WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
3930     + } else if (slot_locked(mapping, slot)) {
3931     + rcu_read_unlock();
3932     + wait_entry_unlocked(mapping, index, &slot, entry);
3933     + rcu_read_lock();
3934     continue;
3935     }
3936     lock_slot(mapping, slot);
3937     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3938     index 5cfb1e2f6a5b..032cf9b92665 100644
3939     --- a/fs/ext4/ext4.h
3940     +++ b/fs/ext4/ext4.h
3941     @@ -2459,8 +2459,19 @@ int do_journal_get_write_access(handle_t *handle,
3942     #define FALL_BACK_TO_NONDELALLOC 1
3943     #define CONVERT_INLINE_DATA 2
3944    
3945     -extern struct inode *ext4_iget(struct super_block *, unsigned long);
3946     -extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
3947     +typedef enum {
3948     + EXT4_IGET_NORMAL = 0,
3949     + EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
3950     + EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
3951     +} ext4_iget_flags;
3952     +
3953     +extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
3954     + ext4_iget_flags flags, const char *function,
3955     + unsigned int line);
3956     +
3957     +#define ext4_iget(sb, ino, flags) \
3958     + __ext4_iget((sb), (ino), (flags), __func__, __LINE__)
3959     +
3960     extern int ext4_write_inode(struct inode *, struct writeback_control *);
3961     extern int ext4_setattr(struct dentry *, struct iattr *);
3962     extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
3963     @@ -2542,6 +2553,8 @@ extern int ext4_group_extend(struct super_block *sb,
3964     extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
3965    
3966     /* super.c */
3967     +extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
3968     + sector_t block, int op_flags);
3969     extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
3970     extern int ext4_calculate_overhead(struct super_block *sb);
3971     extern void ext4_superblock_csum_set(struct super_block *sb);
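
The new ext4_iget() is a macro so that __ext4_iget() receives the caller's __func__ and __LINE__ and can name the real call site in its corruption messages. The wrapper pattern in isolation, with hypothetical names (my_iget/__my_iget are not ext4 symbols):

/* A macro wrapper that records the call site for error reporting. */
#include <stdio.h>

static int __my_iget(unsigned long ino, const char *function, unsigned int line)
{
    if (ino == 0) {
        fprintf(stderr, "%s:%u: iget: illegal inode #%lu\n", function, line, ino);
        return -1;
    }
    printf("%s:%u: loaded inode #%lu\n", function, line, ino);
    return 0;
}

/* Callers just write my_iget(ino); the expansion captures where they called from. */
#define my_iget(ino) __my_iget((ino), __func__, __LINE__)

int main(void)
{
    my_iget(2);    /* ok */
    my_iget(0);    /* reports this function and line as the caller */
    return 0;
}
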
3972     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3973     index 2addcb8730e1..091a18a51c99 100644
3974     --- a/fs/ext4/ialloc.c
3975     +++ b/fs/ext4/ialloc.c
3976     @@ -1225,7 +1225,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
3977     if (!ext4_test_bit(bit, bitmap_bh->b_data))
3978     goto bad_orphan;
3979    
3980     - inode = ext4_iget(sb, ino);
3981     + inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
3982     if (IS_ERR(inode)) {
3983     err = PTR_ERR(inode);
3984     ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
3985     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
3986     index 9c4bac18cc6c..27373d88b5f0 100644
3987     --- a/fs/ext4/inline.c
3988     +++ b/fs/ext4/inline.c
3989     @@ -705,8 +705,11 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
3990    
3991     if (!PageUptodate(page)) {
3992     ret = ext4_read_inline_page(inode, page);
3993     - if (ret < 0)
3994     + if (ret < 0) {
3995     + unlock_page(page);
3996     + put_page(page);
3997     goto out_up_read;
3998     + }
3999     }
4000    
4001     ret = 1;
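
The inline.c fix releases the page when ext4_read_inline_page() fails instead of leaking it on the error return. The same shape as a standalone sketch, where the resource is dropped on the failure path and handed to the caller only on success; all names are invented and malloc stands in for the page cache.

/* Release an acquired resource on the error path, keep it on success. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int read_contents(char *page)     /* stand-in for the inline read */
{
    (void)page;
    return -EIO;                          /* pretend the read failed */
}

static int prepare_write(char **pagep)
{
    char *page = malloc(4096);
    int ret;

    if (!page)
        return -ENOMEM;

    ret = read_contents(page);
    if (ret < 0) {
        free(page);                       /* the fix: do not leak the page on error */
        return ret;
    }

    *pagep = page;                        /* success: caller keeps the page */
    return 0;
}

int main(void)
{
    char *page = NULL;
    int ret = prepare_write(&page);

    printf("prepare_write() = %d\n", ret);
    free(page);                           /* no-op when prepare_write() failed */
    return 0;
}
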
4002     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4003     index 244531d3065a..36abbdafb26e 100644
4004     --- a/fs/ext4/inode.c
4005     +++ b/fs/ext4/inode.c
4006     @@ -4786,7 +4786,9 @@ static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4007     return inode_peek_iversion(inode);
4008     }
4009    
4010     -struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4011     +struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4012     + ext4_iget_flags flags, const char *function,
4013     + unsigned int line)
4014     {
4015     struct ext4_iloc iloc;
4016     struct ext4_inode *raw_inode;
4017     @@ -4800,6 +4802,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4018     gid_t i_gid;
4019     projid_t i_projid;
4020    
4021     + if (((flags & EXT4_IGET_NORMAL) &&
4022     + (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
4023     + (ino < EXT4_ROOT_INO) ||
4024     + (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
4025     + if (flags & EXT4_IGET_HANDLE)
4026     + return ERR_PTR(-ESTALE);
4027     + __ext4_error(sb, function, line,
4028     + "inode #%lu: comm %s: iget: illegal inode #",
4029     + ino, current->comm);
4030     + return ERR_PTR(-EFSCORRUPTED);
4031     + }
4032     +
4033     inode = iget_locked(sb, ino);
4034     if (!inode)
4035     return ERR_PTR(-ENOMEM);
4036     @@ -4815,18 +4829,26 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4037     raw_inode = ext4_raw_inode(&iloc);
4038    
4039     if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
4040     - EXT4_ERROR_INODE(inode, "root inode unallocated");
4041     + ext4_error_inode(inode, function, line, 0,
4042     + "iget: root inode unallocated");
4043     ret = -EFSCORRUPTED;
4044     goto bad_inode;
4045     }
4046    
4047     + if ((flags & EXT4_IGET_HANDLE) &&
4048     + (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4049     + ret = -ESTALE;
4050     + goto bad_inode;
4051     + }
4052     +
4053     if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4054     ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4055     if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4056     EXT4_INODE_SIZE(inode->i_sb) ||
4057     (ei->i_extra_isize & 3)) {
4058     - EXT4_ERROR_INODE(inode,
4059     - "bad extra_isize %u (inode size %u)",
4060     + ext4_error_inode(inode, function, line, 0,
4061     + "iget: bad extra_isize %u "
4062     + "(inode size %u)",
4063     ei->i_extra_isize,
4064     EXT4_INODE_SIZE(inode->i_sb));
4065     ret = -EFSCORRUPTED;
4066     @@ -4848,7 +4870,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4067     }
4068    
4069     if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4070     - EXT4_ERROR_INODE(inode, "checksum invalid");
4071     + ext4_error_inode(inode, function, line, 0,
4072     + "iget: checksum invalid");
4073     ret = -EFSBADCRC;
4074     goto bad_inode;
4075     }
4076     @@ -4905,7 +4928,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4077     ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4078     inode->i_size = ext4_isize(sb, raw_inode);
4079     if ((size = i_size_read(inode)) < 0) {
4080     - EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
4081     + ext4_error_inode(inode, function, line, 0,
4082     + "iget: bad i_size value: %lld", size);
4083     ret = -EFSCORRUPTED;
4084     goto bad_inode;
4085     }
4086     @@ -4981,7 +5005,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4087     ret = 0;
4088     if (ei->i_file_acl &&
4089     !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4090     - EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
4091     + ext4_error_inode(inode, function, line, 0,
4092     + "iget: bad extended attribute block %llu",
4093     ei->i_file_acl);
4094     ret = -EFSCORRUPTED;
4095     goto bad_inode;
4096     @@ -5009,8 +5034,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4097     } else if (S_ISLNK(inode->i_mode)) {
4098     /* VFS does not allow setting these so must be corruption */
4099     if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4100     - EXT4_ERROR_INODE(inode,
4101     - "immutable or append flags not allowed on symlinks");
4102     + ext4_error_inode(inode, function, line, 0,
4103     + "iget: immutable or append flags "
4104     + "not allowed on symlinks");
4105     ret = -EFSCORRUPTED;
4106     goto bad_inode;
4107     }
4108     @@ -5040,7 +5066,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4109     make_bad_inode(inode);
4110     } else {
4111     ret = -EFSCORRUPTED;
4112     - EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
4113     + ext4_error_inode(inode, function, line, 0,
4114     + "iget: bogus i_mode (%o)", inode->i_mode);
4115     goto bad_inode;
4116     }
4117     brelse(iloc.bh);
4118     @@ -5054,13 +5081,6 @@ bad_inode:
4119     return ERR_PTR(ret);
4120     }
4121    
4122     -struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
4123     -{
4124     - if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
4125     - return ERR_PTR(-EFSCORRUPTED);
4126     - return ext4_iget(sb, ino);
4127     -}
4128     -
4129     static int ext4_inode_blocks_set(handle_t *handle,
4130     struct ext4_inode *raw_inode,
4131     struct ext4_inode_info *ei)
4132     @@ -5349,9 +5369,13 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4133     {
4134     int err;
4135    
4136     - if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
4137     + if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
4138     + sb_rdonly(inode->i_sb))
4139     return 0;
4140    
4141     + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
4142     + return -EIO;
4143     +
4144     if (EXT4_SB(inode->i_sb)->s_journal) {
4145     if (ext4_journal_current_handle()) {
4146     jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4147     @@ -5367,7 +5391,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4148     if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
4149     return 0;
4150    
4151     - err = ext4_force_commit(inode->i_sb);
4152     + err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
4153     + EXT4_I(inode)->i_sync_tid);
4154     } else {
4155     struct ext4_iloc iloc;
4156    
4157     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
4158     index 0edee31913d1..d37dafa1d133 100644
4159     --- a/fs/ext4/ioctl.c
4160     +++ b/fs/ext4/ioctl.c
4161     @@ -125,7 +125,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
4162     !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
4163     return -EPERM;
4164    
4165     - inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
4166     + inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
4167     if (IS_ERR(inode_bl))
4168     return PTR_ERR(inode_bl);
4169     ei_bl = EXT4_I(inode_bl);
4170     diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
4171     index 61a9d1927817..a98bfca9c463 100644
4172     --- a/fs/ext4/migrate.c
4173     +++ b/fs/ext4/migrate.c
4174     @@ -116,9 +116,9 @@ static int update_ind_extent_range(handle_t *handle, struct inode *inode,
4175     int i, retval = 0;
4176     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4177    
4178     - bh = sb_bread(inode->i_sb, pblock);
4179     - if (!bh)
4180     - return -EIO;
4181     + bh = ext4_sb_bread(inode->i_sb, pblock, 0);
4182     + if (IS_ERR(bh))
4183     + return PTR_ERR(bh);
4184    
4185     i_data = (__le32 *)bh->b_data;
4186     for (i = 0; i < max_entries; i++) {
4187     @@ -145,9 +145,9 @@ static int update_dind_extent_range(handle_t *handle, struct inode *inode,
4188     int i, retval = 0;
4189     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4190    
4191     - bh = sb_bread(inode->i_sb, pblock);
4192     - if (!bh)
4193     - return -EIO;
4194     + bh = ext4_sb_bread(inode->i_sb, pblock, 0);
4195     + if (IS_ERR(bh))
4196     + return PTR_ERR(bh);
4197    
4198     i_data = (__le32 *)bh->b_data;
4199     for (i = 0; i < max_entries; i++) {
4200     @@ -175,9 +175,9 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
4201     int i, retval = 0;
4202     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4203    
4204     - bh = sb_bread(inode->i_sb, pblock);
4205     - if (!bh)
4206     - return -EIO;
4207     + bh = ext4_sb_bread(inode->i_sb, pblock, 0);
4208     + if (IS_ERR(bh))
4209     + return PTR_ERR(bh);
4210    
4211     i_data = (__le32 *)bh->b_data;
4212     for (i = 0; i < max_entries; i++) {
4213     @@ -224,9 +224,9 @@ static int free_dind_blocks(handle_t *handle,
4214     struct buffer_head *bh;
4215     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4216    
4217     - bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
4218     - if (!bh)
4219     - return -EIO;
4220     + bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
4221     + if (IS_ERR(bh))
4222     + return PTR_ERR(bh);
4223    
4224     tmp_idata = (__le32 *)bh->b_data;
4225     for (i = 0; i < max_entries; i++) {
4226     @@ -254,9 +254,9 @@ static int free_tind_blocks(handle_t *handle,
4227     struct buffer_head *bh;
4228     unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
4229    
4230     - bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
4231     - if (!bh)
4232     - return -EIO;
4233     + bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
4234     + if (IS_ERR(bh))
4235     + return PTR_ERR(bh);
4236    
4237     tmp_idata = (__le32 *)bh->b_data;
4238     for (i = 0; i < max_entries; i++) {
4239     @@ -382,9 +382,9 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
4240     struct ext4_extent_header *eh;
4241    
4242     block = ext4_idx_pblock(ix);
4243     - bh = sb_bread(inode->i_sb, block);
4244     - if (!bh)
4245     - return -EIO;
4246     + bh = ext4_sb_bread(inode->i_sb, block, 0);
4247     + if (IS_ERR(bh))
4248     + return PTR_ERR(bh);
4249    
4250     eh = (struct ext4_extent_header *)bh->b_data;
4251     if (eh->eh_depth != 0) {
4252     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4253     index ffa25753e929..4f8de2b9e87e 100644
4254     --- a/fs/ext4/namei.c
4255     +++ b/fs/ext4/namei.c
4256     @@ -1571,7 +1571,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
4257     dentry);
4258     return ERR_PTR(-EFSCORRUPTED);
4259     }
4260     - inode = ext4_iget_normal(dir->i_sb, ino);
4261     + inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);
4262     if (inode == ERR_PTR(-ESTALE)) {
4263     EXT4_ERROR_INODE(dir,
4264     "deleted inode referenced: %u",
4265     @@ -1613,7 +1613,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
4266     return ERR_PTR(-EFSCORRUPTED);
4267     }
4268    
4269     - return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
4270     + return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL));
4271     }
4272    
4273     /*
4274     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
4275     index a5efee34415f..48421de803b7 100644
4276     --- a/fs/ext4/resize.c
4277     +++ b/fs/ext4/resize.c
4278     @@ -127,10 +127,12 @@ static int verify_group_input(struct super_block *sb,
4279     else if (free_blocks_count < 0)
4280     ext4_warning(sb, "Bad blocks count %u",
4281     input->blocks_count);
4282     - else if (!(bh = sb_bread(sb, end - 1)))
4283     + else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
4284     + err = PTR_ERR(bh);
4285     + bh = NULL;
4286     ext4_warning(sb, "Cannot read last block (%llu)",
4287     end - 1);
4288     - else if (outside(input->block_bitmap, start, end))
4289     + } else if (outside(input->block_bitmap, start, end))
4290     ext4_warning(sb, "Block bitmap not in group (block %llu)",
4291     (unsigned long long)input->block_bitmap);
4292     else if (outside(input->inode_bitmap, start, end))
4293     @@ -781,11 +783,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4294     struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4295     unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
4296     ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
4297     - struct buffer_head **o_group_desc, **n_group_desc;
4298     - struct buffer_head *dind;
4299     - struct buffer_head *gdb_bh;
4300     + struct buffer_head **o_group_desc, **n_group_desc = NULL;
4301     + struct buffer_head *dind = NULL;
4302     + struct buffer_head *gdb_bh = NULL;
4303     int gdbackups;
4304     - struct ext4_iloc iloc;
4305     + struct ext4_iloc iloc = { .bh = NULL };
4306     __le32 *data;
4307     int err;
4308    
4309     @@ -794,21 +796,22 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4310     "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
4311     gdb_num);
4312    
4313     - gdb_bh = sb_bread(sb, gdblock);
4314     - if (!gdb_bh)
4315     - return -EIO;
4316     + gdb_bh = ext4_sb_bread(sb, gdblock, 0);
4317     + if (IS_ERR(gdb_bh))
4318     + return PTR_ERR(gdb_bh);
4319    
4320     gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
4321     if (gdbackups < 0) {
4322     err = gdbackups;
4323     - goto exit_bh;
4324     + goto errout;
4325     }
4326    
4327     data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
4328     - dind = sb_bread(sb, le32_to_cpu(*data));
4329     - if (!dind) {
4330     - err = -EIO;
4331     - goto exit_bh;
4332     + dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
4333     + if (IS_ERR(dind)) {
4334     + err = PTR_ERR(dind);
4335     + dind = NULL;
4336     + goto errout;
4337     }
4338    
4339     data = (__le32 *)dind->b_data;
4340     @@ -816,18 +819,18 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4341     ext4_warning(sb, "new group %u GDT block %llu not reserved",
4342     group, gdblock);
4343     err = -EINVAL;
4344     - goto exit_dind;
4345     + goto errout;
4346     }
4347    
4348     BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
4349     err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
4350     if (unlikely(err))
4351     - goto exit_dind;
4352     + goto errout;
4353    
4354     BUFFER_TRACE(gdb_bh, "get_write_access");
4355     err = ext4_journal_get_write_access(handle, gdb_bh);
4356     if (unlikely(err))
4357     - goto exit_dind;
4358     + goto errout;
4359    
4360     BUFFER_TRACE(dind, "get_write_access");
4361     err = ext4_journal_get_write_access(handle, dind);
4362     @@ -837,7 +840,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4363     /* ext4_reserve_inode_write() gets a reference on the iloc */
4364     err = ext4_reserve_inode_write(handle, inode, &iloc);
4365     if (unlikely(err))
4366     - goto exit_dind;
4367     + goto errout;
4368    
4369     n_group_desc = ext4_kvmalloc((gdb_num + 1) *
4370     sizeof(struct buffer_head *),
4371     @@ -846,7 +849,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4372     err = -ENOMEM;
4373     ext4_warning(sb, "not enough memory for %lu groups",
4374     gdb_num + 1);
4375     - goto exit_inode;
4376     + goto errout;
4377     }
4378    
4379     /*
4380     @@ -862,7 +865,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4381     err = ext4_handle_dirty_metadata(handle, NULL, dind);
4382     if (unlikely(err)) {
4383     ext4_std_error(sb, err);
4384     - goto exit_inode;
4385     + goto errout;
4386     }
4387     inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
4388     (9 - EXT4_SB(sb)->s_cluster_bits);
4389     @@ -871,8 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4390     err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
4391     if (unlikely(err)) {
4392     ext4_std_error(sb, err);
4393     - iloc.bh = NULL;
4394     - goto exit_inode;
4395     + goto errout;
4396     }
4397     brelse(dind);
4398    
4399     @@ -888,15 +890,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4400     err = ext4_handle_dirty_super(handle, sb);
4401     if (err)
4402     ext4_std_error(sb, err);
4403     -
4404     return err;
4405     -
4406     -exit_inode:
4407     +errout:
4408     kvfree(n_group_desc);
4409     brelse(iloc.bh);
4410     -exit_dind:
4411     brelse(dind);
4412     -exit_bh:
4413     brelse(gdb_bh);
4414    
4415     ext4_debug("leaving with error %d\n", err);
4416     @@ -916,9 +914,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
4417    
4418     gdblock = ext4_meta_bg_first_block_no(sb, group) +
4419     ext4_bg_has_super(sb, group);
4420     - gdb_bh = sb_bread(sb, gdblock);
4421     - if (!gdb_bh)
4422     - return -EIO;
4423     + gdb_bh = ext4_sb_bread(sb, gdblock, 0);
4424     + if (IS_ERR(gdb_bh))
4425     + return PTR_ERR(gdb_bh);
4426     n_group_desc = ext4_kvmalloc((gdb_num + 1) *
4427     sizeof(struct buffer_head *),
4428     GFP_NOFS);
4429     @@ -975,9 +973,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
4430     return -ENOMEM;
4431    
4432     data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
4433     - dind = sb_bread(sb, le32_to_cpu(*data));
4434     - if (!dind) {
4435     - err = -EIO;
4436     + dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
4437     + if (IS_ERR(dind)) {
4438     + err = PTR_ERR(dind);
4439     + dind = NULL;
4440     goto exit_free;
4441     }
4442    
4443     @@ -996,9 +995,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
4444     err = -EINVAL;
4445     goto exit_bh;
4446     }
4447     - primary[res] = sb_bread(sb, blk);
4448     - if (!primary[res]) {
4449     - err = -EIO;
4450     + primary[res] = ext4_sb_bread(sb, blk, 0);
4451     + if (IS_ERR(primary[res])) {
4452     + err = PTR_ERR(primary[res]);
4453     + primary[res] = NULL;
4454     goto exit_bh;
4455     }
4456     gdbackups = verify_reserved_gdb(sb, group, primary[res]);
4457     @@ -1631,13 +1631,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
4458     }
4459    
4460     if (reserved_gdb || gdb_off == 0) {
4461     - if (ext4_has_feature_resize_inode(sb) ||
4462     + if (!ext4_has_feature_resize_inode(sb) ||
4463     !le16_to_cpu(es->s_reserved_gdt_blocks)) {
4464     ext4_warning(sb,
4465     "No reserved GDT blocks, can't resize");
4466     return -EPERM;
4467     }
4468     - inode = ext4_iget(sb, EXT4_RESIZE_INO);
4469     + inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
4470     if (IS_ERR(inode)) {
4471     ext4_warning(sb, "Error opening resize inode");
4472     return PTR_ERR(inode);
4473     @@ -1965,7 +1965,8 @@ retry:
4474     }
4475    
4476     if (!resize_inode)
4477     - resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
4478     + resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
4479     + EXT4_IGET_SPECIAL);
4480     if (IS_ERR(resize_inode)) {
4481     ext4_warning(sb, "Error opening resize inode");
4482     return PTR_ERR(resize_inode);
4483     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4484     index 8a149df1c6a1..ee0f30852835 100644
4485     --- a/fs/ext4/super.c
4486     +++ b/fs/ext4/super.c
4487     @@ -140,6 +140,29 @@ MODULE_ALIAS_FS("ext3");
4488     MODULE_ALIAS("ext3");
4489     #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
4490    
4491     +/*
4492     + * This works like sb_bread() except it uses ERR_PTR for error
4493     + * returns. Currently with sb_bread it's impossible to distinguish
4494     + * between ENOMEM and EIO situations (since both result in a NULL
4495     + * return).
4496     + */
4497     +struct buffer_head *
4498     +ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
4499     +{
4500     + struct buffer_head *bh = sb_getblk(sb, block);
4501     +
4502     + if (bh == NULL)
4503     + return ERR_PTR(-ENOMEM);
4504     + if (buffer_uptodate(bh))
4505     + return bh;
4506     + ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
4507     + wait_on_buffer(bh);
4508     + if (buffer_uptodate(bh))
4509     + return bh;
4510     + put_bh(bh);
4511     + return ERR_PTR(-EIO);
4512     +}
4513     +
4514     static int ext4_verify_csum_type(struct super_block *sb,
4515     struct ext4_super_block *es)
4516     {
4517     @@ -1150,20 +1173,11 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
4518     {
4519     struct inode *inode;
4520    
4521     - if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
4522     - return ERR_PTR(-ESTALE);
4523     - if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4524     - return ERR_PTR(-ESTALE);
4525     -
4526     - /* iget isn't really right if the inode is currently unallocated!!
4527     - *
4528     - * ext4_read_inode will return a bad_inode if the inode had been
4529     - * deleted, so we should be safe.
4530     - *
4531     + /*
4532     * Currently we don't know the generation for parent directory, so
4533     * a generation of 0 means "accept any"
4534     */
4535     - inode = ext4_iget_normal(sb, ino);
4536     + inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
4537     if (IS_ERR(inode))
4538     return ERR_CAST(inode);
4539     if (generation && inode->i_generation != generation) {
4540     @@ -1188,6 +1202,16 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
4541     ext4_nfs_get_inode);
4542     }
4543    
4544     +static int ext4_nfs_commit_metadata(struct inode *inode)
4545     +{
4546     + struct writeback_control wbc = {
4547     + .sync_mode = WB_SYNC_ALL
4548     + };
4549     +
4550     + trace_ext4_nfs_commit_metadata(inode);
4551     + return ext4_write_inode(inode, &wbc);
4552     +}
4553     +
4554     /*
4555     * Try to release metadata pages (indirect blocks, directories) which are
4556     * mapped via the block device. Since these pages could have journal heads
4557     @@ -1392,6 +1416,7 @@ static const struct export_operations ext4_export_ops = {
4558     .fh_to_dentry = ext4_fh_to_dentry,
4559     .fh_to_parent = ext4_fh_to_parent,
4560     .get_parent = ext4_get_parent,
4561     + .commit_metadata = ext4_nfs_commit_metadata,
4562     };
4563    
4564     enum {
4565     @@ -4327,7 +4352,7 @@ no_journal:
4566     * so we can safely mount the rest of the filesystem now.
4567     */
4568    
4569     - root = ext4_iget(sb, EXT4_ROOT_INO);
4570     + root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
4571     if (IS_ERR(root)) {
4572     ext4_msg(sb, KERN_ERR, "get root inode failed");
4573     ret = PTR_ERR(root);
4574     @@ -4597,7 +4622,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
4575     * happen if we iget() an unused inode, as the subsequent iput()
4576     * will try to delete it.
4577     */
4578     - journal_inode = ext4_iget(sb, journal_inum);
4579     + journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
4580     if (IS_ERR(journal_inode)) {
4581     ext4_msg(sb, KERN_ERR, "no journal found");
4582     return NULL;
4583     @@ -5679,7 +5704,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
4584     if (!qf_inums[type])
4585     return -EPERM;
4586    
4587     - qf_inode = ext4_iget(sb, qf_inums[type]);
4588     + qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
4589     if (IS_ERR(qf_inode)) {
4590     ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
4591     return PTR_ERR(qf_inode);
4592     @@ -5689,9 +5714,9 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
4593     qf_inode->i_flags |= S_NOQUOTA;
4594     lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
4595     err = dquot_enable(qf_inode, type, format_id, flags);
4596     - iput(qf_inode);
4597     if (err)
4598     lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
4599     + iput(qf_inode);
4600    
4601     return err;
4602     }
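
The ext4_sb_bread() helper added above lets every converted call site tell an allocation failure (-ENOMEM) apart from a read failure (-EIO) instead of collapsing both into a NULL return. A minimal caller-side sketch of the new pattern (illustrative only, not part of the patch; "blocknr" is a placeholder):

        struct buffer_head *bh;

        bh = ext4_sb_bread(sb, blocknr, REQ_PRIO);   /* pass 0 if no extra op flags are needed */
        if (IS_ERR(bh))
                return PTR_ERR(bh);                  /* -ENOMEM or -EIO, no longer always -EIO */
        /* ... use bh->b_data ... */
        brelse(bh);

Call sites that must not treat an allocation failure as on-disk corruption, such as the xattr block cache lookup converted below, check PTR_ERR(bh) == -ENOMEM explicitly before reporting an error.
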
4603     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
4604     index 4380c8630539..c0ba5206cd9d 100644
4605     --- a/fs/ext4/xattr.c
4606     +++ b/fs/ext4/xattr.c
4607     @@ -384,7 +384,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
4608     struct inode *inode;
4609     int err;
4610    
4611     - inode = ext4_iget(parent->i_sb, ea_ino);
4612     + inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
4613     if (IS_ERR(inode)) {
4614     err = PTR_ERR(inode);
4615     ext4_error(parent->i_sb,
4616     @@ -522,14 +522,13 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
4617     ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
4618     name_index, name, buffer, (long)buffer_size);
4619    
4620     - error = -ENODATA;
4621     if (!EXT4_I(inode)->i_file_acl)
4622     - goto cleanup;
4623     + return -ENODATA;
4624     ea_idebug(inode, "reading block %llu",
4625     (unsigned long long)EXT4_I(inode)->i_file_acl);
4626     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4627     - if (!bh)
4628     - goto cleanup;
4629     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4630     + if (IS_ERR(bh))
4631     + return PTR_ERR(bh);
4632     ea_bdebug(bh, "b_count=%d, refcount=%d",
4633     atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
4634     error = ext4_xattr_check_block(inode, bh);
4635     @@ -696,26 +695,23 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
4636     ea_idebug(inode, "buffer=%p, buffer_size=%ld",
4637     buffer, (long)buffer_size);
4638    
4639     - error = 0;
4640     if (!EXT4_I(inode)->i_file_acl)
4641     - goto cleanup;
4642     + return 0;
4643     ea_idebug(inode, "reading block %llu",
4644     (unsigned long long)EXT4_I(inode)->i_file_acl);
4645     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4646     - error = -EIO;
4647     - if (!bh)
4648     - goto cleanup;
4649     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4650     + if (IS_ERR(bh))
4651     + return PTR_ERR(bh);
4652     ea_bdebug(bh, "b_count=%d, refcount=%d",
4653     atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
4654     error = ext4_xattr_check_block(inode, bh);
4655     if (error)
4656     goto cleanup;
4657     ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
4658     - error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
4659     -
4660     + error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
4661     + buffer_size);
4662     cleanup:
4663     brelse(bh);
4664     -
4665     return error;
4666     }
4667    
4668     @@ -830,9 +826,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
4669     }
4670    
4671     if (EXT4_I(inode)->i_file_acl) {
4672     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4673     - if (!bh) {
4674     - ret = -EIO;
4675     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4676     + if (IS_ERR(bh)) {
4677     + ret = PTR_ERR(bh);
4678     goto out;
4679     }
4680    
4681     @@ -1490,7 +1486,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
4682     }
4683    
4684     while (ce) {
4685     - ea_inode = ext4_iget(inode->i_sb, ce->e_value);
4686     + ea_inode = ext4_iget(inode->i_sb, ce->e_value,
4687     + EXT4_IGET_NORMAL);
4688     if (!IS_ERR(ea_inode) &&
4689     !is_bad_inode(ea_inode) &&
4690     (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
4691     @@ -1825,16 +1822,15 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
4692    
4693     if (EXT4_I(inode)->i_file_acl) {
4694     /* The inode already has an extended attribute block. */
4695     - bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
4696     - error = -EIO;
4697     - if (!bs->bh)
4698     - goto cleanup;
4699     + bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4700     + if (IS_ERR(bs->bh))
4701     + return PTR_ERR(bs->bh);
4702     ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
4703     atomic_read(&(bs->bh->b_count)),
4704     le32_to_cpu(BHDR(bs->bh)->h_refcount));
4705     error = ext4_xattr_check_block(inode, bs->bh);
4706     if (error)
4707     - goto cleanup;
4708     + return error;
4709     /* Find the named attribute. */
4710     bs->s.base = BHDR(bs->bh);
4711     bs->s.first = BFIRST(bs->bh);
4712     @@ -1843,13 +1839,10 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
4713     error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
4714     i->name_index, i->name, 1);
4715     if (error && error != -ENODATA)
4716     - goto cleanup;
4717     + return error;
4718     bs->s.not_found = error;
4719     }
4720     - error = 0;
4721     -
4722     -cleanup:
4723     - return error;
4724     + return 0;
4725     }
4726    
4727     static int
4728     @@ -2278,9 +2271,9 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
4729    
4730     if (!EXT4_I(inode)->i_file_acl)
4731     return NULL;
4732     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4733     - if (!bh)
4734     - return ERR_PTR(-EIO);
4735     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4736     + if (IS_ERR(bh))
4737     + return bh;
4738     error = ext4_xattr_check_block(inode, bh);
4739     if (error) {
4740     brelse(bh);
4741     @@ -2733,7 +2726,7 @@ retry:
4742     base = IFIRST(header);
4743     end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
4744     min_offs = end - base;
4745     - total_ino = sizeof(struct ext4_xattr_ibody_header);
4746     + total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
4747    
4748     error = xattr_check_inode(inode, header, end);
4749     if (error)
4750     @@ -2750,10 +2743,11 @@ retry:
4751     if (EXT4_I(inode)->i_file_acl) {
4752     struct buffer_head *bh;
4753    
4754     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4755     - error = -EIO;
4756     - if (!bh)
4757     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4758     + if (IS_ERR(bh)) {
4759     + error = PTR_ERR(bh);
4760     goto cleanup;
4761     + }
4762     error = ext4_xattr_check_block(inode, bh);
4763     if (error) {
4764     brelse(bh);
4765     @@ -2907,11 +2901,12 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
4766     }
4767    
4768     if (EXT4_I(inode)->i_file_acl) {
4769     - bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
4770     - if (!bh) {
4771     - EXT4_ERROR_INODE(inode, "block %llu read error",
4772     - EXT4_I(inode)->i_file_acl);
4773     - error = -EIO;
4774     + bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
4775     + if (IS_ERR(bh)) {
4776     + error = PTR_ERR(bh);
4777     + if (error == -EIO)
4778     + EXT4_ERROR_INODE(inode, "block %llu read error",
4779     + EXT4_I(inode)->i_file_acl);
4780     goto cleanup;
4781     }
4782     error = ext4_xattr_check_block(inode, bh);
4783     @@ -3064,8 +3059,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
4784     while (ce) {
4785     struct buffer_head *bh;
4786    
4787     - bh = sb_bread(inode->i_sb, ce->e_value);
4788     - if (!bh) {
4789     + bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
4790     + if (IS_ERR(bh)) {
4791     + if (PTR_ERR(bh) == -ENOMEM)
4792     + return NULL;
4793     EXT4_ERROR_INODE(inode, "block %lu read error",
4794     (unsigned long)ce->e_value);
4795     } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
4796     diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
4797     index 42ea42acb487..19a0d83aae65 100644
4798     --- a/fs/f2fs/node.c
4799     +++ b/fs/f2fs/node.c
4800     @@ -827,6 +827,7 @@ static int truncate_node(struct dnode_of_data *dn)
4801     struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
4802     struct node_info ni;
4803     int err;
4804     + pgoff_t index;
4805    
4806     err = f2fs_get_node_info(sbi, dn->nid, &ni);
4807     if (err)
4808     @@ -846,10 +847,11 @@ static int truncate_node(struct dnode_of_data *dn)
4809     clear_node_page_dirty(dn->node_page);
4810     set_sbi_flag(sbi, SBI_IS_DIRTY);
4811    
4812     + index = dn->node_page->index;
4813     f2fs_put_page(dn->node_page, 1);
4814    
4815     invalidate_mapping_pages(NODE_MAPPING(sbi),
4816     - dn->node_page->index, dn->node_page->index);
4817     + index, index);
4818    
4819     dn->node_page = NULL;
4820     trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
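
The truncate_node() hunk is a use-after-free fix: dn->node_page->index was read after f2fs_put_page() had already dropped the reference, at which point the page can be recycled. The ordering the fix enforces, shown in isolation (surrounding f2fs context assumed, only the ordering matters):

        pgoff_t index;

        index = dn->node_page->index;        /* copy out while the reference is still held */
        f2fs_put_page(dn->node_page, 1);     /* the page may be freed from here on          */

        invalidate_mapping_pages(NODE_MAPPING(sbi), index, index);   /* uses the saved copy */
        dn->node_page = NULL;
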
4821     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
4822     index 287c9fe9fff9..338138b34993 100644
4823     --- a/fs/f2fs/super.c
4824     +++ b/fs/f2fs/super.c
4825     @@ -2267,10 +2267,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
4826     return 1;
4827     }
4828    
4829     - if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
4830     + if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
4831     f2fs_msg(sb, KERN_INFO,
4832     - "Wrong segment_count / block_count (%u > %u)",
4833     - segment_count, le32_to_cpu(raw_super->block_count));
4834     + "Wrong segment_count / block_count (%u > %llu)",
4835     + segment_count, le64_to_cpu(raw_super->block_count));
4836     return 1;
4837     }
4838    
4839     diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
4840     index 77a010e625f5..087e53a2d96c 100644
4841     --- a/fs/f2fs/xattr.c
4842     +++ b/fs/f2fs/xattr.c
4843     @@ -291,7 +291,7 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
4844     static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
4845     unsigned int index, unsigned int len,
4846     const char *name, struct f2fs_xattr_entry **xe,
4847     - void **base_addr)
4848     + void **base_addr, int *base_size)
4849     {
4850     void *cur_addr, *txattr_addr, *last_addr = NULL;
4851     nid_t xnid = F2FS_I(inode)->i_xattr_nid;
4852     @@ -302,8 +302,8 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
4853     if (!size && !inline_size)
4854     return -ENODATA;
4855    
4856     - txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
4857     - inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
4858     + *base_size = inline_size + size + XATTR_PADDING_SIZE;
4859     + txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
4860     if (!txattr_addr)
4861     return -ENOMEM;
4862    
4863     @@ -315,8 +315,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
4864    
4865     *xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
4866     index, len, name);
4867     - if (*xe)
4868     + if (*xe) {
4869     + *base_size = inline_size;
4870     goto check;
4871     + }
4872     }
4873    
4874     /* read from xattr node block */
4875     @@ -477,6 +479,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
4876     int error = 0;
4877     unsigned int size, len;
4878     void *base_addr = NULL;
4879     + int base_size;
4880    
4881     if (name == NULL)
4882     return -EINVAL;
4883     @@ -487,7 +490,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
4884    
4885     down_read(&F2FS_I(inode)->i_xattr_sem);
4886     error = lookup_all_xattrs(inode, ipage, index, len, name,
4887     - &entry, &base_addr);
4888     + &entry, &base_addr, &base_size);
4889     up_read(&F2FS_I(inode)->i_xattr_sem);
4890     if (error)
4891     return error;
4892     @@ -501,6 +504,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
4893    
4894     if (buffer) {
4895     char *pval = entry->e_name + entry->e_name_len;
4896     +
4897     + if (base_size - (pval - (char *)base_addr) < size) {
4898     + error = -ERANGE;
4899     + goto out;
4900     + }
4901     memcpy(buffer, pval, size);
4902     }
4903     error = size;
4904     diff --git a/include/linux/msi.h b/include/linux/msi.h
4905     index 5839d8062dfc..be8ec813dbfb 100644
4906     --- a/include/linux/msi.h
4907     +++ b/include/linux/msi.h
4908     @@ -116,6 +116,8 @@ struct msi_desc {
4909     list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
4910     #define for_each_msi_entry(desc, dev) \
4911     list_for_each_entry((desc), dev_to_msi_list((dev)), list)
4912     +#define for_each_msi_entry_safe(desc, tmp, dev) \
4913     + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
4914    
4915     #ifdef CONFIG_PCI_MSI
4916     #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
4917     diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
4918     index 6894976b54e3..186cd8e970c7 100644
4919     --- a/include/linux/ptr_ring.h
4920     +++ b/include/linux/ptr_ring.h
4921     @@ -573,6 +573,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
4922     else if (destroy)
4923     destroy(ptr);
4924    
4925     + if (producer >= size)
4926     + producer = 0;
4927     __ptr_ring_set_size(r, size);
4928     r->producer = producer;
4929     r->consumer_head = 0;
4930     diff --git a/include/media/cec.h b/include/media/cec.h
4931     index 9b7394a74dca..dc4b412e8fa1 100644
4932     --- a/include/media/cec.h
4933     +++ b/include/media/cec.h
4934     @@ -155,6 +155,7 @@ struct cec_adapter {
4935     unsigned int transmit_queue_sz;
4936     struct list_head wait_queue;
4937     struct cec_data *transmitting;
4938     + bool transmit_in_progress;
4939    
4940     struct task_struct *kthread_config;
4941     struct completion config_completion;
4942     diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
4943     index b0d022ff6ea1..e11423530d64 100644
4944     --- a/include/net/ip_tunnels.h
4945     +++ b/include/net/ip_tunnels.h
4946     @@ -326,6 +326,26 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
4947     int ip_tunnel_encap_setup(struct ip_tunnel *t,
4948     struct ip_tunnel_encap *ipencap);
4949    
4950     +static inline bool pskb_inet_may_pull(struct sk_buff *skb)
4951     +{
4952     + int nhlen;
4953     +
4954     + switch (skb->protocol) {
4955     +#if IS_ENABLED(CONFIG_IPV6)
4956     + case htons(ETH_P_IPV6):
4957     + nhlen = sizeof(struct ipv6hdr);
4958     + break;
4959     +#endif
4960     + case htons(ETH_P_IP):
4961     + nhlen = sizeof(struct iphdr);
4962     + break;
4963     + default:
4964     + nhlen = 0;
4965     + }
4966     +
4967     + return pskb_network_may_pull(skb, nhlen);
4968     +}
4969     +
4970     static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
4971     {
4972     const struct ip_tunnel_encap_ops *ops;
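
pskb_inet_may_pull() centralizes the inner-header pull check that the tunnel drivers patched below either open-coded or skipped entirely. Each affected transmit handler now starts with the same guard; a minimal sketch of the calling pattern (the function name and label are illustrative, not from the patch):

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>
        #include <net/ip_tunnels.h>

        static netdev_tx_t example_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                /* Make sure the inner IPv4/IPv6 header is in the linear area
                 * before anything below dereferences it.
                 */
                if (!pskb_inet_may_pull(skb))
                        goto tx_err;

                /* ... build and send the encapsulated packet ... */
                return NETDEV_TX_OK;

        tx_err:
                dev->stats.tx_errors++;
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
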
4973     diff --git a/include/net/sock.h b/include/net/sock.h
4974     index f18dbd6da906..6cb5a545df7d 100644
4975     --- a/include/net/sock.h
4976     +++ b/include/net/sock.h
4977     @@ -298,6 +298,7 @@ struct sock_common {
4978     * @sk_filter: socket filtering instructions
4979     * @sk_timer: sock cleanup timer
4980     * @sk_stamp: time stamp of last packet received
4981     + * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
4982     * @sk_tsflags: SO_TIMESTAMPING socket options
4983     * @sk_tskey: counter to disambiguate concurrent tstamp requests
4984     * @sk_zckey: counter to order MSG_ZEROCOPY notifications
4985     @@ -474,6 +475,9 @@ struct sock {
4986     const struct cred *sk_peer_cred;
4987     long sk_rcvtimeo;
4988     ktime_t sk_stamp;
4989     +#if BITS_PER_LONG==32
4990     + seqlock_t sk_stamp_seq;
4991     +#endif
4992     u16 sk_tsflags;
4993     u8 sk_shutdown;
4994     u32 sk_tskey;
4995     @@ -2290,6 +2294,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
4996     atomic_add(segs, &sk->sk_drops);
4997     }
4998    
4999     +static inline ktime_t sock_read_timestamp(struct sock *sk)
5000     +{
5001     +#if BITS_PER_LONG==32
5002     + unsigned int seq;
5003     + ktime_t kt;
5004     +
5005     + do {
5006     + seq = read_seqbegin(&sk->sk_stamp_seq);
5007     + kt = sk->sk_stamp;
5008     + } while (read_seqretry(&sk->sk_stamp_seq, seq));
5009     +
5010     + return kt;
5011     +#else
5012     + return sk->sk_stamp;
5013     +#endif
5014     +}
5015     +
5016     +static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
5017     +{
5018     +#if BITS_PER_LONG==32
5019     + write_seqlock(&sk->sk_stamp_seq);
5020     + sk->sk_stamp = kt;
5021     + write_sequnlock(&sk->sk_stamp_seq);
5022     +#else
5023     + sk->sk_stamp = kt;
5024     +#endif
5025     +}
5026     +
5027     void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
5028     struct sk_buff *skb);
5029     void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
5030     @@ -2314,7 +2346,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
5031     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
5032     __sock_recv_timestamp(msg, sk, skb);
5033     else
5034     - sk->sk_stamp = kt;
5035     + sock_write_timestamp(sk, kt);
5036    
5037     if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
5038     __sock_recv_wifi_status(msg, sk, skb);
5039     @@ -2335,9 +2367,9 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
5040     if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
5041     __sock_recv_ts_and_drops(msg, sk, skb);
5042     else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
5043     - sk->sk_stamp = skb->tstamp;
5044     + sock_write_timestamp(sk, skb->tstamp);
5045     else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
5046     - sk->sk_stamp = 0;
5047     + sock_write_timestamp(sk, 0);
5048     }
5049    
5050     void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
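
On 32-bit architectures a 64-bit ktime_t load or store is not atomic, so a reader racing with sock_write_timestamp() could see a torn value; the seqlock makes the reader retry instead of taking a lock on the fast path. The accessors above are the whole mechanism; as a self-contained illustration of the same reader/writer discipline (generic seqlock API, variable names are illustrative, this is not sock.h code):

        #include <linux/seqlock.h>
        #include <linux/types.h>

        static DEFINE_SEQLOCK(stamp_seq);
        static u64 stamp;       /* 64-bit value that 32-bit CPUs cannot access atomically */

        static void stamp_write(u64 val)
        {
                write_seqlock(&stamp_seq);      /* serializes writers, bumps the sequence */
                stamp = val;
                write_sequnlock(&stamp_seq);
        }

        static u64 stamp_read(void)
        {
                unsigned int seq;
                u64 val;

                do {
                        seq = read_seqbegin(&stamp_seq);
                        val = stamp;                            /* may race with a writer ... */
                } while (read_seqretry(&stamp_seq, seq));       /* ... so retry if one intervened */

                return val;
        }
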
5051     diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
5052     index 0e31eb136c57..0dfb174f707e 100644
5053     --- a/include/trace/events/ext4.h
5054     +++ b/include/trace/events/ext4.h
5055     @@ -225,6 +225,26 @@ TRACE_EVENT(ext4_drop_inode,
5056     (unsigned long) __entry->ino, __entry->drop)
5057     );
5058    
5059     +TRACE_EVENT(ext4_nfs_commit_metadata,
5060     + TP_PROTO(struct inode *inode),
5061     +
5062     + TP_ARGS(inode),
5063     +
5064     + TP_STRUCT__entry(
5065     + __field( dev_t, dev )
5066     + __field( ino_t, ino )
5067     + ),
5068     +
5069     + TP_fast_assign(
5070     + __entry->dev = inode->i_sb->s_dev;
5071     + __entry->ino = inode->i_ino;
5072     + ),
5073     +
5074     + TP_printk("dev %d,%d ino %lu",
5075     + MAJOR(__entry->dev), MINOR(__entry->dev),
5076     + (unsigned long) __entry->ino)
5077     +);
5078     +
5079     TRACE_EVENT(ext4_mark_inode_dirty,
5080     TP_PROTO(struct inode *inode, unsigned long IP),
5081    
5082     diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
5083     index 97ff3c17ec4d..e5b39721c6e4 100644
5084     --- a/include/uapi/linux/net_tstamp.h
5085     +++ b/include/uapi/linux/net_tstamp.h
5086     @@ -155,8 +155,8 @@ enum txtime_flags {
5087     };
5088    
5089     struct sock_txtime {
5090     - clockid_t clockid; /* reference clockid */
5091     - __u32 flags; /* as defined by enum txtime_flags */
5092     + __kernel_clockid_t clockid;/* reference clockid */
5093     + __u32 flags; /* as defined by enum txtime_flags */
5094     };
5095    
5096     #endif /* _NET_TIMESTAMPING_H */
5097     diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
5098     index 4a3dae2a8283..1aa517908561 100644
5099     --- a/kernel/cgroup/cgroup.c
5100     +++ b/kernel/cgroup/cgroup.c
5101     @@ -4186,20 +4186,25 @@ static void css_task_iter_advance(struct css_task_iter *it)
5102    
5103     lockdep_assert_held(&css_set_lock);
5104     repeat:
5105     - /*
5106     - * Advance iterator to find next entry. cset->tasks is consumed
5107     - * first and then ->mg_tasks. After ->mg_tasks, we move onto the
5108     - * next cset.
5109     - */
5110     - next = it->task_pos->next;
5111     + if (it->task_pos) {
5112     + /*
5113     + * Advance iterator to find next entry. cset->tasks is
5114     + * consumed first and then ->mg_tasks. After ->mg_tasks,
5115     + * we move onto the next cset.
5116     + */
5117     + next = it->task_pos->next;
5118    
5119     - if (next == it->tasks_head)
5120     - next = it->mg_tasks_head->next;
5121     + if (next == it->tasks_head)
5122     + next = it->mg_tasks_head->next;
5123    
5124     - if (next == it->mg_tasks_head)
5125     + if (next == it->mg_tasks_head)
5126     + css_task_iter_advance_css_set(it);
5127     + else
5128     + it->task_pos = next;
5129     + } else {
5130     + /* called from start, proceed to the first cset */
5131     css_task_iter_advance_css_set(it);
5132     - else
5133     - it->task_pos = next;
5134     + }
5135    
5136     /* if PROCS, skip over tasks which aren't group leaders */
5137     if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
5138     @@ -4239,7 +4244,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
5139    
5140     it->cset_head = it->cset_pos;
5141    
5142     - css_task_iter_advance_css_set(it);
5143     + css_task_iter_advance(it);
5144    
5145     spin_unlock_irq(&css_set_lock);
5146     }
5147     diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
5148     index c603d33d5410..5d01edf8d819 100644
5149     --- a/net/ax25/af_ax25.c
5150     +++ b/net/ax25/af_ax25.c
5151     @@ -653,15 +653,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
5152     break;
5153     }
5154    
5155     - dev = dev_get_by_name(&init_net, devname);
5156     + rtnl_lock();
5157     + dev = __dev_get_by_name(&init_net, devname);
5158     if (!dev) {
5159     + rtnl_unlock();
5160     res = -ENODEV;
5161     break;
5162     }
5163    
5164     ax25->ax25_dev = ax25_dev_ax25dev(dev);
5165     + if (!ax25->ax25_dev) {
5166     + rtnl_unlock();
5167     + res = -ENODEV;
5168     + break;
5169     + }
5170     ax25_fillin_cb(ax25, ax25->ax25_dev);
5171     - dev_put(dev);
5172     + rtnl_unlock();
5173     break;
5174    
5175     default:
5176     diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
5177     index 9a3a301e1e2f..d92195cd7834 100644
5178     --- a/net/ax25/ax25_dev.c
5179     +++ b/net/ax25/ax25_dev.c
5180     @@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_device *dev)
5181     if ((s = ax25_dev_list) == ax25_dev) {
5182     ax25_dev_list = s->next;
5183     spin_unlock_bh(&ax25_dev_lock);
5184     + dev->ax25_ptr = NULL;
5185     dev_put(dev);
5186     kfree(ax25_dev);
5187     return;
5188     @@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_device *dev)
5189     if (s->next == ax25_dev) {
5190     s->next = ax25_dev->next;
5191     spin_unlock_bh(&ax25_dev_lock);
5192     + dev->ax25_ptr = NULL;
5193     dev_put(dev);
5194     kfree(ax25_dev);
5195     return;
5196     diff --git a/net/compat.c b/net/compat.c
5197     index 3b2105f6549d..3c4b0283b29a 100644
5198     --- a/net/compat.c
5199     +++ b/net/compat.c
5200     @@ -467,12 +467,14 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
5201     ctv = (struct compat_timeval __user *) userstamp;
5202     err = -ENOENT;
5203     sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5204     - tv = ktime_to_timeval(sk->sk_stamp);
5205     + tv = ktime_to_timeval(sock_read_timestamp(sk));
5206     +
5207     if (tv.tv_sec == -1)
5208     return err;
5209     if (tv.tv_sec == 0) {
5210     - sk->sk_stamp = ktime_get_real();
5211     - tv = ktime_to_timeval(sk->sk_stamp);
5212     + ktime_t kt = ktime_get_real();
5213     + sock_write_timestamp(sk, kt);
5214     + tv = ktime_to_timeval(kt);
5215     }
5216     err = 0;
5217     if (put_user(tv.tv_sec, &ctv->tv_sec) ||
5218     @@ -494,12 +496,13 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
5219     ctv = (struct compat_timespec __user *) userstamp;
5220     err = -ENOENT;
5221     sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5222     - ts = ktime_to_timespec(sk->sk_stamp);
5223     + ts = ktime_to_timespec(sock_read_timestamp(sk));
5224     if (ts.tv_sec == -1)
5225     return err;
5226     if (ts.tv_sec == 0) {
5227     - sk->sk_stamp = ktime_get_real();
5228     - ts = ktime_to_timespec(sk->sk_stamp);
5229     + ktime_t kt = ktime_get_real();
5230     + sock_write_timestamp(sk, kt);
5231     + ts = ktime_to_timespec(kt);
5232     }
5233     err = 0;
5234     if (put_user(ts.tv_sec, &ctv->tv_sec) ||
5235     diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
5236     index 4b54e5f107c6..acf45ddbe924 100644
5237     --- a/net/core/gro_cells.c
5238     +++ b/net/core/gro_cells.c
5239     @@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
5240     for_each_possible_cpu(i) {
5241     struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
5242    
5243     + napi_disable(&cell->napi);
5244     netif_napi_del(&cell->napi);
5245     __skb_queue_purge(&cell->napi_skbs);
5246     }
5247     diff --git a/net/core/sock.c b/net/core/sock.c
5248     index 748765e35423..5a8a3b76832f 100644
5249     --- a/net/core/sock.c
5250     +++ b/net/core/sock.c
5251     @@ -2803,6 +2803,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
5252     sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
5253    
5254     sk->sk_stamp = SK_DEFAULT_STAMP;
5255     +#if BITS_PER_LONG==32
5256     + seqlock_init(&sk->sk_stamp_seq);
5257     +#endif
5258     atomic_set(&sk->sk_zckey, 0);
5259    
5260     #ifdef CONFIG_NET_RX_BUSY_POLL
5261     @@ -2902,12 +2905,13 @@ int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
5262     struct timeval tv;
5263    
5264     sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5265     - tv = ktime_to_timeval(sk->sk_stamp);
5266     + tv = ktime_to_timeval(sock_read_timestamp(sk));
5267     if (tv.tv_sec == -1)
5268     return -ENOENT;
5269     if (tv.tv_sec == 0) {
5270     - sk->sk_stamp = ktime_get_real();
5271     - tv = ktime_to_timeval(sk->sk_stamp);
5272     + ktime_t kt = ktime_get_real();
5273     + sock_write_timestamp(sk, kt);
5274     + tv = ktime_to_timeval(kt);
5275     }
5276     return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
5277     }
5278     @@ -2918,11 +2922,12 @@ int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
5279     struct timespec ts;
5280    
5281     sock_enable_timestamp(sk, SOCK_TIMESTAMP);
5282     - ts = ktime_to_timespec(sk->sk_stamp);
5283     + ts = ktime_to_timespec(sock_read_timestamp(sk));
5284     if (ts.tv_sec == -1)
5285     return -ENOENT;
5286     if (ts.tv_sec == 0) {
5287     - sk->sk_stamp = ktime_get_real();
5288     + ktime_t kt = ktime_get_real();
5289     + sock_write_timestamp(sk, kt);
5290     ts = ktime_to_timespec(sk->sk_stamp);
5291     }
5292     return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
5293     diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
5294     index ca53efa17be1..8bec827081cd 100644
5295     --- a/net/ieee802154/6lowpan/tx.c
5296     +++ b/net/ieee802154/6lowpan/tx.c
5297     @@ -48,6 +48,9 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
5298     const struct ipv6hdr *hdr = ipv6_hdr(skb);
5299     struct neighbour *n;
5300    
5301     + if (!daddr)
5302     + return -EINVAL;
5303     +
5304     /* TODO:
5305     * if this package isn't ipv6 one, where should it be routed?
5306     */
5307     diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
5308     index 4e5bc4b2f14e..1a4e9ff02762 100644
5309     --- a/net/ipv4/inet_diag.c
5310     +++ b/net/ipv4/inet_diag.c
5311     @@ -998,7 +998,9 @@ next_chunk:
5312     if (!inet_diag_bc_sk(bc, sk))
5313     goto next_normal;
5314    
5315     - sock_hold(sk);
5316     + if (!refcount_inc_not_zero(&sk->sk_refcnt))
5317     + goto next_normal;
5318     +
5319     num_arr[accum] = num;
5320     sk_arr[accum] = sk;
5321     if (++accum == SKARR_SZ)
5322     diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
5323     index 32662e9e5d21..d5984d31ab93 100644
5324     --- a/net/ipv4/ip_forward.c
5325     +++ b/net/ipv4/ip_forward.c
5326     @@ -72,6 +72,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
5327     if (unlikely(opt->optlen))
5328     ip_forward_options(skb);
5329    
5330     + skb->tstamp = 0;
5331     return dst_output(net, sk, skb);
5332     }
5333    
5334     diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
5335     index f686d7761acb..f8bbd693c19c 100644
5336     --- a/net/ipv4/ip_fragment.c
5337     +++ b/net/ipv4/ip_fragment.c
5338     @@ -347,10 +347,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
5339     struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
5340     struct rb_node **rbn, *parent;
5341     struct sk_buff *skb1, *prev_tail;
5342     + int ihl, end, skb1_run_end;
5343     struct net_device *dev;
5344     unsigned int fragsize;
5345     int flags, offset;
5346     - int ihl, end;
5347     int err = -ENOENT;
5348     u8 ecn;
5349    
5350     @@ -420,7 +420,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
5351     * overlapping fragment, the entire datagram (and any constituent
5352     * fragments) MUST be silently discarded.
5353     *
5354     - * We do the same here for IPv4 (and increment an snmp counter).
5355     + * We do the same here for IPv4 (and increment an snmp counter) but
5356     + * we do not want to drop the whole queue in response to a duplicate
5357     + * fragment.
5358     */
5359    
5360     /* Find out where to put this fragment. */
5361     @@ -444,13 +446,17 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
5362     do {
5363     parent = *rbn;
5364     skb1 = rb_to_skb(parent);
5365     + skb1_run_end = skb1->ip_defrag_offset +
5366     + FRAG_CB(skb1)->frag_run_len;
5367     if (end <= skb1->ip_defrag_offset)
5368     rbn = &parent->rb_left;
5369     - else if (offset >= skb1->ip_defrag_offset +
5370     - FRAG_CB(skb1)->frag_run_len)
5371     + else if (offset >= skb1_run_end)
5372     rbn = &parent->rb_right;
5373     - else /* Found an overlap with skb1. */
5374     - goto discard_qp;
5375     + else if (offset >= skb1->ip_defrag_offset &&
5376     + end <= skb1_run_end)
5377     + goto err; /* No new data, potential duplicate */
5378     + else
5379     + goto discard_qp; /* Found an overlap */
5380     } while (*rbn);
5381     /* Here we have parent properly set, and rbn pointing to
5382     * one of its NULL left/right children. Insert skb.
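
The reassembly change distinguishes an exact or fully contained duplicate fragment (harmless, drop only that fragment via "goto err") from a genuine overlap (drop the whole queue via "goto discard_qp", as RFC 5722 does for IPv6). The decision against an existing run [run_start, run_end) reduces to the comparisons below (a standalone sketch of the same logic; names are illustrative):

        /* Classify a new fragment [offset, end) against an existing run
         * [run_start, run_end) stored in the rbtree.
         */
        enum frag_verdict { GO_LEFT, GO_RIGHT, DUPLICATE, OVERLAP };

        static enum frag_verdict classify(int offset, int end, int run_start, int run_end)
        {
                if (end <= run_start)
                        return GO_LEFT;         /* entirely before the run                */
                if (offset >= run_end)
                        return GO_RIGHT;        /* entirely after the run                 */
                if (offset >= run_start && end <= run_end)
                        return DUPLICATE;       /* no new data: drop this fragment only   */
                return OVERLAP;                 /* partial overlap: drop the whole queue  */
        }
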
5383     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
5384     index 8cce0e9ea08c..5ef5df3a06f1 100644
5385     --- a/net/ipv4/ip_gre.c
5386     +++ b/net/ipv4/ip_gre.c
5387     @@ -677,6 +677,9 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
5388     struct ip_tunnel *tunnel = netdev_priv(dev);
5389     const struct iphdr *tnl_params;
5390    
5391     + if (!pskb_inet_may_pull(skb))
5392     + goto free_skb;
5393     +
5394     if (tunnel->collect_md) {
5395     gre_fb_xmit(skb, dev, skb->protocol);
5396     return NETDEV_TX_OK;
5397     @@ -720,6 +723,9 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
5398     struct ip_tunnel *tunnel = netdev_priv(dev);
5399     bool truncate = false;
5400    
5401     + if (!pskb_inet_may_pull(skb))
5402     + goto free_skb;
5403     +
5404     if (tunnel->collect_md) {
5405     erspan_fb_xmit(skb, dev, skb->protocol);
5406     return NETDEV_TX_OK;
5407     @@ -763,6 +769,9 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
5408     {
5409     struct ip_tunnel *tunnel = netdev_priv(dev);
5410    
5411     + if (!pskb_inet_may_pull(skb))
5412     + goto free_skb;
5413     +
5414     if (tunnel->collect_md) {
5415     gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
5416     return NETDEV_TX_OK;
5417     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
5418     index 284a22154b4e..c4f5602308ed 100644
5419     --- a/net/ipv4/ip_tunnel.c
5420     +++ b/net/ipv4/ip_tunnel.c
5421     @@ -627,7 +627,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5422     const struct iphdr *tnl_params, u8 protocol)
5423     {
5424     struct ip_tunnel *tunnel = netdev_priv(dev);
5425     - unsigned int inner_nhdr_len = 0;
5426     const struct iphdr *inner_iph;
5427     struct flowi4 fl4;
5428     u8 tos, ttl;
5429     @@ -637,14 +636,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
5430     __be32 dst;
5431     bool connected;
5432    
5433     - /* ensure we can access the inner net header, for several users below */
5434     - if (skb->protocol == htons(ETH_P_IP))
5435     - inner_nhdr_len = sizeof(struct iphdr);
5436     - else if (skb->protocol == htons(ETH_P_IPV6))
5437     - inner_nhdr_len = sizeof(struct ipv6hdr);
5438     - if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
5439     - goto tx_error;
5440     -
5441     inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
5442     connected = (tunnel->parms.iph.daddr != 0);
5443    
5444     diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
5445     index f38cb21d773d..7f56944b020f 100644
5446     --- a/net/ipv4/ip_vti.c
5447     +++ b/net/ipv4/ip_vti.c
5448     @@ -241,6 +241,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
5449     struct ip_tunnel *tunnel = netdev_priv(dev);
5450     struct flowi fl;
5451    
5452     + if (!pskb_inet_may_pull(skb))
5453     + goto tx_err;
5454     +
5455     memset(&fl, 0, sizeof(fl));
5456    
5457     switch (skb->protocol) {
5458     @@ -253,15 +256,18 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
5459     memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
5460     break;
5461     default:
5462     - dev->stats.tx_errors++;
5463     - dev_kfree_skb(skb);
5464     - return NETDEV_TX_OK;
5465     + goto tx_err;
5466     }
5467    
5468     /* override mark with tunnel output key */
5469     fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
5470    
5471     return vti_xmit(skb, dev, &fl);
5472     +
5473     +tx_err:
5474     + dev->stats.tx_errors++;
5475     + kfree_skb(skb);
5476     + return NETDEV_TX_OK;
5477     }
5478    
5479     static int vti4_err(struct sk_buff *skb, u32 info)
5480     diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
5481     index 5660adcf7a04..f6275aa19b6a 100644
5482     --- a/net/ipv4/ipmr.c
5483     +++ b/net/ipv4/ipmr.c
5484     @@ -69,6 +69,8 @@
5485     #include <net/nexthop.h>
5486     #include <net/switchdev.h>
5487    
5488     +#include <linux/nospec.h>
5489     +
5490     struct ipmr_rule {
5491     struct fib_rule common;
5492     };
5493     @@ -1612,6 +1614,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
5494     return -EFAULT;
5495     if (vr.vifi >= mrt->maxvif)
5496     return -EINVAL;
5497     + vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
5498     read_lock(&mrt_lock);
5499     vif = &mrt->vif_table[vr.vifi];
5500     if (VIF_EXISTS(mrt, vr.vifi)) {
5501     @@ -1686,6 +1689,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
5502     return -EFAULT;
5503     if (vr.vifi >= mrt->maxvif)
5504     return -EINVAL;
5505     + vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
5506     read_lock(&mrt_lock);
5507     vif = &mrt->vif_table[vr.vifi];
5508     if (VIF_EXISTS(mrt, vr.vifi)) {
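
Both ipmr ioctl paths (and the ip6mr paths further down, which gain the same include) take an index from userspace, bounds-check it, and then use it to index a kernel table; under speculative execution the out-of-bounds load can still be issued before the check retires. array_index_nospec() clamps the index with a data dependency so even the speculated access stays in range. The generic pattern, not the ipmr code (function and variable names are illustrative):

        #include <linux/errno.h>
        #include <linux/nospec.h>

        static long lookup(unsigned int idx, unsigned int nr_entries, const long *table)
        {
                if (idx >= nr_entries)
                        return -EINVAL;
                /* After the bounds check, clamp idx so that a speculatively
                 * executed load cannot index past nr_entries - 1.
                 */
                idx = array_index_nospec(idx, nr_entries);
                return table[idx];
        }
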
5509     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
5510     index 4e81ff2f4588..3dfc50cd86d6 100644
5511     --- a/net/ipv6/addrconf.c
5512     +++ b/net/ipv6/addrconf.c
5513     @@ -4711,8 +4711,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
5514     IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
5515    
5516     idev = ipv6_find_idev(dev);
5517     - if (IS_ERR(idev))
5518     - return PTR_ERR(idev);
5519     + if (!idev)
5520     + return -ENOBUFS;
5521    
5522     if (!ipv6_allow_optimistic_dad(net, idev))
5523     cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
5524     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
5525     index e493b041d4ac..c270726b01b0 100644
5526     --- a/net/ipv6/ip6_gre.c
5527     +++ b/net/ipv6/ip6_gre.c
5528     @@ -897,6 +897,9 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
5529     struct net_device_stats *stats = &t->dev->stats;
5530     int ret;
5531    
5532     + if (!pskb_inet_may_pull(skb))
5533     + goto tx_err;
5534     +
5535     if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
5536     goto tx_err;
5537    
5538     @@ -939,6 +942,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5539     int nhoff;
5540     int thoff;
5541    
5542     + if (!pskb_inet_may_pull(skb))
5543     + goto tx_err;
5544     +
5545     if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
5546     goto tx_err;
5547    
5548     @@ -1011,8 +1017,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5549     goto tx_err;
5550     }
5551     } else {
5552     - struct ipv6hdr *ipv6h = ipv6_hdr(skb);
5553     -
5554     switch (skb->protocol) {
5555     case htons(ETH_P_IP):
5556     memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
5557     @@ -1020,7 +1024,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
5558     &dsfield, &encap_limit);
5559     break;
5560     case htons(ETH_P_IPV6):
5561     - if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
5562     + if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
5563     goto tx_err;
5564     if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
5565     &dsfield, &encap_limit))
5566     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
5567     index 2694def1e72c..0bb87f3a10c7 100644
5568     --- a/net/ipv6/ip6_output.c
5569     +++ b/net/ipv6/ip6_output.c
5570     @@ -378,6 +378,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
5571     __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
5572     __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
5573    
5574     + skb->tstamp = 0;
5575     return dst_output(net, sk, skb);
5576     }
5577    
5578     diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
5579     index a9d06d4dd057..0c6403cf8b52 100644
5580     --- a/net/ipv6/ip6_tunnel.c
5581     +++ b/net/ipv6/ip6_tunnel.c
5582     @@ -901,6 +901,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
5583     goto drop;
5584     if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
5585     goto drop;
5586     + ipv6h = ipv6_hdr(skb);
5587     if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
5588     goto drop;
5589     if (iptunnel_pull_header(skb, 0, tpi->proto, false))
5590     @@ -1242,10 +1243,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
5591     u8 tproto;
5592     int err;
5593    
5594     - /* ensure we can access the full inner ip header */
5595     - if (!pskb_may_pull(skb, sizeof(struct iphdr)))
5596     - return -1;
5597     -
5598     iph = ip_hdr(skb);
5599     memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
5600    
5601     @@ -1320,9 +1317,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
5602     u8 tproto;
5603     int err;
5604    
5605     - if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
5606     - return -1;
5607     -
5608     ipv6h = ipv6_hdr(skb);
5609     tproto = READ_ONCE(t->parms.proto);
5610     if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
5611     @@ -1404,6 +1398,9 @@ ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
5612     struct net_device_stats *stats = &t->dev->stats;
5613     int ret;
5614    
5615     + if (!pskb_inet_may_pull(skb))
5616     + goto tx_err;
5617     +
5618     switch (skb->protocol) {
5619     case htons(ETH_P_IP):
5620     ret = ip4ip6_tnl_xmit(skb, dev);
5621     diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
5622     index b283f293ee4a..caad40d6e74d 100644
5623     --- a/net/ipv6/ip6_udp_tunnel.c
5624     +++ b/net/ipv6/ip6_udp_tunnel.c
5625     @@ -15,7 +15,7 @@
5626     int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
5627     struct socket **sockp)
5628     {
5629     - struct sockaddr_in6 udp6_addr;
5630     + struct sockaddr_in6 udp6_addr = {};
5631     int err;
5632     struct socket *sock = NULL;
5633    
5634     @@ -42,6 +42,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
5635     goto error;
5636    
5637     if (cfg->peer_udp_port) {
5638     + memset(&udp6_addr, 0, sizeof(udp6_addr));
5639     udp6_addr.sin6_family = AF_INET6;
5640     memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
5641     sizeof(udp6_addr.sin6_addr));
5642     diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
5643     index eeaf7455d51e..8b6eefff2f7e 100644
5644     --- a/net/ipv6/ip6_vti.c
5645     +++ b/net/ipv6/ip6_vti.c
5646     @@ -318,6 +318,7 @@ static int vti6_rcv(struct sk_buff *skb)
5647     return 0;
5648     }
5649    
5650     + ipv6h = ipv6_hdr(skb);
5651     if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
5652     t->dev->stats.rx_dropped++;
5653     rcu_read_unlock();
5654     @@ -521,18 +522,18 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
5655     {
5656     struct ip6_tnl *t = netdev_priv(dev);
5657     struct net_device_stats *stats = &t->dev->stats;
5658     - struct ipv6hdr *ipv6h;
5659     struct flowi fl;
5660     int ret;
5661    
5662     + if (!pskb_inet_may_pull(skb))
5663     + goto tx_err;
5664     +
5665     memset(&fl, 0, sizeof(fl));
5666    
5667     switch (skb->protocol) {
5668     case htons(ETH_P_IPV6):
5669     - ipv6h = ipv6_hdr(skb);
5670     -
5671     if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
5672     - vti6_addr_conflict(t, ipv6h))
5673     + vti6_addr_conflict(t, ipv6_hdr(skb)))
5674     goto tx_err;
5675    
5676     xfrm_decode_session(skb, &fl, AF_INET6);
5677     diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
5678     index d0b7e0249c13..331e6b6dd252 100644
5679     --- a/net/ipv6/ip6mr.c
5680     +++ b/net/ipv6/ip6mr.c
5681     @@ -51,6 +51,9 @@
5682     #include <linux/export.h>
5683     #include <net/ip6_checksum.h>
5684     #include <linux/netconf.h>
5685     +#include <net/ip_tunnels.h>
5686     +
5687     +#include <linux/nospec.h>
5688    
5689     struct ip6mr_rule {
5690     struct fib_rule common;
5691     @@ -591,13 +594,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
5692     .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
5693     .flowi6_mark = skb->mark,
5694     };
5695     - int err;
5696    
5697     - err = ip6mr_fib_lookup(net, &fl6, &mrt);
5698     - if (err < 0) {
5699     - kfree_skb(skb);
5700     - return err;
5701     - }
5702     + if (!pskb_inet_may_pull(skb))
5703     + goto tx_err;
5704     +
5705     + if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
5706     + goto tx_err;
5707    
5708     read_lock(&mrt_lock);
5709     dev->stats.tx_bytes += skb->len;
5710     @@ -606,6 +608,11 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
5711     read_unlock(&mrt_lock);
5712     kfree_skb(skb);
5713     return NETDEV_TX_OK;
5714     +
5715     +tx_err:
5716     + dev->stats.tx_errors++;
5717     + kfree_skb(skb);
5718     + return NETDEV_TX_OK;
5719     }
5720    
5721     static int reg_vif_get_iflink(const struct net_device *dev)
5722     @@ -1831,6 +1838,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
5723     return -EFAULT;
5724     if (vr.mifi >= mrt->maxvif)
5725     return -EINVAL;
5726     + vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
5727     read_lock(&mrt_lock);
5728     vif = &mrt->vif_table[vr.mifi];
5729     if (VIF_EXISTS(mrt, vr.mifi)) {
5730     @@ -1905,6 +1913,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
5731     return -EFAULT;
5732     if (vr.mifi >= mrt->maxvif)
5733     return -EINVAL;
5734     + vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
5735     read_lock(&mrt_lock);
5736     vif = &mrt->vif_table[vr.mifi];
5737     if (VIF_EXISTS(mrt, vr.mifi)) {
5738     diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
5739     index d3fd2d7e5aa4..7c943392c128 100644
5740     --- a/net/ipv6/reassembly.c
5741     +++ b/net/ipv6/reassembly.c
5742     @@ -384,6 +384,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5743     if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
5744     kfree_skb_partial(fp, headstolen);
5745     } else {
5746     + fp->sk = NULL;
5747     if (!skb_shinfo(head)->frag_list)
5748     skb_shinfo(head)->frag_list = fp;
5749     head->data_len += fp->len;
5750     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
5751     index a33681dc4796..08c4516ae4a4 100644
5752     --- a/net/ipv6/route.c
5753     +++ b/net/ipv6/route.c
5754     @@ -210,7 +210,9 @@ struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
5755     n = __ipv6_neigh_lookup(dev, daddr);
5756     if (n)
5757     return n;
5758     - return neigh_create(&nd_tbl, daddr, dev);
5759     +
5760     + n = neigh_create(&nd_tbl, daddr, dev);
5761     + return IS_ERR(n) ? NULL : n;
5762     }
5763    
5764     static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
5765     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
5766     index e9400ffa7875..eb162bd0e041 100644
5767     --- a/net/ipv6/sit.c
5768     +++ b/net/ipv6/sit.c
5769     @@ -1021,6 +1021,9 @@ tx_error:
5770     static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
5771     struct net_device *dev)
5772     {
5773     + if (!pskb_inet_may_pull(skb))
5774     + goto tx_err;
5775     +
5776     switch (skb->protocol) {
5777     case htons(ETH_P_IP):
5778     sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
5779     diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
5780     index 03f37c4e64fe..1d3144d19903 100644
5781     --- a/net/netrom/af_netrom.c
5782     +++ b/net/netrom/af_netrom.c
5783     @@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
5784     sk_for_each(s, &nr_list)
5785     if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
5786     s->sk_state == TCP_LISTEN) {
5787     - bh_lock_sock(s);
5788     + sock_hold(s);
5789     goto found;
5790     }
5791     s = NULL;
5792     @@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
5793     struct nr_sock *nr = nr_sk(s);
5794    
5795     if (nr->my_index == index && nr->my_id == id) {
5796     - bh_lock_sock(s);
5797     + sock_hold(s);
5798     goto found;
5799     }
5800     }
5801     @@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
5802    
5803     if (nr->your_index == index && nr->your_id == id &&
5804     !ax25cmp(&nr->dest_addr, dest)) {
5805     - bh_lock_sock(s);
5806     + sock_hold(s);
5807     goto found;
5808     }
5809     }
5810     @@ -224,7 +224,7 @@ static unsigned short nr_find_next_circuit(void)
5811     if (i != 0 && j != 0) {
5812     if ((sk=nr_find_socket(i, j)) == NULL)
5813     break;
5814     - bh_unlock_sock(sk);
5815     + sock_put(sk);
5816     }
5817    
5818     id++;
5819     @@ -920,6 +920,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5820     }
5821    
5822     if (sk != NULL) {
5823     + bh_lock_sock(sk);
5824     skb_reset_transport_header(skb);
5825    
5826     if (frametype == NR_CONNACK && skb->len == 22)
5827     @@ -929,6 +930,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5828    
5829     ret = nr_process_rx_frame(sk, skb);
5830     bh_unlock_sock(sk);
5831     + sock_put(sk);
5832     return ret;
5833     }
5834    
5835     @@ -960,10 +962,12 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5836     (make = nr_make_new(sk)) == NULL) {
5837     nr_transmit_refusal(skb, 0);
5838     if (sk)
5839     - bh_unlock_sock(sk);
5840     + sock_put(sk);
5841     return 0;
5842     }
5843    
5844     + bh_lock_sock(sk);
5845     +
5846     window = skb->data[20];
5847    
5848     skb->sk = make;
5849     @@ -1016,6 +1020,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
5850     sk->sk_data_ready(sk);
5851    
5852     bh_unlock_sock(sk);
5853     + sock_put(sk);
5854    
5855     nr_insert_socket(make);
5856    
5857     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5858     index 6477b131e809..0541cfc93440 100644
5859     --- a/net/packet/af_packet.c
5860     +++ b/net/packet/af_packet.c
5861     @@ -2625,8 +2625,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
5862     sll_addr)))
5863     goto out;
5864     proto = saddr->sll_protocol;
5865     - addr = saddr->sll_addr;
5866     + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
5867     dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
5868     + if (addr && dev && saddr->sll_halen < dev->addr_len)
5869     + goto out;
5870     }
5871    
5872     err = -ENXIO;
5873     @@ -2823,8 +2825,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
5874     if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
5875     goto out;
5876     proto = saddr->sll_protocol;
5877     - addr = saddr->sll_addr;
5878     + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
5879     dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
5880     + if (addr && dev && saddr->sll_halen < dev->addr_len)
5881     + goto out;
5882     }
5883    
5884     err = -ENXIO;
5885     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
5886     index fc6c5e4bffa5..7f0539db5604 100644
5887     --- a/net/sctp/ipv6.c
5888     +++ b/net/sctp/ipv6.c
5889     @@ -101,6 +101,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
5890     if (addr) {
5891     addr->a.v6.sin6_family = AF_INET6;
5892     addr->a.v6.sin6_port = 0;
5893     + addr->a.v6.sin6_flowinfo = 0;
5894     addr->a.v6.sin6_addr = ifa->addr;
5895     addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
5896     addr->valid = 1;
5897     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
5898     index 80e2119f1c70..2b8f95290627 100644
5899     --- a/net/smc/af_smc.c
5900     +++ b/net/smc/af_smc.c
5901     @@ -145,8 +145,14 @@ static int smc_release(struct socket *sock)
5902     sk->sk_shutdown |= SHUTDOWN_MASK;
5903     }
5904     if (smc->clcsock) {
5905     + if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
5906     + /* wake up clcsock accept */
5907     + rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
5908     + }
5909     + mutex_lock(&smc->clcsock_release_lock);
5910     sock_release(smc->clcsock);
5911     smc->clcsock = NULL;
5912     + mutex_unlock(&smc->clcsock_release_lock);
5913     }
5914     if (smc->use_fallback) {
5915     if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
5916     @@ -203,6 +209,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
5917     spin_lock_init(&smc->conn.send_lock);
5918     sk->sk_prot->hash(sk);
5919     sk_refcnt_debug_inc(sk);
5920     + mutex_init(&smc->clcsock_release_lock);
5921    
5922     return sk;
5923     }
5924     @@ -818,7 +825,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
5925     struct socket *new_clcsock = NULL;
5926     struct sock *lsk = &lsmc->sk;
5927     struct sock *new_sk;
5928     - int rc;
5929     + int rc = -EINVAL;
5930    
5931     release_sock(lsk);
5932     new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
5933     @@ -831,7 +838,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
5934     }
5935     *new_smc = smc_sk(new_sk);
5936    
5937     - rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
5938     + mutex_lock(&lsmc->clcsock_release_lock);
5939     + if (lsmc->clcsock)
5940     + rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
5941     + mutex_unlock(&lsmc->clcsock_release_lock);
5942     lock_sock(lsk);
5943     if (rc < 0)
5944     lsk->sk_err = -rc;
5945     diff --git a/net/smc/smc.h b/net/smc/smc.h
5946     index 08786ace6010..5721416d0605 100644
5947     --- a/net/smc/smc.h
5948     +++ b/net/smc/smc.h
5949     @@ -219,6 +219,10 @@ struct smc_sock { /* smc sock container */
5950     * started, waiting for unsent
5951     * data to be sent
5952     */
5953     + struct mutex clcsock_release_lock;
5954     + /* protects clcsock of a listen
5955     + * socket
5956     + * */
5957     };
5958    
5959     static inline struct smc_sock *smc_sk(const struct sock *sk)
5960     diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
5961     index 5445145e639c..fc1c0d9ef57d 100644
5962     --- a/net/sunrpc/svcsock.c
5963     +++ b/net/sunrpc/svcsock.c
5964     @@ -574,7 +574,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
5965     /* Don't enable netstamp, sunrpc doesn't
5966     need that much accuracy */
5967     }
5968     - svsk->sk_sk->sk_stamp = skb->tstamp;
5969     + sock_write_timestamp(svsk->sk_sk, skb->tstamp);
5970     set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
5971    
5972     len = skb->len;
5973     diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
5974     index 645c16052052..2649a0a0d45e 100644
5975     --- a/net/tipc/bearer.c
5976     +++ b/net/tipc/bearer.c
5977     @@ -317,7 +317,6 @@ static int tipc_enable_bearer(struct net *net, const char *name,
5978     res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
5979     if (res) {
5980     bearer_disable(net, b);
5981     - kfree(b);
5982     errstr = "failed to create discoverer";
5983     goto rejected;
5984     }
5985     diff --git a/net/tipc/socket.c b/net/tipc/socket.c
5986     index 366ce0bf2658..e1bdaf056c8f 100644
5987     --- a/net/tipc/socket.c
5988     +++ b/net/tipc/socket.c
5989     @@ -878,7 +878,6 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
5990     DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
5991     int blks = tsk_blocks(GROUP_H_SIZE + dlen);
5992     struct tipc_sock *tsk = tipc_sk(sk);
5993     - struct tipc_group *grp = tsk->group;
5994     struct net *net = sock_net(sk);
5995     struct tipc_member *mb = NULL;
5996     u32 node, port;
5997     @@ -892,7 +891,9 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
5998     /* Block or return if destination link or member is congested */
5999     rc = tipc_wait_for_cond(sock, &timeout,
6000     !tipc_dest_find(&tsk->cong_links, node, 0) &&
6001     - !tipc_group_cong(grp, node, port, blks, &mb));
6002     + tsk->group &&
6003     + !tipc_group_cong(tsk->group, node, port, blks,
6004     + &mb));
6005     if (unlikely(rc))
6006     return rc;
6007    
6008     @@ -922,7 +923,6 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
6009     struct tipc_sock *tsk = tipc_sk(sk);
6010     struct list_head *cong_links = &tsk->cong_links;
6011     int blks = tsk_blocks(GROUP_H_SIZE + dlen);
6012     - struct tipc_group *grp = tsk->group;
6013     struct tipc_msg *hdr = &tsk->phdr;
6014     struct tipc_member *first = NULL;
6015     struct tipc_member *mbr = NULL;
6016     @@ -939,9 +939,10 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
6017     type = msg_nametype(hdr);
6018     inst = dest->addr.name.name.instance;
6019     scope = msg_lookup_scope(hdr);
6020     - exclude = tipc_group_exclude(grp);
6021    
6022     while (++lookups < 4) {
6023     + exclude = tipc_group_exclude(tsk->group);
6024     +
6025     first = NULL;
6026    
6027     /* Look for a non-congested destination member, if any */
6028     @@ -950,7 +951,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
6029     &dstcnt, exclude, false))
6030     return -EHOSTUNREACH;
6031     tipc_dest_pop(&dsts, &node, &port);
6032     - cong = tipc_group_cong(grp, node, port, blks, &mbr);
6033     + cong = tipc_group_cong(tsk->group, node, port, blks,
6034     + &mbr);
6035     if (!cong)
6036     break;
6037     if (mbr == first)
6038     @@ -969,7 +971,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
6039     /* Block or return if destination link or member is congested */
6040     rc = tipc_wait_for_cond(sock, &timeout,
6041     !tipc_dest_find(cong_links, node, 0) &&
6042     - !tipc_group_cong(grp, node, port,
6043     + tsk->group &&
6044     + !tipc_group_cong(tsk->group, node, port,
6045     blks, &mbr));
6046     if (unlikely(rc))
6047     return rc;
6048     @@ -1004,8 +1007,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
6049     struct sock *sk = sock->sk;
6050     struct net *net = sock_net(sk);
6051     struct tipc_sock *tsk = tipc_sk(sk);
6052     - struct tipc_group *grp = tsk->group;
6053     - struct tipc_nlist *dsts = tipc_group_dests(grp);
6054     + struct tipc_nlist *dsts;
6055     struct tipc_mc_method *method = &tsk->mc_method;
6056     bool ack = method->mandatory && method->rcast;
6057     int blks = tsk_blocks(MCAST_H_SIZE + dlen);
6058     @@ -1014,15 +1016,17 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
6059     struct sk_buff_head pkts;
6060     int rc = -EHOSTUNREACH;
6061    
6062     - if (!dsts->local && !dsts->remote)
6063     - return -EHOSTUNREACH;
6064     -
6065     /* Block or return if any destination link or member is congested */
6066     - rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
6067     - !tipc_group_bc_cong(grp, blks));
6068     + rc = tipc_wait_for_cond(sock, &timeout,
6069     + !tsk->cong_link_cnt && tsk->group &&
6070     + !tipc_group_bc_cong(tsk->group, blks));
6071     if (unlikely(rc))
6072     return rc;
6073    
6074     + dsts = tipc_group_dests(tsk->group);
6075     + if (!dsts->local && !dsts->remote)
6076     + return -EHOSTUNREACH;
6077     +
6078     /* Complete message header */
6079     if (dest) {
6080     msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
6081     @@ -1034,7 +1038,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
6082     msg_set_hdr_sz(hdr, GROUP_H_SIZE);
6083     msg_set_destport(hdr, 0);
6084     msg_set_destnode(hdr, 0);
6085     - msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
6086     + msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
6087    
6088     /* Avoid getting stuck with repeated forced replicasts */
6089     msg_set_grp_bc_ack_req(hdr, ack);
6090     @@ -2683,11 +2687,15 @@ void tipc_sk_reinit(struct net *net)
6091     rhashtable_walk_start(&iter);
6092    
6093     while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
6094     - spin_lock_bh(&tsk->sk.sk_lock.slock);
6095     + sock_hold(&tsk->sk);
6096     + rhashtable_walk_stop(&iter);
6097     + lock_sock(&tsk->sk);
6098     msg = &tsk->phdr;
6099     msg_set_prevnode(msg, tipc_own_addr(net));
6100     msg_set_orignode(msg, tipc_own_addr(net));
6101     - spin_unlock_bh(&tsk->sk.sk_lock.slock);
6102     + release_sock(&tsk->sk);
6103     + rhashtable_walk_start(&iter);
6104     + sock_put(&tsk->sk);
6105     }
6106    
6107     rhashtable_walk_stop(&iter);
6108     diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
6109     index 9783101bc4a9..da2d311476ab 100644
6110     --- a/net/tipc/udp_media.c
6111     +++ b/net/tipc/udp_media.c
6112     @@ -245,10 +245,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
6113     }
6114    
6115     err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
6116     - if (err) {
6117     - kfree_skb(_skb);
6118     + if (err)
6119     goto out;
6120     - }
6121     }
6122     err = 0;
6123     out:
6124     @@ -680,6 +678,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
6125     if (err)
6126     goto err;
6127    
6128     + if (remote.proto != local.proto) {
6129     + err = -EINVAL;
6130     + goto err;
6131     + }
6132     +
6133     /* Autoconfigure own node identity if needed */
6134     if (!tipc_own_id(net)) {
6135     memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
6136     diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
6137     index 523622dc74f8..7fab2891ce7f 100644
6138     --- a/net/tls/tls_main.c
6139     +++ b/net/tls/tls_main.c
6140     @@ -550,7 +550,7 @@ static struct tls_context *create_ctx(struct sock *sk)
6141     struct inet_connection_sock *icsk = inet_csk(sk);
6142     struct tls_context *ctx;
6143    
6144     - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6145     + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
6146     if (!ctx)
6147     return NULL;
6148    
6149     diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
6150     index cb332adb84cd..c361ce782412 100644
6151     --- a/net/vmw_vsock/vmci_transport.c
6152     +++ b/net/vmw_vsock/vmci_transport.c
6153     @@ -263,6 +263,31 @@ vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
6154     false);
6155     }
6156    
6157     +static int
6158     +vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
6159     + struct sockaddr_vm *dst,
6160     + enum vmci_transport_packet_type type,
6161     + u64 size,
6162     + u64 mode,
6163     + struct vmci_transport_waiting_info *wait,
6164     + u16 proto,
6165     + struct vmci_handle handle)
6166     +{
6167     + struct vmci_transport_packet *pkt;
6168     + int err;
6169     +
6170     + pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
6171     + if (!pkt)
6172     + return -ENOMEM;
6173     +
6174     + err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
6175     + mode, wait, proto, handle,
6176     + true);
6177     + kfree(pkt);
6178     +
6179     + return err;
6180     +}
6181     +
6182     static int
6183     vmci_transport_send_control_pkt(struct sock *sk,
6184     enum vmci_transport_packet_type type,
6185     @@ -272,9 +297,7 @@ vmci_transport_send_control_pkt(struct sock *sk,
6186     u16 proto,
6187     struct vmci_handle handle)
6188     {
6189     - struct vmci_transport_packet *pkt;
6190     struct vsock_sock *vsk;
6191     - int err;
6192    
6193     vsk = vsock_sk(sk);
6194    
6195     @@ -284,17 +307,10 @@ vmci_transport_send_control_pkt(struct sock *sk,
6196     if (!vsock_addr_bound(&vsk->remote_addr))
6197     return -EINVAL;
6198    
6199     - pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
6200     - if (!pkt)
6201     - return -ENOMEM;
6202     -
6203     - err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
6204     - &vsk->remote_addr, type, size,
6205     - mode, wait, proto, handle,
6206     - true);
6207     - kfree(pkt);
6208     -
6209     - return err;
6210     + return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
6211     + &vsk->remote_addr,
6212     + type, size, mode,
6213     + wait, proto, handle);
6214     }
6215    
6216     static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
6217     @@ -312,12 +328,29 @@ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
6218     static int vmci_transport_send_reset(struct sock *sk,
6219     struct vmci_transport_packet *pkt)
6220     {
6221     + struct sockaddr_vm *dst_ptr;
6222     + struct sockaddr_vm dst;
6223     + struct vsock_sock *vsk;
6224     +
6225     if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
6226     return 0;
6227     - return vmci_transport_send_control_pkt(sk,
6228     - VMCI_TRANSPORT_PACKET_TYPE_RST,
6229     - 0, 0, NULL, VSOCK_PROTO_INVALID,
6230     - VMCI_INVALID_HANDLE);
6231     +
6232     + vsk = vsock_sk(sk);
6233     +
6234     + if (!vsock_addr_bound(&vsk->local_addr))
6235     + return -EINVAL;
6236     +
6237     + if (vsock_addr_bound(&vsk->remote_addr)) {
6238     + dst_ptr = &vsk->remote_addr;
6239     + } else {
6240     + vsock_addr_init(&dst, pkt->dg.src.context,
6241     + pkt->src_port);
6242     + dst_ptr = &dst;
6243     + }
6244     + return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
6245     + VMCI_TRANSPORT_PACKET_TYPE_RST,
6246     + 0, 0, NULL, VSOCK_PROTO_INVALID,
6247     + VMCI_INVALID_HANDLE);
6248     }
6249    
6250     static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
6251     diff --git a/sound/core/pcm.c b/sound/core/pcm.c
6252     index fdb9b92fc8d6..01b9d62eef14 100644
6253     --- a/sound/core/pcm.c
6254     +++ b/sound/core/pcm.c
6255     @@ -25,6 +25,7 @@
6256     #include <linux/time.h>
6257     #include <linux/mutex.h>
6258     #include <linux/device.h>
6259     +#include <linux/nospec.h>
6260     #include <sound/core.h>
6261     #include <sound/minors.h>
6262     #include <sound/pcm.h>
6263     @@ -129,6 +130,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
6264     return -EFAULT;
6265     if (stream < 0 || stream > 1)
6266     return -EINVAL;
6267     + stream = array_index_nospec(stream, 2);
6268     if (get_user(subdevice, &info->subdevice))
6269     return -EFAULT;
6270     mutex_lock(&register_mutex);
6271     diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
6272     index 54cdd4ffa9ce..ac20acf48fc6 100644
6273     --- a/sound/firewire/amdtp-stream-trace.h
6274     +++ b/sound/firewire/amdtp-stream-trace.h
6275     @@ -131,7 +131,7 @@ TRACE_EVENT(in_packet_without_header,
6276     __entry->index = index;
6277     ),
6278     TP_printk(
6279     - "%02u %04u %04x %04x %02d %03u %3u %3u %02u %01u %02u",
6280     + "%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u",
6281     __entry->second,
6282     __entry->cycle,
6283     __entry->src,
6284     @@ -169,7 +169,7 @@ TRACE_EVENT(out_packet_without_header,
6285     __entry->dest = fw_parent_device(s->unit)->node_id;
6286     __entry->payload_quadlets = payload_length / 4;
6287     __entry->data_blocks = data_blocks,
6288     - __entry->data_blocks = s->data_block_counter,
6289     + __entry->data_block_counter = s->data_block_counter,
6290     __entry->packet_index = s->packet_index;
6291     __entry->irq = !!in_interrupt();
6292     __entry->index = index;
6293     diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
6294     index cb9acfe60f6a..293933f469d6 100644
6295     --- a/sound/firewire/amdtp-stream.c
6296     +++ b/sound/firewire/amdtp-stream.c
6297     @@ -629,15 +629,17 @@ end:
6298     }
6299    
6300     static int handle_in_packet_without_header(struct amdtp_stream *s,
6301     - unsigned int payload_quadlets, unsigned int cycle,
6302     + unsigned int payload_length, unsigned int cycle,
6303     unsigned int index)
6304     {
6305     __be32 *buffer;
6306     + unsigned int payload_quadlets;
6307     unsigned int data_blocks;
6308     struct snd_pcm_substream *pcm;
6309     unsigned int pcm_frames;
6310    
6311     buffer = s->buffer.packets[s->packet_index].buffer;
6312     + payload_quadlets = payload_length / 4;
6313     data_blocks = payload_quadlets / s->data_block_quadlets;
6314    
6315     trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
6316     diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
6317     index 654a50319198..4d191172fe3f 100644
6318     --- a/sound/firewire/fireface/ff-protocol-ff400.c
6319     +++ b/sound/firewire/fireface/ff-protocol-ff400.c
6320     @@ -152,7 +152,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
6321     if (reg == NULL)
6322     return -ENOMEM;
6323    
6324     - if (enable) {
6325     + if (!enable) {
6326     /*
6327     * Each quadlet is corresponding to data channels in a data
6328     * blocks in reverse order. Precisely, quadlets for available
6329     diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
6330     index 6ebe817801ea..1f25e6d029d8 100644
6331     --- a/sound/pci/emu10k1/emufx.c
6332     +++ b/sound/pci/emu10k1/emufx.c
6333     @@ -36,6 +36,7 @@
6334     #include <linux/init.h>
6335     #include <linux/mutex.h>
6336     #include <linux/moduleparam.h>
6337     +#include <linux/nospec.h>
6338    
6339     #include <sound/core.h>
6340     #include <sound/tlv.h>
6341     @@ -1026,6 +1027,8 @@ static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu,
6342    
6343     if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
6344     return -EINVAL;
6345     + ipcm->substream = array_index_nospec(ipcm->substream,
6346     + EMU10K1_FX8010_PCM_COUNT);
6347     if (ipcm->channels > 32)
6348     return -EINVAL;
6349     pcm = &emu->fx8010.pcm[ipcm->substream];
6350     @@ -1072,6 +1075,8 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
6351    
6352     if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
6353     return -EINVAL;
6354     + ipcm->substream = array_index_nospec(ipcm->substream,
6355     + EMU10K1_FX8010_PCM_COUNT);
6356     pcm = &emu->fx8010.pcm[ipcm->substream];
6357     mutex_lock(&emu->fx8010.lock);
6358     spin_lock_irq(&emu->reg_lock);
6359     diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
6360     index 0621920f7617..e85fb04ec7be 100644
6361     --- a/sound/pci/hda/hda_tegra.c
6362     +++ b/sound/pci/hda/hda_tegra.c
6363     @@ -249,10 +249,12 @@ static int hda_tegra_suspend(struct device *dev)
6364     struct snd_card *card = dev_get_drvdata(dev);
6365     struct azx *chip = card->private_data;
6366     struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
6367     + struct hdac_bus *bus = azx_bus(chip);
6368    
6369     snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
6370    
6371     azx_stop_chip(chip);
6372     + synchronize_irq(bus->irq);
6373     azx_enter_link_reset(chip);
6374     hda_tegra_disable_clocks(hda);
6375    
6376     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6377     index 3c5f2a603754..f9176e3b4d37 100644
6378     --- a/sound/pci/hda/patch_conexant.c
6379     +++ b/sound/pci/hda/patch_conexant.c
6380     @@ -923,6 +923,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
6381     SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
6382     SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
6383     SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
6384     + SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
6385     SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
6386     SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
6387     SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
6388     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6389     index 8a3d0694d2e5..854d63c01dd2 100644
6390     --- a/sound/pci/hda/patch_realtek.c
6391     +++ b/sound/pci/hda/patch_realtek.c
6392     @@ -6424,7 +6424,7 @@ static const struct hda_fixup alc269_fixups[] = {
6393     [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
6394     .type = HDA_FIXUP_PINS,
6395     .v.pins = (const struct hda_pintbl[]) {
6396     - { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
6397     + { 0x19, 0x01a1103c }, /* use as headset mic */
6398     { }
6399     },
6400     .chained = true,
6401     @@ -6573,6 +6573,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6402     SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
6403     SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
6404     SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
6405     + SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
6406     SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
6407     SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
6408     SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
6409     diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
6410     index 1bff4b1b39cd..ba99ff0e93e0 100644
6411     --- a/sound/pci/rme9652/hdsp.c
6412     +++ b/sound/pci/rme9652/hdsp.c
6413     @@ -30,6 +30,7 @@
6414     #include <linux/math64.h>
6415     #include <linux/vmalloc.h>
6416     #include <linux/io.h>
6417     +#include <linux/nospec.h>
6418    
6419     #include <sound/core.h>
6420     #include <sound/control.h>
6421     @@ -4092,15 +4093,16 @@ static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
6422     struct snd_pcm_channel_info *info)
6423     {
6424     struct hdsp *hdsp = snd_pcm_substream_chip(substream);
6425     - int mapped_channel;
6426     + unsigned int channel = info->channel;
6427    
6428     - if (snd_BUG_ON(info->channel >= hdsp->max_channels))
6429     + if (snd_BUG_ON(channel >= hdsp->max_channels))
6430     return -EINVAL;
6431     + channel = array_index_nospec(channel, hdsp->max_channels);
6432    
6433     - if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
6434     + if (hdsp->channel_map[channel] < 0)
6435     return -EINVAL;
6436    
6437     - info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
6438     + info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
6439     info->first = 0;
6440     info->step = 32;
6441     return 0;
6442     diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
6443     index 9d9f6e41d81c..08a5152e635a 100644
6444     --- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
6445     +++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
6446     @@ -389,6 +389,20 @@ static struct snd_soc_card snd_soc_card_cht = {
6447     };
6448    
6449     static const struct dmi_system_id cht_max98090_quirk_table[] = {
6450     + {
6451     + /* Clapper model Chromebook */
6452     + .matches = {
6453     + DMI_MATCH(DMI_PRODUCT_NAME, "Clapper"),
6454     + },
6455     + .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
6456     + },
6457     + {
6458     + /* Gnawty model Chromebook (Acer Chromebook CB3-111) */
6459     + .matches = {
6460     + DMI_MATCH(DMI_PRODUCT_NAME, "Gnawty"),
6461     + },
6462     + .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
6463     + },
6464     {
6465     /* Swanky model Chromebook (Toshiba Chromebook 2) */
6466     .matches = {
6467     diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
6468     index e557946718a9..d9fcae071b47 100644
6469     --- a/sound/synth/emux/emux_hwdep.c
6470     +++ b/sound/synth/emux/emux_hwdep.c
6471     @@ -22,9 +22,9 @@
6472     #include <sound/core.h>
6473     #include <sound/hwdep.h>
6474     #include <linux/uaccess.h>
6475     +#include <linux/nospec.h>
6476     #include "emux_voice.h"
6477    
6478     -
6479     #define TMP_CLIENT_ID 0x1001
6480    
6481     /*
6482     @@ -66,13 +66,16 @@ snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
6483     return -EFAULT;
6484     if (info.mode < 0 || info.mode >= EMUX_MD_END)
6485     return -EINVAL;
6486     + info.mode = array_index_nospec(info.mode, EMUX_MD_END);
6487    
6488     if (info.port < 0) {
6489     for (i = 0; i < emu->num_ports; i++)
6490     emu->portptrs[i]->ctrls[info.mode] = info.value;
6491     } else {
6492     - if (info.port < emu->num_ports)
6493     + if (info.port < emu->num_ports) {
6494     + info.port = array_index_nospec(info.port, emu->num_ports);
6495     emu->portptrs[info.port]->ctrls[info.mode] = info.value;
6496     + }
6497     }
6498     return 0;
6499     }
6500     diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
6501     index ce1e20227c64..75de355a63d6 100644
6502     --- a/tools/lib/traceevent/event-parse.c
6503     +++ b/tools/lib/traceevent/event-parse.c
6504     @@ -4968,6 +4968,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
6505    
6506     if (arg->type == PRINT_BSTRING) {
6507     trace_seq_puts(s, arg->string.string);
6508     + arg = arg->next;
6509     break;
6510     }
6511    
6512     diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
6513     index 82657c01a3b8..5f69fd0b745a 100644
6514     --- a/tools/perf/arch/common.c
6515     +++ b/tools/perf/arch/common.c
6516     @@ -200,3 +200,13 @@ int perf_env__lookup_objdump(struct perf_env *env, const char **path)
6517    
6518     return perf_env__lookup_binutils_path(env, "objdump", path);
6519     }
6520     +
6521     +/*
6522     + * Some architectures have a single address space for kernel and user addresses,
6523     + * which makes it possible to determine if an address is in kernel space or user
6524     + * space.
6525     + */
6526     +bool perf_env__single_address_space(struct perf_env *env)
6527     +{
6528     + return strcmp(perf_env__arch(env), "sparc");
6529     +}
6530     diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
6531     index 2167001b18c5..c298a446d1f6 100644
6532     --- a/tools/perf/arch/common.h
6533     +++ b/tools/perf/arch/common.h
6534     @@ -5,5 +5,6 @@
6535     #include "../util/env.h"
6536    
6537     int perf_env__lookup_objdump(struct perf_env *env, const char **path);
6538     +bool perf_env__single_address_space(struct perf_env *env);
6539    
6540     #endif /* ARCH_PERF_COMMON_H */
6541     diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
6542     index ba481d73f910..6c1e7ceedcf3 100644
6543     --- a/tools/perf/builtin-script.c
6544     +++ b/tools/perf/builtin-script.c
6545     @@ -727,8 +727,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
6546     if (PRINT_FIELD(DSO)) {
6547     memset(&alf, 0, sizeof(alf));
6548     memset(&alt, 0, sizeof(alt));
6549     - thread__find_map(thread, sample->cpumode, from, &alf);
6550     - thread__find_map(thread, sample->cpumode, to, &alt);
6551     + thread__find_map_fb(thread, sample->cpumode, from, &alf);
6552     + thread__find_map_fb(thread, sample->cpumode, to, &alt);
6553     }
6554    
6555     printed += fprintf(fp, " 0x%"PRIx64, from);
6556     @@ -774,8 +774,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
6557     from = br->entries[i].from;
6558     to = br->entries[i].to;
6559    
6560     - thread__find_symbol(thread, sample->cpumode, from, &alf);
6561     - thread__find_symbol(thread, sample->cpumode, to, &alt);
6562     + thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
6563     + thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
6564    
6565     printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
6566     if (PRINT_FIELD(DSO)) {
6567     @@ -819,11 +819,11 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
6568     from = br->entries[i].from;
6569     to = br->entries[i].to;
6570    
6571     - if (thread__find_map(thread, sample->cpumode, from, &alf) &&
6572     + if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
6573     !alf.map->dso->adjust_symbols)
6574     from = map__map_ip(alf.map, from);
6575    
6576     - if (thread__find_map(thread, sample->cpumode, to, &alt) &&
6577     + if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
6578     !alt.map->dso->adjust_symbols)
6579     to = map__map_ip(alt.map, to);
6580    
6581     diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
6582     index 59f38c7693f8..4c23779e271a 100644
6583     --- a/tools/perf/util/env.c
6584     +++ b/tools/perf/util/env.c
6585     @@ -166,7 +166,7 @@ const char *perf_env__arch(struct perf_env *env)
6586     struct utsname uts;
6587     char *arch_name;
6588    
6589     - if (!env) { /* Assume local operation */
6590     + if (!env || !env->arch) { /* Assume local operation */
6591     if (uname(&uts) < 0)
6592     return NULL;
6593     arch_name = uts.machine;
6594     diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
6595     index bc646185f8d9..aa9c7df120ca 100644
6596     --- a/tools/perf/util/event.c
6597     +++ b/tools/perf/util/event.c
6598     @@ -1576,6 +1576,24 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
6599     return al->map;
6600     }
6601    
6602     +/*
6603     + * For branch stacks or branch samples, the sample cpumode might not be correct
6604     + * because it applies only to the sample 'ip' and not necessary to 'addr' or
6605     + * branch stack addresses. If possible, use a fallback to deal with those cases.
6606     + */
6607     +struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
6608     + struct addr_location *al)
6609     +{
6610     + struct map *map = thread__find_map(thread, cpumode, addr, al);
6611     + struct machine *machine = thread->mg->machine;
6612     + u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
6613     +
6614     + if (map || addr_cpumode == cpumode)
6615     + return map;
6616     +
6617     + return thread__find_map(thread, addr_cpumode, addr, al);
6618     +}
6619     +
6620     struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
6621     u64 addr, struct addr_location *al)
6622     {
6623     @@ -1585,6 +1603,15 @@ struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
6624     return al->sym;
6625     }
6626    
6627     +struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
6628     + u64 addr, struct addr_location *al)
6629     +{
6630     + al->sym = NULL;
6631     + if (thread__find_map_fb(thread, cpumode, addr, al))
6632     + al->sym = map__find_symbol(al->map, al->addr);
6633     + return al->sym;
6634     +}
6635     +
6636     /*
6637     * Callers need to drop the reference to al->thread, obtained in
6638     * machine__findnew_thread()
6639     @@ -1678,7 +1705,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
6640     void thread__resolve(struct thread *thread, struct addr_location *al,
6641     struct perf_sample *sample)
6642     {
6643     - thread__find_map(thread, sample->cpumode, sample->addr, al);
6644     + thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
6645    
6646     al->cpu = sample->cpu;
6647     al->sym = NULL;
6648     diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
6649     index 8ee8ab39d8ac..d7403d1207d7 100644
6650     --- a/tools/perf/util/machine.c
6651     +++ b/tools/perf/util/machine.c
6652     @@ -2575,6 +2575,33 @@ int machine__get_kernel_start(struct machine *machine)
6653     return err;
6654     }
6655    
6656     +u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
6657     +{
6658     + u8 addr_cpumode = cpumode;
6659     + bool kernel_ip;
6660     +
6661     + if (!machine->single_address_space)
6662     + goto out;
6663     +
6664     + kernel_ip = machine__kernel_ip(machine, addr);
6665     + switch (cpumode) {
6666     + case PERF_RECORD_MISC_KERNEL:
6667     + case PERF_RECORD_MISC_USER:
6668     + addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
6669     + PERF_RECORD_MISC_USER;
6670     + break;
6671     + case PERF_RECORD_MISC_GUEST_KERNEL:
6672     + case PERF_RECORD_MISC_GUEST_USER:
6673     + addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
6674     + PERF_RECORD_MISC_GUEST_USER;
6675     + break;
6676     + default:
6677     + break;
6678     + }
6679     +out:
6680     + return addr_cpumode;
6681     +}
6682     +
6683     struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
6684     {
6685     return dsos__findnew(&machine->dsos, filename);
6686     diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
6687     index d856b85862e2..ebde3ea70225 100644
6688     --- a/tools/perf/util/machine.h
6689     +++ b/tools/perf/util/machine.h
6690     @@ -42,6 +42,7 @@ struct machine {
6691     u16 id_hdr_size;
6692     bool comm_exec;
6693     bool kptr_restrict_warned;
6694     + bool single_address_space;
6695     char *root_dir;
6696     char *mmap_name;
6697     struct threads threads[THREADS__TABLE_SIZE];
6698     @@ -99,6 +100,8 @@ static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
6699     return ip >= kernel_start;
6700     }
6701    
6702     +u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
6703     +
6704     struct thread *machine__find_thread(struct machine *machine, pid_t pid,
6705     pid_t tid);
6706     struct comm *machine__thread_exec_comm(struct machine *machine,
6707     diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
6708     index 7e49baad304d..7348eea0248f 100644
6709     --- a/tools/perf/util/pmu.c
6710     +++ b/tools/perf/util/pmu.c
6711     @@ -145,7 +145,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
6712     int fd, ret = -1;
6713     char path[PATH_MAX];
6714    
6715     - snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
6716     + scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
6717    
6718     fd = open(path, O_RDONLY);
6719     if (fd == -1)
6720     @@ -175,7 +175,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
6721     ssize_t sret;
6722     int fd;
6723    
6724     - snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
6725     + scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
6726    
6727     fd = open(path, O_RDONLY);
6728     if (fd == -1)
6729     @@ -205,7 +205,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
6730     char path[PATH_MAX];
6731     int fd;
6732    
6733     - snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
6734     + scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
6735    
6736     fd = open(path, O_RDONLY);
6737     if (fd == -1)
6738     @@ -223,7 +223,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
6739     char path[PATH_MAX];
6740     int fd;
6741    
6742     - snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
6743     + scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
6744    
6745     fd = open(path, O_RDONLY);
6746     if (fd == -1)
6747     diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
6748     index dfc6093f118c..05d95de14e20 100644
6749     --- a/tools/perf/util/scripting-engines/trace-event-python.c
6750     +++ b/tools/perf/util/scripting-engines/trace-event-python.c
6751     @@ -494,14 +494,14 @@ static PyObject *python_process_brstack(struct perf_sample *sample,
6752     pydict_set_item_string_decref(pyelem, "cycles",
6753     PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
6754    
6755     - thread__find_map(thread, sample->cpumode,
6756     - br->entries[i].from, &al);
6757     + thread__find_map_fb(thread, sample->cpumode,
6758     + br->entries[i].from, &al);
6759     dsoname = get_dsoname(al.map);
6760     pydict_set_item_string_decref(pyelem, "from_dsoname",
6761     _PyUnicode_FromString(dsoname));
6762    
6763     - thread__find_map(thread, sample->cpumode,
6764     - br->entries[i].to, &al);
6765     + thread__find_map_fb(thread, sample->cpumode,
6766     + br->entries[i].to, &al);
6767     dsoname = get_dsoname(al.map);
6768     pydict_set_item_string_decref(pyelem, "to_dsoname",
6769     _PyUnicode_FromString(dsoname));
6770     @@ -576,14 +576,14 @@ static PyObject *python_process_brstacksym(struct perf_sample *sample,
6771     if (!pyelem)
6772     Py_FatalError("couldn't create Python dictionary");
6773    
6774     - thread__find_symbol(thread, sample->cpumode,
6775     - br->entries[i].from, &al);
6776     + thread__find_symbol_fb(thread, sample->cpumode,
6777     + br->entries[i].from, &al);
6778     get_symoff(al.sym, &al, true, bf, sizeof(bf));
6779     pydict_set_item_string_decref(pyelem, "from",
6780     _PyUnicode_FromString(bf));
6781    
6782     - thread__find_symbol(thread, sample->cpumode,
6783     - br->entries[i].to, &al);
6784     + thread__find_symbol_fb(thread, sample->cpumode,
6785     + br->entries[i].to, &al);
6786     get_symoff(al.sym, &al, true, bf, sizeof(bf));
6787     pydict_set_item_string_decref(pyelem, "to",
6788     _PyUnicode_FromString(bf));
6789     diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
6790     index 8b9369303561..11086097fc9f 100644
6791     --- a/tools/perf/util/session.c
6792     +++ b/tools/perf/util/session.c
6793     @@ -24,6 +24,7 @@
6794     #include "thread.h"
6795     #include "thread-stack.h"
6796     #include "stat.h"
6797     +#include "arch/common.h"
6798    
6799     static int perf_session__deliver_event(struct perf_session *session,
6800     union perf_event *event,
6801     @@ -150,6 +151,9 @@ struct perf_session *perf_session__new(struct perf_data *data,
6802     session->machines.host.env = &perf_env;
6803     }
6804    
6805     + session->machines.host.single_address_space =
6806     + perf_env__single_address_space(session->machines.host.env);
6807     +
6808     if (!data || perf_data__is_write(data)) {
6809     /*
6810     * In O_RDONLY mode this will be performed when reading the
6811     diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
6812     index 07606aa6998d..4e2c3cbdea4b 100644
6813     --- a/tools/perf/util/thread.h
6814     +++ b/tools/perf/util/thread.h
6815     @@ -94,9 +94,13 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
6816    
6817     struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
6818     struct addr_location *al);
6819     +struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
6820     + struct addr_location *al);
6821    
6822     struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
6823     u64 addr, struct addr_location *al);
6824     +struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
6825     + u64 addr, struct addr_location *al);
6826    
6827     void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
6828     struct addr_location *al);
6829     diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
6830     index f56ff1cf52ec..ceeda7e04a4d 100644
6831     --- a/virt/kvm/arm/vgic/vgic-mmio.c
6832     +++ b/virt/kvm/arm/vgic/vgic-mmio.c
6833     @@ -313,36 +313,30 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
6834    
6835     spin_lock_irqsave(&irq->irq_lock, flags);
6836    
6837     - /*
6838     - * If this virtual IRQ was written into a list register, we
6839     - * have to make sure the CPU that runs the VCPU thread has
6840     - * synced back the LR state to the struct vgic_irq.
6841     - *
6842     - * As long as the conditions below are true, we know the VCPU thread
6843     - * may be on its way back from the guest (we kicked the VCPU thread in
6844     - * vgic_change_active_prepare) and still has to sync back this IRQ,
6845     - * so we release and re-acquire the spin_lock to let the other thread
6846     - * sync back the IRQ.
6847     - *
6848     - * When accessing VGIC state from user space, requester_vcpu is
6849     - * NULL, which is fine, because we guarantee that no VCPUs are running
6850     - * when accessing VGIC state from user space so irq->vcpu->cpu is
6851     - * always -1.
6852     - */
6853     - while (irq->vcpu && /* IRQ may have state in an LR somewhere */
6854     - irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
6855     - irq->vcpu->cpu != -1) /* VCPU thread is running */
6856     - cond_resched_lock(&irq->irq_lock);
6857     -
6858     if (irq->hw) {
6859     vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
6860     } else {
6861     u32 model = vcpu->kvm->arch.vgic.vgic_model;
6862     + u8 active_source;
6863    
6864     irq->active = active;
6865     +
6866     + /*
6867     + * The GICv2 architecture indicates that the source CPUID for
6868     + * an SGI should be provided during an EOI which implies that
6869     + * the active state is stored somewhere, but at the same time
6870     + * this state is not architecturally exposed anywhere and we
6871     + * have no way of knowing the right source.
6872     + *
6873     + * This may lead to a VCPU not being able to receive
6874     + * additional instances of a particular SGI after migration
6875     + * for a GICv2 VM on some GIC implementations. Oh well.
6876     + */
6877     + active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
6878     +
6879     if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
6880     active && vgic_irq_is_sgi(irq->intid))
6881     - irq->active_source = requester_vcpu->vcpu_id;
6882     + irq->active_source = active_source;
6883     }
6884    
6885     if (irq->active)
6886     @@ -368,14 +362,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
6887     */
6888     static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
6889     {
6890     - if (intid > VGIC_NR_PRIVATE_IRQS)
6891     + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
6892     + intid > VGIC_NR_PRIVATE_IRQS)
6893     kvm_arm_halt_guest(vcpu->kvm);
6894     }
6895    
6896     /* See vgic_change_active_prepare */
6897     static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
6898     {
6899     - if (intid > VGIC_NR_PRIVATE_IRQS)
6900     + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
6901     + intid > VGIC_NR_PRIVATE_IRQS)
6902     kvm_arm_resume_guest(vcpu->kvm);
6903     }
6904    
6905     diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
6906     index 7cfdfbc910e0..f884a54b2601 100644
6907     --- a/virt/kvm/arm/vgic/vgic.c
6908     +++ b/virt/kvm/arm/vgic/vgic.c
6909     @@ -103,13 +103,13 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
6910     {
6911     /* SGIs and PPIs */
6912     if (intid <= VGIC_MAX_PRIVATE) {
6913     - intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
6914     + intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
6915     return &vcpu->arch.vgic_cpu.private_irqs[intid];
6916     }
6917    
6918     /* SPIs */
6919     - if (intid <= VGIC_MAX_SPI) {
6920     - intid = array_index_nospec(intid, VGIC_MAX_SPI);
6921     + if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
6922     + intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
6923     return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
6924     }
6925