Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0107-5.4.8-all-fixes.patch

Revision 3488
Mon May 11 14:36:02 2020 UTC by niro
File size: 261121 byte(s)
-linux-5.4.8
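
For context, an incremental stable patch like this is meant to be applied on top of the previous point release (the Makefile hunk below bumps SUBLEVEL from 7 to 8, i.e. linux-5.4.7 to linux-5.4.8). A minimal sketch of how such a patch would typically be checked and applied, assuming it has been saved as 0107-5.4.8-all-fixes.patch beside an unpacked vanilla linux-5.4.7 tree (both paths are illustrative, not part of this repository):

# Dry-run first to confirm the patch applies cleanly
cd linux-5.4.7
patch -p1 --dry-run < ../0107-5.4.8-all-fixes.patch

# Or, in a git checkout: show touched files, verify, then apply
git apply --stat  ../0107-5.4.8-all-fixes.patch
git apply --check ../0107-5.4.8-all-fixes.patch && \
git apply         ../0107-5.4.8-all-fixes.patch
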
1 niro 3488 diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
2     index 5138a2f6232a..646cb3525373 100644
3     --- a/Documentation/devicetree/bindings/Makefile
4     +++ b/Documentation/devicetree/bindings/Makefile
5     @@ -12,7 +12,6 @@ $(obj)/%.example.dts: $(src)/%.yaml FORCE
6     $(call if_changed,chk_binding)
7    
8     DT_TMP_SCHEMA := processed-schema.yaml
9     -extra-y += $(DT_TMP_SCHEMA)
10    
11     quiet_cmd_mk_schema = SCHEMA $@
12     cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(real-prereqs)
13     @@ -26,8 +25,12 @@ DT_DOCS = $(shell \
14    
15     DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
16    
17     +ifeq ($(CHECK_DTBS),)
18     extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
19     extra-y += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
20     +endif
21    
22     $(obj)/$(DT_TMP_SCHEMA): $(DT_SCHEMA_FILES) FORCE
23     $(call if_changed,mk_schema)
24     +
25     +extra-y += $(DT_TMP_SCHEMA)
26     diff --git a/Documentation/devicetree/writing-schema.rst b/Documentation/devicetree/writing-schema.rst
27     index f4a638072262..83e04e5c342d 100644
28     --- a/Documentation/devicetree/writing-schema.rst
29     +++ b/Documentation/devicetree/writing-schema.rst
30     @@ -130,11 +130,13 @@ binding schema. All of the DT binding documents can be validated using the
31    
32     make dt_binding_check
33    
34     -In order to perform validation of DT source files, use the `dtbs_check` target::
35     +In order to perform validation of DT source files, use the ``dtbs_check`` target::
36    
37     make dtbs_check
38    
39     -This will first run the `dt_binding_check` which generates the processed schema.
40     +Note that ``dtbs_check`` will skip any binding schema files with errors. It is
41     +necessary to use ``dt_binding_check`` to get all the validation errors in the
42     +binding schema files.
43    
44     It is also possible to run checks with a single schema file by setting the
45     ``DT_SCHEMA_FILES`` variable to a specific schema file.
46     diff --git a/Makefile b/Makefile
47     index 0e2e0a034064..1adee1b06f3d 100644
48     --- a/Makefile
49     +++ b/Makefile
50     @@ -1,7 +1,7 @@
51     # SPDX-License-Identifier: GPL-2.0
52     VERSION = 5
53     PATCHLEVEL = 4
54     -SUBLEVEL = 7
55     +SUBLEVEL = 8
56     EXTRAVERSION =
57     NAME = Kleptomaniac Octopus
58    
59     diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
60     index b36c0289a308..6a0f1f524466 100644
61     --- a/arch/arm/boot/compressed/libfdt_env.h
62     +++ b/arch/arm/boot/compressed/libfdt_env.h
63     @@ -2,11 +2,13 @@
64     #ifndef _ARM_LIBFDT_ENV_H
65     #define _ARM_LIBFDT_ENV_H
66    
67     +#include <linux/limits.h>
68     #include <linux/types.h>
69     #include <linux/string.h>
70     #include <asm/byteorder.h>
71    
72     -#define INT_MAX ((int)(~0U>>1))
73     +#define INT32_MAX S32_MAX
74     +#define UINT32_MAX U32_MAX
75    
76     typedef __be16 fdt16_t;
77     typedef __be32 fdt32_t;
78     diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
79     index db9247898300..287ef898a55e 100644
80     --- a/arch/arm/mm/dma-mapping-nommu.c
81     +++ b/arch/arm/mm/dma-mapping-nommu.c
82     @@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
83     unsigned long attrs)
84    
85     {
86     - void *ret = dma_alloc_from_global_coherent(size, dma_handle);
87     + void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
88    
89     /*
90     * dma_alloc_from_global_coherent() may fail because:
91     diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
92     index 9a07916af8dd..a6554fdb56c5 100644
93     --- a/arch/arm/mm/proc-v7-bugs.c
94     +++ b/arch/arm/mm/proc-v7-bugs.c
95     @@ -65,6 +65,9 @@ static void cpu_v7_spectre_init(void)
96     break;
97    
98     #ifdef CONFIG_ARM_PSCI
99     + case ARM_CPU_PART_BRAHMA_B53:
100     + /* Requires no workaround */
101     + break;
102     default:
103     /* Other ARM CPUs require no workaround */
104     if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
105     diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
106     index fb842965d541..9228f7386220 100644
107     --- a/arch/mips/include/asm/barrier.h
108     +++ b/arch/mips/include/asm/barrier.h
109     @@ -218,14 +218,13 @@
110     * ordering will be done by smp_llsc_mb() and friends.
111     */
112     #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
113     -# define __WEAK_LLSC_MB sync
114     -# define smp_llsc_mb() \
115     - __asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
116     -# define __LLSC_CLOBBER
117     +#define __WEAK_LLSC_MB " sync \n"
118     +#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
119     +#define __LLSC_CLOBBER
120     #else
121     -# define __WEAK_LLSC_MB
122     -# define smp_llsc_mb() do { } while (0)
123     -# define __LLSC_CLOBBER "memory"
124     +#define __WEAK_LLSC_MB " \n"
125     +#define smp_llsc_mb() do { } while (0)
126     +#define __LLSC_CLOBBER "memory"
127     #endif
128    
129     #ifdef CONFIG_CPU_CAVIUM_OCTEON
130     diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
131     index 110220705e97..b83b0397462d 100644
132     --- a/arch/mips/include/asm/futex.h
133     +++ b/arch/mips/include/asm/futex.h
134     @@ -16,7 +16,6 @@
135     #include <asm/barrier.h>
136     #include <asm/compiler.h>
137     #include <asm/errno.h>
138     -#include <asm/sync.h>
139     #include <asm/war.h>
140    
141     #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
142     @@ -33,7 +32,7 @@
143     " .set arch=r4000 \n" \
144     "2: sc $1, %2 \n" \
145     " beqzl $1, 1b \n" \
146     - __stringify(__WEAK_LLSC_MB) " \n" \
147     + __WEAK_LLSC_MB \
148     "3: \n" \
149     " .insn \n" \
150     " .set pop \n" \
151     @@ -51,19 +50,19 @@
152     "i" (-EFAULT) \
153     : "memory"); \
154     } else if (cpu_has_llsc) { \
155     + loongson_llsc_mb(); \
156     __asm__ __volatile__( \
157     " .set push \n" \
158     " .set noat \n" \
159     " .set push \n" \
160     " .set "MIPS_ISA_ARCH_LEVEL" \n" \
161     - " " __SYNC(full, loongson3_war) " \n" \
162     "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
163     " .set pop \n" \
164     " " insn " \n" \
165     " .set "MIPS_ISA_ARCH_LEVEL" \n" \
166     "2: "user_sc("$1", "%2")" \n" \
167     " beqz $1, 1b \n" \
168     - __stringify(__WEAK_LLSC_MB) " \n" \
169     + __WEAK_LLSC_MB \
170     "3: \n" \
171     " .insn \n" \
172     " .set pop \n" \
173     @@ -148,7 +147,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
174     " .set arch=r4000 \n"
175     "2: sc $1, %2 \n"
176     " beqzl $1, 1b \n"
177     - __stringify(__WEAK_LLSC_MB) " \n"
178     + __WEAK_LLSC_MB
179     "3: \n"
180     " .insn \n"
181     " .set pop \n"
182     @@ -165,13 +164,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
183     "i" (-EFAULT)
184     : "memory");
185     } else if (cpu_has_llsc) {
186     + loongson_llsc_mb();
187     __asm__ __volatile__(
188     "# futex_atomic_cmpxchg_inatomic \n"
189     " .set push \n"
190     " .set noat \n"
191     " .set push \n"
192     " .set "MIPS_ISA_ARCH_LEVEL" \n"
193     - " " __SYNC(full, loongson3_war) " \n"
194     "1: "user_ll("%1", "%3")" \n"
195     " bne %1, %z4, 3f \n"
196     " .set pop \n"
197     @@ -179,7 +178,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
198     " .set "MIPS_ISA_ARCH_LEVEL" \n"
199     "2: "user_sc("$1", "%2")" \n"
200     " beqz $1, 1b \n"
201     - "3: " __SYNC_ELSE(full, loongson3_war, __WEAK_LLSC_MB) "\n"
202     + __WEAK_LLSC_MB
203     + "3: \n"
204     " .insn \n"
205     " .set pop \n"
206     " .section .fixup,\"ax\" \n"
207     @@ -194,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
208     : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
209     "i" (-EFAULT)
210     : "memory");
211     + loongson_llsc_mb();
212     } else
213     return -ENOSYS;
214    
215     diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
216     index 83522c9fc7b6..37ac731a556b 100644
217     --- a/arch/powerpc/Makefile
218     +++ b/arch/powerpc/Makefile
219     @@ -91,11 +91,13 @@ MULTIPLEWORD := -mmultiple
220     endif
221    
222     ifdef CONFIG_PPC64
223     +ifndef CONFIG_CC_IS_CLANG
224     cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
225     cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
226     aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
227     aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
228     endif
229     +endif
230    
231     ifndef CONFIG_CC_IS_CLANG
232     cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
233     @@ -141,6 +143,7 @@ endif
234     endif
235    
236     CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
237     +ifndef CONFIG_CC_IS_CLANG
238     ifdef CONFIG_CPU_LITTLE_ENDIAN
239     CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
240     AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
241     @@ -149,6 +152,7 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
242     CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
243     AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
244     endif
245     +endif
246     CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
247     CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
248    
249     diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
250     index 2abc8e83b95e..9757d4f6331e 100644
251     --- a/arch/powerpc/boot/libfdt_env.h
252     +++ b/arch/powerpc/boot/libfdt_env.h
253     @@ -6,6 +6,8 @@
254     #include <string.h>
255    
256     #define INT_MAX ((int)(~0U>>1))
257     +#define UINT32_MAX ((u32)~0U)
258     +#define INT32_MAX ((s32)(UINT32_MAX >> 1))
259    
260     #include "of.h"
261    
262     diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
263     index 0cfc365d814b..722289a1d000 100644
264     --- a/arch/powerpc/include/asm/fixmap.h
265     +++ b/arch/powerpc/include/asm/fixmap.h
266     @@ -77,7 +77,12 @@ enum fixed_addresses {
267     static inline void __set_fixmap(enum fixed_addresses idx,
268     phys_addr_t phys, pgprot_t flags)
269     {
270     - map_kernel_page(fix_to_virt(idx), phys, flags);
271     + if (__builtin_constant_p(idx))
272     + BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
273     + else if (WARN_ON(idx >= __end_of_fixed_addresses))
274     + return;
275     +
276     + map_kernel_page(__fix_to_virt(idx), phys, flags);
277     }
278    
279     #endif /* !__ASSEMBLY__ */
280     diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
281     index cac95a3f30c2..e9a960e28f3c 100644
282     --- a/arch/powerpc/include/asm/spinlock.h
283     +++ b/arch/powerpc/include/asm/spinlock.h
284     @@ -36,12 +36,10 @@
285     #endif
286    
287     #ifdef CONFIG_PPC_PSERIES
288     -DECLARE_STATIC_KEY_FALSE(shared_processor);
289     -
290     #define vcpu_is_preempted vcpu_is_preempted
291     static inline bool vcpu_is_preempted(int cpu)
292     {
293     - if (!static_branch_unlikely(&shared_processor))
294     + if (!firmware_has_feature(FW_FEATURE_SPLPAR))
295     return false;
296     return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
297     }
298     diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
299     index 15002b51ff18..c92fe7fe9692 100644
300     --- a/arch/powerpc/include/asm/uaccess.h
301     +++ b/arch/powerpc/include/asm/uaccess.h
302     @@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
303     return n;
304     }
305    
306     -extern unsigned long __clear_user(void __user *addr, unsigned long size);
307     +unsigned long __arch_clear_user(void __user *addr, unsigned long size);
308    
309     static inline unsigned long clear_user(void __user *addr, unsigned long size)
310     {
311     @@ -409,12 +409,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
312     might_fault();
313     if (likely(access_ok(addr, size))) {
314     allow_write_to_user(addr, size);
315     - ret = __clear_user(addr, size);
316     + ret = __arch_clear_user(addr, size);
317     prevent_write_to_user(addr, size);
318     }
319     return ret;
320     }
321    
322     +static inline unsigned long __clear_user(void __user *addr, unsigned long size)
323     +{
324     + return clear_user(addr, size);
325     +}
326     +
327     extern long strncpy_from_user(char *dst, const char __user *src, long count);
328     extern __must_check long strnlen_user(const char __user *str, long n);
329    
330     diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
331     index d9279d0ee9f5..c031be8d41ff 100644
332     --- a/arch/powerpc/kernel/eeh_driver.c
333     +++ b/arch/powerpc/kernel/eeh_driver.c
334     @@ -897,12 +897,12 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
335    
336     /* Log the event */
337     if (pe->type & EEH_PE_PHB) {
338     - pr_err("EEH: PHB#%x failure detected, location: %s\n",
339     + pr_err("EEH: Recovering PHB#%x, location: %s\n",
340     pe->phb->global_number, eeh_pe_loc_get(pe));
341     } else {
342     struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);
343    
344     - pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
345     + pr_err("EEH: Recovering PHB#%x-PE#%x\n",
346     pe->phb->global_number, pe->addr);
347     pr_err("EEH: PE location: %s, PHB location: %s\n",
348     eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
349     diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
350     index bd91dceb7010..d341b464f23c 100644
351     --- a/arch/powerpc/kernel/security.c
352     +++ b/arch/powerpc/kernel/security.c
353     @@ -142,32 +142,33 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
354    
355     thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
356    
357     - if (rfi_flush || thread_priv) {
358     + if (rfi_flush) {
359     struct seq_buf s;
360     seq_buf_init(&s, buf, PAGE_SIZE - 1);
361    
362     - seq_buf_printf(&s, "Mitigation: ");
363     -
364     - if (rfi_flush)
365     - seq_buf_printf(&s, "RFI Flush");
366     -
367     - if (rfi_flush && thread_priv)
368     - seq_buf_printf(&s, ", ");
369     -
370     + seq_buf_printf(&s, "Mitigation: RFI Flush");
371     if (thread_priv)
372     - seq_buf_printf(&s, "L1D private per thread");
373     + seq_buf_printf(&s, ", L1D private per thread");
374    
375     seq_buf_printf(&s, "\n");
376    
377     return s.len;
378     }
379    
380     + if (thread_priv)
381     + return sprintf(buf, "Vulnerable: L1D private per thread\n");
382     +
383     if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
384     !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
385     return sprintf(buf, "Not affected\n");
386    
387     return sprintf(buf, "Vulnerable\n");
388     }
389     +
390     +ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
391     +{
392     + return cpu_show_meltdown(dev, attr, buf);
393     +}
394     #endif
395    
396     ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
397     diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
398     index 619447b1b797..11301a1187f3 100644
399     --- a/arch/powerpc/kernel/time.c
400     +++ b/arch/powerpc/kernel/time.c
401     @@ -232,7 +232,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
402     * Accumulate stolen time by scanning the dispatch trace log.
403     * Called on entry from user mode.
404     */
405     -void accumulate_stolen_time(void)
406     +void notrace accumulate_stolen_time(void)
407     {
408     u64 sst, ust;
409     unsigned long save_irq_soft_mask = irq_soft_mask_return();
410     diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
411     index 82f43535e686..014ff0701f24 100644
412     --- a/arch/powerpc/kernel/traps.c
413     +++ b/arch/powerpc/kernel/traps.c
414     @@ -250,15 +250,22 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
415     }
416     NOKPROBE_SYMBOL(oops_end);
417    
418     +static char *get_mmu_str(void)
419     +{
420     + if (early_radix_enabled())
421     + return " MMU=Radix";
422     + if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
423     + return " MMU=Hash";
424     + return "";
425     +}
426     +
427     static int __die(const char *str, struct pt_regs *regs, long err)
428     {
429     printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
430    
431     - printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n",
432     + printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
433     IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
434     - PAGE_SIZE / 1024,
435     - early_radix_enabled() ? " MMU=Radix" : "",
436     - early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "",
437     + PAGE_SIZE / 1024, get_mmu_str(),
438     IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
439     IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
440     IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
441     diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S
442     index f69a6aab7bfb..1ddb26394e8a 100644
443     --- a/arch/powerpc/lib/string_32.S
444     +++ b/arch/powerpc/lib/string_32.S
445     @@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
446     LG_CACHELINE_BYTES = L1_CACHE_SHIFT
447     CACHELINE_MASK = (L1_CACHE_BYTES-1)
448    
449     -_GLOBAL(__clear_user)
450     +_GLOBAL(__arch_clear_user)
451     /*
452     * Use dcbz on the complete cache lines in the destination
453     * to set them to zero. This requires that the destination
454     @@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
455     EX_TABLE(8b, 91b)
456     EX_TABLE(9b, 91b)
457    
458     -EXPORT_SYMBOL(__clear_user)
459     +EXPORT_SYMBOL(__arch_clear_user)
460     diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
461     index 507b18b1660e..169872bc0892 100644
462     --- a/arch/powerpc/lib/string_64.S
463     +++ b/arch/powerpc/lib/string_64.S
464     @@ -17,7 +17,7 @@ PPC64_CACHES:
465     .section ".text"
466    
467     /**
468     - * __clear_user: - Zero a block of memory in user space, with less checking.
469     + * __arch_clear_user: - Zero a block of memory in user space, with less checking.
470     * @to: Destination address, in user space.
471     * @n: Number of bytes to zero.
472     *
473     @@ -58,7 +58,7 @@ err3; stb r0,0(r3)
474     mr r3,r4
475     blr
476    
477     -_GLOBAL_TOC(__clear_user)
478     +_GLOBAL_TOC(__arch_clear_user)
479     cmpdi r4,32
480     neg r6,r3
481     li r0,0
482     @@ -181,4 +181,4 @@ err1; dcbz 0,r3
483     cmpdi r4,32
484     blt .Lshort_clear
485     b .Lmedium_clear
486     -EXPORT_SYMBOL(__clear_user)
487     +EXPORT_SYMBOL(__arch_clear_user)
488     diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
489     index 6c123760164e..83c51a7d7eee 100644
490     --- a/arch/powerpc/mm/book3s64/hash_utils.c
491     +++ b/arch/powerpc/mm/book3s64/hash_utils.c
492     @@ -294,10 +294,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
493     ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
494     HPTE_V_BOLTED, psize, psize,
495     ssize);
496     -
497     + if (ret == -1) {
498     + /* Try to remove a non bolted entry */
499     + ret = mmu_hash_ops.hpte_remove(hpteg);
500     + if (ret != -1)
501     + ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
502     + HPTE_V_BOLTED, psize, psize,
503     + ssize);
504     + }
505     if (ret < 0)
506     break;
507    
508     + cond_resched();
509     #ifdef CONFIG_DEBUG_PAGEALLOC
510     if (debug_pagealloc_enabled() &&
511     (paddr >> PAGE_SHIFT) < linear_map_hash_count)
512     diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
513     index b33251d75927..572651a5c87b 100644
514     --- a/arch/powerpc/platforms/pseries/cmm.c
515     +++ b/arch/powerpc/platforms/pseries/cmm.c
516     @@ -411,6 +411,10 @@ static struct bus_type cmm_subsys = {
517     .dev_name = "cmm",
518     };
519    
520     +static void cmm_release_device(struct device *dev)
521     +{
522     +}
523     +
524     /**
525     * cmm_sysfs_register - Register with sysfs
526     *
527     @@ -426,6 +430,7 @@ static int cmm_sysfs_register(struct device *dev)
528    
529     dev->id = 0;
530     dev->bus = &cmm_subsys;
531     + dev->release = cmm_release_device;
532    
533     if ((rc = device_register(dev)))
534     goto subsys_unregister;
535     diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
536     index 61883291defc..ee07d0718bf1 100644
537     --- a/arch/powerpc/platforms/pseries/papr_scm.c
538     +++ b/arch/powerpc/platforms/pseries/papr_scm.c
539     @@ -152,7 +152,7 @@ static int papr_scm_meta_get(struct papr_scm_priv *p,
540     int len, read;
541     int64_t ret;
542    
543     - if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
544     + if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
545     return -EINVAL;
546    
547     for (len = hdr->in_length; len; len -= read) {
548     @@ -206,7 +206,7 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
549     __be64 data_be;
550     int64_t ret;
551    
552     - if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
553     + if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
554     return -EINVAL;
555    
556     for (len = hdr->in_length; len; len -= wrote) {
557     diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
558     index 0c8421dd01ab..0a40201f315f 100644
559     --- a/arch/powerpc/platforms/pseries/setup.c
560     +++ b/arch/powerpc/platforms/pseries/setup.c
561     @@ -74,9 +74,6 @@
562     #include "pseries.h"
563     #include "../../../../drivers/pci/pci.h"
564    
565     -DEFINE_STATIC_KEY_FALSE(shared_processor);
566     -EXPORT_SYMBOL_GPL(shared_processor);
567     -
568     int CMO_PrPSP = -1;
569     int CMO_SecPSP = -1;
570     unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
571     @@ -761,10 +758,6 @@ static void __init pSeries_setup_arch(void)
572    
573     if (firmware_has_feature(FW_FEATURE_LPAR)) {
574     vpa_init(boot_cpuid);
575     -
576     - if (lppaca_shared_proc(get_lppaca()))
577     - static_branch_enable(&shared_processor);
578     -
579     ppc_md.power_save = pseries_lpar_idle;
580     ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
581     #ifdef CONFIG_PCI_IOV
582     diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
583     index 2b4e959caa36..7b9fe0a567cf 100755
584     --- a/arch/powerpc/tools/relocs_check.sh
585     +++ b/arch/powerpc/tools/relocs_check.sh
586     @@ -20,7 +20,7 @@ objdump="$1"
587     vmlinux="$2"
588    
589     bad_relocs=$(
590     -"$objdump" -R "$vmlinux" |
591     +$objdump -R "$vmlinux" |
592     # Only look at relocation lines.
593     grep -E '\<R_' |
594     # These relocations are okay
595     diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh
596     index 1e972df3107e..77114755dc6f 100755
597     --- a/arch/powerpc/tools/unrel_branch_check.sh
598     +++ b/arch/powerpc/tools/unrel_branch_check.sh
599     @@ -18,14 +18,14 @@ vmlinux="$2"
600     #__end_interrupts should be located within the first 64K
601    
602     end_intr=0x$(
603     -"$objdump" -R "$vmlinux" -d --start-address=0xc000000000000000 \
604     +$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \
605     --stop-address=0xc000000000010000 |
606     grep '\<__end_interrupts>:' |
607     awk '{print $1}'
608     )
609    
610     BRANCHES=$(
611     -"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \
612     +$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \
613     --stop-address=${end_intr} |
614     grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
615     grep -v '\<__start_initialization_multiplatform>' |
616     diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
617     index d402ced7f7c3..cb8b1cc285c9 100644
618     --- a/arch/s390/kernel/machine_kexec.c
619     +++ b/arch/s390/kernel/machine_kexec.c
620     @@ -164,7 +164,9 @@ static bool kdump_csum_valid(struct kimage *image)
621     #ifdef CONFIG_CRASH_DUMP
622     int rc;
623    
624     + preempt_disable();
625     rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
626     + preempt_enable();
627     return rc == 0;
628     #else
629     return false;
630     diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
631     index 3d8b12a9a6ff..7511b71d2931 100644
632     --- a/arch/s390/kernel/perf_cpum_sf.c
633     +++ b/arch/s390/kernel/perf_cpum_sf.c
634     @@ -193,7 +193,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
635     unsigned long num_sdb, gfp_t gfp_flags)
636     {
637     int i, rc;
638     - unsigned long *new, *tail;
639     + unsigned long *new, *tail, *tail_prev = NULL;
640    
641     if (!sfb->sdbt || !sfb->tail)
642     return -EINVAL;
643     @@ -232,6 +232,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
644     sfb->num_sdbt++;
645     /* Link current page to tail of chain */
646     *tail = (unsigned long)(void *) new + 1;
647     + tail_prev = tail;
648     tail = new;
649     }
650    
651     @@ -241,10 +242,22 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
652     * issue, a new realloc call (if required) might succeed.
653     */
654     rc = alloc_sample_data_block(tail, gfp_flags);
655     - if (rc)
656     + if (rc) {
657     + /* Undo last SDBT. An SDBT with no SDB at its first
658     + * entry but with an SDBT entry instead can not be
659     + * handled by the interrupt handler code.
660     + * Avoid this situation.
661     + */
662     + if (tail_prev) {
663     + sfb->num_sdbt--;
664     + free_page((unsigned long) new);
665     + tail = tail_prev;
666     + }
667     break;
668     + }
669     sfb->num_sdb++;
670     tail++;
671     + tail_prev = new = NULL; /* Allocated at least one SBD */
672     }
673    
674     /* Link sampling buffer to its origin */
675     diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
676     index a8204f952315..6e609b13c0ce 100644
677     --- a/arch/s390/kernel/unwind_bc.c
678     +++ b/arch/s390/kernel/unwind_bc.c
679     @@ -60,6 +60,11 @@ bool unwind_next_frame(struct unwind_state *state)
680     ip = READ_ONCE_NOCHECK(sf->gprs[8]);
681     reliable = false;
682     regs = NULL;
683     + if (!__kernel_text_address(ip)) {
684     + /* skip bogus %r14 */
685     + state->regs = NULL;
686     + return unwind_next_frame(state);
687     + }
688     } else {
689     sf = (struct stack_frame *) state->sp;
690     sp = READ_ONCE_NOCHECK(sf->back_chain);
691     diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
692     index 59ad7997fed1..de7ca4b6718f 100644
693     --- a/arch/s390/mm/maccess.c
694     +++ b/arch/s390/mm/maccess.c
695     @@ -119,9 +119,15 @@ static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
696     */
697     int memcpy_real(void *dest, void *src, size_t count)
698     {
699     - if (S390_lowcore.nodat_stack != 0)
700     - return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
701     - 3, dest, src, count);
702     + int rc;
703     +
704     + if (S390_lowcore.nodat_stack != 0) {
705     + preempt_disable();
706     + rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
707     + dest, src, count);
708     + preempt_enable();
709     + return rc;
710     + }
711     /*
712     * This is a really early memcpy_real call, the stacks are
713     * not set up yet. Just call _memcpy_real on the early boot
714     diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
715     index fc8c52cff5aa..c5643a59a8c7 100644
716     --- a/arch/um/drivers/virtio_uml.c
717     +++ b/arch/um/drivers/virtio_uml.c
718     @@ -83,7 +83,7 @@ static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
719     return 0;
720     }
721    
722     -static int full_read(int fd, void *buf, int len)
723     +static int full_read(int fd, void *buf, int len, bool abortable)
724     {
725     int rc;
726    
727     @@ -93,7 +93,7 @@ static int full_read(int fd, void *buf, int len)
728     buf += rc;
729     len -= rc;
730     }
731     - } while (len && (rc > 0 || rc == -EINTR));
732     + } while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
733    
734     if (rc < 0)
735     return rc;
736     @@ -104,7 +104,7 @@ static int full_read(int fd, void *buf, int len)
737    
738     static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
739     {
740     - return full_read(fd, msg, sizeof(msg->header));
741     + return full_read(fd, msg, sizeof(msg->header), true);
742     }
743    
744     static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
745     @@ -118,7 +118,7 @@ static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
746     size = msg->header.size;
747     if (size > max_payload_size)
748     return -EPROTO;
749     - return full_read(fd, &msg->payload, size);
750     + return full_read(fd, &msg->payload, size, false);
751     }
752    
753     static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
754     diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
755     index ac42ae4651ce..eebdcbef0578 100644
756     --- a/drivers/cdrom/cdrom.c
757     +++ b/drivers/cdrom/cdrom.c
758     @@ -996,6 +996,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
759     tracks->xa = 0;
760     tracks->error = 0;
761     cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
762     +
763     + if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
764     + tracks->error = CDS_NO_INFO;
765     + return;
766     + }
767     +
768     /* Grab the TOC header so we can see how many tracks there are */
769     ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
770     if (ret) {
771     @@ -1162,7 +1168,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
772     ret = open_for_data(cdi);
773     if (ret)
774     goto err;
775     - cdrom_mmc3_profile(cdi);
776     + if (CDROM_CAN(CDC_GENERIC_PACKET))
777     + cdrom_mmc3_profile(cdi);
778     if (mode & FMODE_WRITE) {
779     ret = -EROFS;
780     if (cdrom_open_write(cdi))
781     @@ -2882,6 +2889,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
782     it doesn't give enough information or fails. then we return
783     the toc contents. */
784     use_toc:
785     + if (!CDROM_CAN(CDC_PLAY_AUDIO))
786     + return -ENOSYS;
787     +
788     toc.cdte_format = CDROM_MSF;
789     toc.cdte_track = CDROM_LEADOUT;
790     if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
791     diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
792     index 9d930edd6516..13304cf5f2a8 100644
793     --- a/drivers/clk/clk-gpio.c
794     +++ b/drivers/clk/clk-gpio.c
795     @@ -280,7 +280,7 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
796     else
797     clk = clk_register_gpio_gate(&pdev->dev, node->name,
798     parent_names ? parent_names[0] : NULL, gpiod,
799     - 0);
800     + CLK_SET_RATE_PARENT);
801     if (IS_ERR(clk))
802     return PTR_ERR(clk);
803    
804     diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
805     index 287fdeae7c7c..7b123105b5de 100644
806     --- a/drivers/clk/pxa/clk-pxa27x.c
807     +++ b/drivers/clk/pxa/clk-pxa27x.c
808     @@ -459,6 +459,7 @@ struct dummy_clk {
809     };
810     static struct dummy_clk dummy_clks[] __initdata = {
811     DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"),
812     + DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"),
813     DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
814     DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
815     };
816     diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
817     index b98b81ef43a1..5a89ed88cc27 100644
818     --- a/drivers/clk/qcom/clk-rcg2.c
819     +++ b/drivers/clk/qcom/clk-rcg2.c
820     @@ -220,6 +220,8 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
821     if (clk_flags & CLK_SET_RATE_PARENT) {
822     rate = f->freq;
823     if (f->pre_div) {
824     + if (!rate)
825     + rate = req->rate;
826     rate /= 2;
827     rate *= f->pre_div + 1;
828     }
829     diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
830     index fef5e8157061..930fa4a4c52a 100644
831     --- a/drivers/clk/qcom/clk-smd-rpm.c
832     +++ b/drivers/clk/qcom/clk-smd-rpm.c
833     @@ -648,6 +648,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
834     };
835    
836     /* msm8998 */
837     +DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
838     DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
839     DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
840     DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
841     @@ -670,6 +671,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
842     DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
843     DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
844     static struct clk_smd_rpm *msm8998_clks[] = {
845     + [RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
846     + [RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
847     [RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
848     [RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk,
849     [RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk,
850     diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
851     index 28ddc747d703..bdeacebbf0e4 100644
852     --- a/drivers/clk/qcom/common.c
853     +++ b/drivers/clk/qcom/common.c
854     @@ -29,6 +29,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
855     if (!f)
856     return NULL;
857    
858     + if (!f->freq)
859     + return f;
860     +
861     for (; f->freq; f++)
862     if (rate <= f->freq)
863     return f;
864     diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
865     index 9f09a59161e7..5b39d3701fa3 100644
866     --- a/drivers/clocksource/asm9260_timer.c
867     +++ b/drivers/clocksource/asm9260_timer.c
868     @@ -194,6 +194,10 @@ static int __init asm9260_timer_init(struct device_node *np)
869     }
870    
871     clk = of_clk_get(np, 0);
872     + if (IS_ERR(clk)) {
873     + pr_err("Failed to get clk!\n");
874     + return PTR_ERR(clk);
875     + }
876    
877     ret = clk_prepare_enable(clk);
878     if (ret) {
879     diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
880     index 11ff701ff4bb..a3c73e972fce 100644
881     --- a/drivers/clocksource/timer-of.c
882     +++ b/drivers/clocksource/timer-of.c
883     @@ -192,7 +192,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
884     }
885    
886     if (!to->clkevt.name)
887     - to->clkevt.name = np->name;
888     + to->clkevt.name = np->full_name;
889    
890     to->np = np;
891    
892     diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
893     index 06664fbd2d91..89792083d62c 100644
894     --- a/drivers/dma/fsl-qdma.c
895     +++ b/drivers/dma/fsl-qdma.c
896     @@ -1155,6 +1155,9 @@ static int fsl_qdma_probe(struct platform_device *pdev)
897     return ret;
898    
899     fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
900     + if (fsl_qdma->irq_base < 0)
901     + return fsl_qdma->irq_base;
902     +
903     fsl_qdma->feature = of_property_read_bool(np, "big-endian");
904     INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
905    
906     diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
907     index 5d56f1e4d332..43acba2a1c0e 100644
908     --- a/drivers/dma/xilinx/xilinx_dma.c
909     +++ b/drivers/dma/xilinx/xilinx_dma.c
910     @@ -1433,6 +1433,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
911    
912     chan->err = false;
913     chan->idle = true;
914     + chan->desc_pendingcount = 0;
915     chan->desc_submitcount = 0;
916    
917     return err;
918     diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
919     index e9e47c0d5be7..490ce7bae25e 100644
920     --- a/drivers/gpio/gpio-lynxpoint.c
921     +++ b/drivers/gpio/gpio-lynxpoint.c
922     @@ -164,6 +164,12 @@ static int lp_irq_type(struct irq_data *d, unsigned type)
923     value |= TRIG_SEL_BIT | INT_INV_BIT;
924    
925     outl(value, reg);
926     +
927     + if (type & IRQ_TYPE_EDGE_BOTH)
928     + irq_set_handler_locked(d, handle_edge_irq);
929     + else if (type & IRQ_TYPE_LEVEL_MASK)
930     + irq_set_handler_locked(d, handle_level_irq);
931     +
932     spin_unlock_irqrestore(&lg->lock, flags);
933    
934     return 0;
935     diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
936     index 16a47de29c94..a031cbcdf6ef 100644
937     --- a/drivers/gpio/gpio-mpc8xxx.c
938     +++ b/drivers/gpio/gpio-mpc8xxx.c
939     @@ -377,7 +377,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
940     * It's assumed that only a single type of gpio controller is available
941     * on the current machine, so overwriting global data is fine.
942     */
943     - mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
944     + if (devtype->irq_set_type)
945     + mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
946    
947     if (devtype->gpio_dir_out)
948     gc->direction_output = devtype->gpio_dir_out;
949     @@ -386,6 +387,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
950    
951     gc->to_irq = mpc8xxx_gpio_to_irq;
952    
953     + if (of_device_is_compatible(np, "fsl,qoriq-gpio"))
954     + gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
955     +
956     ret = gpiochip_add_data(gc, mpc8xxx_gc);
957     if (ret) {
958     pr_err("%pOF: GPIO chip registration failed with status %d\n",
959     diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
960     index 7907a8755866..c77d474185f3 100644
961     --- a/drivers/gpio/gpio-mxc.c
962     +++ b/drivers/gpio/gpio-mxc.c
963     @@ -411,6 +411,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
964     {
965     struct device_node *np = pdev->dev.of_node;
966     struct mxc_gpio_port *port;
967     + int irq_count;
968     int irq_base;
969     int err;
970    
971     @@ -426,9 +427,15 @@ static int mxc_gpio_probe(struct platform_device *pdev)
972     if (IS_ERR(port->base))
973     return PTR_ERR(port->base);
974    
975     - port->irq_high = platform_get_irq(pdev, 1);
976     - if (port->irq_high < 0)
977     - port->irq_high = 0;
978     + irq_count = platform_irq_count(pdev);
979     + if (irq_count < 0)
980     + return irq_count;
981     +
982     + if (irq_count > 1) {
983     + port->irq_high = platform_get_irq(pdev, 1);
984     + if (port->irq_high < 0)
985     + port->irq_high = 0;
986     + }
987    
988     port->irq = platform_get_irq(pdev, 0);
989     if (port->irq < 0)
990     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
991     index dff41d0a85fe..c0e41f1f0c23 100644
992     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
993     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
994     @@ -35,6 +35,7 @@
995     #include <linux/hmm.h>
996     #include <linux/pagemap.h>
997     #include <linux/sched/task.h>
998     +#include <linux/sched/mm.h>
999     #include <linux/seq_file.h>
1000     #include <linux/slab.h>
1001     #include <linux/swap.h>
1002     @@ -788,7 +789,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
1003     struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
1004     struct ttm_tt *ttm = bo->tbo.ttm;
1005     struct amdgpu_ttm_tt *gtt = (void *)ttm;
1006     - struct mm_struct *mm = gtt->usertask->mm;
1007     + struct mm_struct *mm;
1008     unsigned long start = gtt->userptr;
1009     struct vm_area_struct *vma;
1010     struct hmm_range *range;
1011     @@ -796,25 +797,14 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
1012     uint64_t *pfns;
1013     int r = 0;
1014    
1015     - if (!mm) /* Happens during process shutdown */
1016     - return -ESRCH;
1017     -
1018     if (unlikely(!mirror)) {
1019     DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
1020     - r = -EFAULT;
1021     - goto out;
1022     + return -EFAULT;
1023     }
1024    
1025     - vma = find_vma(mm, start);
1026     - if (unlikely(!vma || start < vma->vm_start)) {
1027     - r = -EFAULT;
1028     - goto out;
1029     - }
1030     - if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
1031     - vma->vm_file)) {
1032     - r = -EPERM;
1033     - goto out;
1034     - }
1035     + mm = mirror->hmm->mmu_notifier.mm;
1036     + if (!mmget_not_zero(mm)) /* Happens during process shutdown */
1037     + return -ESRCH;
1038    
1039     range = kzalloc(sizeof(*range), GFP_KERNEL);
1040     if (unlikely(!range)) {
1041     @@ -847,6 +837,17 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
1042     hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
1043    
1044     down_read(&mm->mmap_sem);
1045     + vma = find_vma(mm, start);
1046     + if (unlikely(!vma || start < vma->vm_start)) {
1047     + r = -EFAULT;
1048     + goto out_unlock;
1049     + }
1050     + if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
1051     + vma->vm_file)) {
1052     + r = -EPERM;
1053     + goto out_unlock;
1054     + }
1055     +
1056     r = hmm_range_fault(range, 0);
1057     up_read(&mm->mmap_sem);
1058    
1059     @@ -865,15 +866,19 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
1060     }
1061    
1062     gtt->range = range;
1063     + mmput(mm);
1064    
1065     return 0;
1066    
1067     +out_unlock:
1068     + up_read(&mm->mmap_sem);
1069     out_free_pfns:
1070     hmm_range_unregister(range);
1071     kvfree(pfns);
1072     out_free_ranges:
1073     kfree(range);
1074     out:
1075     + mmput(mm);
1076     return r;
1077     }
1078    
1079     diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
1080     index 892ce636ef72..6ee04803c362 100644
1081     --- a/drivers/gpu/drm/drm_property.c
1082     +++ b/drivers/gpu/drm/drm_property.c
1083     @@ -561,7 +561,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
1084     struct drm_property_blob *blob;
1085     int ret;
1086    
1087     - if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
1088     + if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
1089     return ERR_PTR(-EINVAL);
1090    
1091     blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
1092     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1093     index 2fa3587d974f..e0b241bd3070 100644
1094     --- a/drivers/hid/hid-core.c
1095     +++ b/drivers/hid/hid-core.c
1096     @@ -781,6 +781,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
1097     if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
1098     parser->global.report_size == 8)
1099     parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
1100     +
1101     + if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
1102     + parser->global.report_size == 8)
1103     + parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
1104     }
1105    
1106     static void hid_scan_collection(struct hid_parser *parser, unsigned type)
1107     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1108     index 447e8db21174..6273e7178e78 100644
1109     --- a/drivers/hid/hid-ids.h
1110     +++ b/drivers/hid/hid-ids.h
1111     @@ -573,6 +573,7 @@
1112     #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
1113     #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941
1114     #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
1115     +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a 0x1f4a
1116    
1117     #define USB_VENDOR_ID_HUION 0x256c
1118     #define USB_DEVICE_ID_HUION_TABLET 0x006e
1119     @@ -959,6 +960,7 @@
1120    
1121     #define I2C_VENDOR_ID_RAYDIUM 0x2386
1122     #define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
1123     +#define I2C_PRODUCT_ID_RAYDIUM_3118 0x3118
1124    
1125     #define USB_VENDOR_ID_RAZER 0x1532
1126     #define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
1127     diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
1128     index 8e91e2f06cb4..cd9193078525 100644
1129     --- a/drivers/hid/hid-logitech-hidpp.c
1130     +++ b/drivers/hid/hid-logitech-hidpp.c
1131     @@ -1102,6 +1102,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp,
1132     ret = hidpp_send_fap_command_sync(hidpp, feature_index,
1133     CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS,
1134     NULL, 0, &response);
1135     + /* Ignore these intermittent errors */
1136     + if (ret == HIDPP_ERROR_RESOURCE_ERROR)
1137     + return -EIO;
1138     if (ret > 0) {
1139     hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
1140     __func__, ret);
1141     diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1142     index c50bcd967d99..9a35af1e2662 100644
1143     --- a/drivers/hid/hid-quirks.c
1144     +++ b/drivers/hid/hid-quirks.c
1145     @@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = {
1146     { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
1147     { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
1148     { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
1149     + { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL },
1150     { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
1151     { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
1152     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
1153     diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
1154     index 7c6abd7e0979..9ce22acdfaca 100644
1155     --- a/drivers/hid/hid-rmi.c
1156     +++ b/drivers/hid/hid-rmi.c
1157     @@ -744,7 +744,8 @@ static void rmi_remove(struct hid_device *hdev)
1158     {
1159     struct rmi_data *hdata = hid_get_drvdata(hdev);
1160    
1161     - if (hdata->device_flags & RMI_DEVICE) {
1162     + if ((hdata->device_flags & RMI_DEVICE)
1163     + && test_bit(RMI_STARTED, &hdata->flags)) {
1164     clear_bit(RMI_STARTED, &hdata->flags);
1165     cancel_work_sync(&hdata->reset_work);
1166     rmi_unregister_transport_device(&hdata->xport);
1167     diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
1168     index 04c088131e04..7608ee053114 100644
1169     --- a/drivers/hid/i2c-hid/i2c-hid-core.c
1170     +++ b/drivers/hid/i2c-hid/i2c-hid-core.c
1171     @@ -170,6 +170,8 @@ static const struct i2c_hid_quirks {
1172     I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
1173     { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
1174     I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
1175     + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
1176     + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
1177     { USB_VENDOR_ID_ELAN, HID_ANY_ID,
1178     I2C_HID_QUIRK_BOGUS_IRQ },
1179     { 0, 0 }
1180     diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
1181     index 53a60c81e220..05ead1735c6e 100644
1182     --- a/drivers/hv/vmbus_drv.c
1183     +++ b/drivers/hv/vmbus_drv.c
1184     @@ -2308,7 +2308,7 @@ static void hv_crash_handler(struct pt_regs *regs)
1185     vmbus_connection.conn_state = DISCONNECTED;
1186     cpu = smp_processor_id();
1187     hv_stimer_cleanup(cpu);
1188     - hv_synic_cleanup(cpu);
1189     + hv_synic_disable_regs(cpu);
1190     hyperv_cleanup();
1191     };
1192    
1193     diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
1194     index b24e7b937f21..84cfed17ff4f 100644
1195     --- a/drivers/i2c/busses/i2c-stm32f7.c
1196     +++ b/drivers/i2c/busses/i2c-stm32f7.c
1197     @@ -1985,6 +1985,11 @@ pm_disable:
1198     pm_runtime_set_suspended(i2c_dev->dev);
1199     pm_runtime_dont_use_autosuspend(i2c_dev->dev);
1200    
1201     + if (i2c_dev->dma) {
1202     + stm32_i2c_dma_free(i2c_dev->dma);
1203     + i2c_dev->dma = NULL;
1204     + }
1205     +
1206     clk_free:
1207     clk_disable_unprepare(i2c_dev->clk);
1208    
1209     @@ -1995,21 +2000,21 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
1210     {
1211     struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
1212    
1213     - if (i2c_dev->dma) {
1214     - stm32_i2c_dma_free(i2c_dev->dma);
1215     - i2c_dev->dma = NULL;
1216     - }
1217     -
1218     i2c_del_adapter(&i2c_dev->adap);
1219     pm_runtime_get_sync(i2c_dev->dev);
1220    
1221     - clk_disable_unprepare(i2c_dev->clk);
1222     -
1223     pm_runtime_put_noidle(i2c_dev->dev);
1224     pm_runtime_disable(i2c_dev->dev);
1225     pm_runtime_set_suspended(i2c_dev->dev);
1226     pm_runtime_dont_use_autosuspend(i2c_dev->dev);
1227    
1228     + if (i2c_dev->dma) {
1229     + stm32_i2c_dma_free(i2c_dev->dma);
1230     + i2c_dev->dma = NULL;
1231     + }
1232     +
1233     + clk_disable_unprepare(i2c_dev->clk);
1234     +
1235     return 0;
1236     }
1237    
1238     diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
1239     index 24c4b691b1c9..ae60442efda0 100644
1240     --- a/drivers/input/touchscreen/atmel_mxt_ts.c
1241     +++ b/drivers/input/touchscreen/atmel_mxt_ts.c
1242     @@ -3156,6 +3156,8 @@ static int __maybe_unused mxt_suspend(struct device *dev)
1243    
1244     mutex_unlock(&input_dev->mutex);
1245    
1246     + disable_irq(data->irq);
1247     +
1248     return 0;
1249     }
1250    
1251     @@ -3168,6 +3170,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
1252     if (!input_dev)
1253     return 0;
1254    
1255     + enable_irq(data->irq);
1256     +
1257     mutex_lock(&input_dev->mutex);
1258    
1259     if (input_dev->users)
1260     diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
1261     index e9006407c9bc..f4ebdab06280 100644
1262     --- a/drivers/input/touchscreen/ili210x.c
1263     +++ b/drivers/input/touchscreen/ili210x.c
1264     @@ -334,7 +334,12 @@ static int ili210x_i2c_probe(struct i2c_client *client,
1265     input_set_abs_params(input, ABS_MT_POSITION_X, 0, 0xffff, 0, 0);
1266     input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 0xffff, 0, 0);
1267     touchscreen_parse_properties(input, true, &priv->prop);
1268     - input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT);
1269     +
1270     + error = input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT);
1271     + if (error) {
1272     + dev_err(dev, "Unable to set up slots, err: %d\n", error);
1273     + return error;
1274     + }
1275    
1276     error = devm_add_action(dev, ili210x_cancel_work, priv);
1277     if (error)
1278     diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
1279     index 1139714e72e2..1c5f8875cb79 100644
1280     --- a/drivers/input/touchscreen/st1232.c
1281     +++ b/drivers/input/touchscreen/st1232.c
1282     @@ -149,6 +149,11 @@ static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron)
1283     gpiod_set_value_cansleep(ts->reset_gpio, !poweron);
1284     }
1285    
1286     +static void st1232_ts_power_off(void *data)
1287     +{
1288     + st1232_ts_power(data, false);
1289     +}
1290     +
1291     static const struct st_chip_info st1232_chip_info = {
1292     .have_z = true,
1293     .max_x = 0x31f, /* 800 - 1 */
1294     @@ -229,6 +234,13 @@ static int st1232_ts_probe(struct i2c_client *client,
1295    
1296     st1232_ts_power(ts, true);
1297    
1298     + error = devm_add_action_or_reset(&client->dev, st1232_ts_power_off, ts);
1299     + if (error) {
1300     + dev_err(&client->dev,
1301     + "Failed to install power off action: %d\n", error);
1302     + return error;
1303     + }
1304     +
1305     input_dev->name = "st1232-touchscreen";
1306     input_dev->id.bustype = BUS_I2C;
1307     input_dev->dev.parent = &client->dev;
1308     @@ -271,15 +283,6 @@ static int st1232_ts_probe(struct i2c_client *client,
1309     return 0;
1310     }
1311    
1312     -static int st1232_ts_remove(struct i2c_client *client)
1313     -{
1314     - struct st1232_ts_data *ts = i2c_get_clientdata(client);
1315     -
1316     - st1232_ts_power(ts, false);
1317     -
1318     - return 0;
1319     -}
1320     -
1321     static int __maybe_unused st1232_ts_suspend(struct device *dev)
1322     {
1323     struct i2c_client *client = to_i2c_client(dev);
1324     @@ -329,7 +332,6 @@ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
1325    
1326     static struct i2c_driver st1232_ts_driver = {
1327     .probe = st1232_ts_probe,
1328     - .remove = st1232_ts_remove,
1329     .id_table = st1232_ts_id,
1330     .driver = {
1331     .name = ST1232_TS_NAME,
1332     diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
1333     index 8da93e730d6f..ed90361b84dc 100644
1334     --- a/drivers/iommu/arm-smmu-v3.c
1335     +++ b/drivers/iommu/arm-smmu-v3.c
1336     @@ -3611,19 +3611,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
1337    
1338     /* Interrupt lines */
1339    
1340     - irq = platform_get_irq_byname(pdev, "combined");
1341     + irq = platform_get_irq_byname_optional(pdev, "combined");
1342     if (irq > 0)
1343     smmu->combined_irq = irq;
1344     else {
1345     - irq = platform_get_irq_byname(pdev, "eventq");
1346     + irq = platform_get_irq_byname_optional(pdev, "eventq");
1347     if (irq > 0)
1348     smmu->evtq.q.irq = irq;
1349    
1350     - irq = platform_get_irq_byname(pdev, "priq");
1351     + irq = platform_get_irq_byname_optional(pdev, "priq");
1352     if (irq > 0)
1353     smmu->priq.q.irq = irq;
1354    
1355     - irq = platform_get_irq_byname(pdev, "gerror");
1356     + irq = platform_get_irq_byname_optional(pdev, "gerror");
1357     if (irq > 0)
1358     smmu->gerr_irq = irq;
1359     }
1360     diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
1361     index 4dcbf68dfda4..0df091934361 100644
1362     --- a/drivers/iommu/rockchip-iommu.c
1363     +++ b/drivers/iommu/rockchip-iommu.c
1364     @@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
1365     if (!dma_dev)
1366     return NULL;
1367    
1368     - rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
1369     + rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
1370     if (!rk_domain)
1371     return NULL;
1372    
1373     if (type == IOMMU_DOMAIN_DMA &&
1374     iommu_get_dma_cookie(&rk_domain->domain))
1375     - return NULL;
1376     + goto err_free_domain;
1377    
1378     /*
1379     * rk32xx iommus use a 2 level pagetable.
1380     @@ -1021,6 +1021,8 @@ err_free_dt:
1381     err_put_cookie:
1382     if (type == IOMMU_DOMAIN_DMA)
1383     iommu_put_dma_cookie(&rk_domain->domain);
1384     +err_free_domain:
1385     + kfree(rk_domain);
1386    
1387     return NULL;
1388     }
1389     @@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
1390    
1391     if (domain->type == IOMMU_DOMAIN_DMA)
1392     iommu_put_dma_cookie(&rk_domain->domain);
1393     + kfree(rk_domain);
1394     }
1395    
1396     static int rk_iommu_add_device(struct device *dev)
1397     diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
1398     index 7293fc3f796d..dd486233e282 100644
1399     --- a/drivers/iommu/tegra-smmu.c
1400     +++ b/drivers/iommu/tegra-smmu.c
1401     @@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
1402     return (addr & smmu->pfn_mask) == addr;
1403     }
1404    
1405     -static dma_addr_t smmu_pde_to_dma(u32 pde)
1406     +static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
1407     {
1408     - return pde << 12;
1409     + return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
1410     }
1411    
1412     static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
1413     @@ -549,6 +549,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
1414     dma_addr_t *dmap)
1415     {
1416     unsigned int pd_index = iova_pd_index(iova);
1417     + struct tegra_smmu *smmu = as->smmu;
1418     struct page *pt_page;
1419     u32 *pd;
1420    
1421     @@ -557,7 +558,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
1422     return NULL;
1423    
1424     pd = page_address(as->pd);
1425     - *dmap = smmu_pde_to_dma(pd[pd_index]);
1426     + *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
1427    
1428     return tegra_smmu_pte_offset(pt_page, iova);
1429     }
1430     @@ -599,7 +600,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
1431     } else {
1432     u32 *pd = page_address(as->pd);
1433    
1434     - *dmap = smmu_pde_to_dma(pd[pde]);
1435     + *dmap = smmu_pde_to_dma(smmu, pd[pde]);
1436     }
1437    
1438     return tegra_smmu_pte_offset(as->pts[pde], iova);
1439     @@ -624,7 +625,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
1440     if (--as->count[pde] == 0) {
1441     struct tegra_smmu *smmu = as->smmu;
1442     u32 *pd = page_address(as->pd);
1443     - dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
1444     + dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
1445    
1446     tegra_smmu_set_pde(as, iova, 0);
1447    
1448     diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
1449     index fc75c61233aa..58bec2126966 100644
1450     --- a/drivers/irqchip/irq-bcm7038-l1.c
1451     +++ b/drivers/irqchip/irq-bcm7038-l1.c
1452     @@ -281,6 +281,10 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
1453     pr_err("failed to map parent interrupt %d\n", parent_irq);
1454     return -EINVAL;
1455     }
1456     +
1457     + if (of_property_read_bool(dn, "brcm,irq-can-wake"))
1458     + enable_irq_wake(parent_irq);
1459     +
1460     irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
1461     intc);
1462    
1463     diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
1464     index f126255b3260..dda512dfe2c1 100644
1465     --- a/drivers/irqchip/irq-ingenic.c
1466     +++ b/drivers/irqchip/irq-ingenic.c
1467     @@ -108,6 +108,14 @@ static int __init ingenic_intc_of_init(struct device_node *node,
1468     goto out_unmap_irq;
1469     }
1470    
1471     + domain = irq_domain_add_legacy(node, num_chips * 32,
1472     + JZ4740_IRQ_BASE, 0,
1473     + &irq_domain_simple_ops, NULL);
1474     + if (!domain) {
1475     + err = -ENOMEM;
1476     + goto out_unmap_base;
1477     + }
1478     +
1479     for (i = 0; i < num_chips; i++) {
1480     /* Mask all irqs */
1481     writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
1482     @@ -134,14 +142,11 @@ static int __init ingenic_intc_of_init(struct device_node *node,
1483     IRQ_NOPROBE | IRQ_LEVEL);
1484     }
1485    
1486     - domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
1487     - &irq_domain_simple_ops, NULL);
1488     - if (!domain)
1489     - pr_warn("unable to register IRQ domain\n");
1490     -
1491     setup_irq(parent_irq, &intc_cascade_action);
1492     return 0;
1493    
1494     +out_unmap_base:
1495     + iounmap(intc->base);
1496     out_unmap_irq:
1497     irq_dispose_mapping(parent_irq);
1498     out_free:
1499     diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
1500     index 250dc9d6f635..82350a28a564 100644
1501     --- a/drivers/leds/leds-an30259a.c
1502     +++ b/drivers/leds/leds-an30259a.c
1503     @@ -305,6 +305,13 @@ static int an30259a_probe(struct i2c_client *client)
1504    
1505     chip->regmap = devm_regmap_init_i2c(client, &an30259a_regmap_config);
1506    
1507     + if (IS_ERR(chip->regmap)) {
1508     + err = PTR_ERR(chip->regmap);
1509     + dev_err(&client->dev, "Failed to allocate register map: %d\n",
1510     + err);
1511     + goto exit;
1512     + }
1513     +
1514     for (i = 0; i < chip->num_leds; i++) {
1515     struct led_init_data init_data = {};
1516    
1517     diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
1518     index 3d381f2f73d0..1ac9a44570ee 100644
1519     --- a/drivers/leds/leds-lm3692x.c
1520     +++ b/drivers/leds/leds-lm3692x.c
1521     @@ -334,9 +334,18 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
1522     return ret;
1523     }
1524    
1525     - led->regulator = devm_regulator_get(&led->client->dev, "vled");
1526     - if (IS_ERR(led->regulator))
1527     + led->regulator = devm_regulator_get_optional(&led->client->dev, "vled");
1528     + if (IS_ERR(led->regulator)) {
1529     + ret = PTR_ERR(led->regulator);
1530     + if (ret != -ENODEV) {
1531     + if (ret != -EPROBE_DEFER)
1532     + dev_err(&led->client->dev,
1533     + "Failed to get vled regulator: %d\n",
1534     + ret);
1535     + return ret;
1536     + }
1537     led->regulator = NULL;
1538     + }
1539    
1540     child = device_get_next_child_node(&led->client->dev, child);
1541     if (!child) {
1542     diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
1543     index 136f86a1627d..d5e774d83021 100644
1544     --- a/drivers/leds/trigger/ledtrig-netdev.c
1545     +++ b/drivers/leds/trigger/ledtrig-netdev.c
1546     @@ -302,10 +302,12 @@ static int netdev_trig_notify(struct notifier_block *nb,
1547     container_of(nb, struct led_netdev_data, notifier);
1548    
1549     if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
1550     - && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
1551     + && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
1552     + && evt != NETDEV_CHANGENAME)
1553     return NOTIFY_DONE;
1554    
1555     if (!(dev == trigger_data->net_dev ||
1556     + (evt == NETDEV_CHANGENAME && !strcmp(dev->name, trigger_data->device_name)) ||
1557     (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
1558     return NOTIFY_DONE;
1559    
1560     @@ -315,6 +317,7 @@ static int netdev_trig_notify(struct notifier_block *nb,
1561    
1562     clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
1563     switch (evt) {
1564     + case NETDEV_CHANGENAME:
1565     case NETDEV_REGISTER:
1566     if (trigger_data->net_dev)
1567     dev_put(trigger_data->net_dev);
1568     diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
1569     index 9f74dee1a58c..afe625e88a5c 100644
1570     --- a/drivers/mailbox/imx-mailbox.c
1571     +++ b/drivers/mailbox/imx-mailbox.c
1572     @@ -214,11 +214,24 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
1573     struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
1574     struct imx_mu_con_priv *cp = chan->con_priv;
1575    
1576     - if (cp->type == IMX_MU_TYPE_TXDB)
1577     + if (cp->type == IMX_MU_TYPE_TXDB) {
1578     tasklet_kill(&cp->txdb_tasklet);
1579     + return;
1580     + }
1581    
1582     - imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx) |
1583     - IMX_MU_xCR_RIEn(cp->idx) | IMX_MU_xCR_GIEn(cp->idx));
1584     + switch (cp->type) {
1585     + case IMX_MU_TYPE_TX:
1586     + imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
1587     + break;
1588     + case IMX_MU_TYPE_RX:
1589     + imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
1590     + break;
1591     + case IMX_MU_TYPE_RXDB:
1592     + imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
1593     + break;
1594     + default:
1595     + break;
1596     + }
1597    
1598     free_irq(priv->irq, chan);
1599     }
1600     diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
1601     index ba434d9ac720..46a8b5a91c38 100644
1602     --- a/drivers/md/bcache/btree.c
1603     +++ b/drivers/md/bcache/btree.c
1604     @@ -723,6 +723,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
1605     * IO can always make forward progress:
1606     */
1607     nr /= c->btree_pages;
1608     + if (nr == 0)
1609     + nr = 1;
1610     nr = min_t(unsigned long, nr, mca_can_free(c));
1611    
1612     i = 0;
1613     diff --git a/drivers/md/md.c b/drivers/md/md.c
1614     index 805b33e27496..4e7c9f398bc6 100644
1615     --- a/drivers/md/md.c
1616     +++ b/drivers/md/md.c
1617     @@ -1159,6 +1159,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
1618     /* not spare disk, or LEVEL_MULTIPATH */
1619     if (sb->level == LEVEL_MULTIPATH ||
1620     (rdev->desc_nr >= 0 &&
1621     + rdev->desc_nr < MD_SB_DISKS &&
1622     sb->disks[rdev->desc_nr].state &
1623     ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1624     spare_disk = false;
1625     diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
1626     index 365fb0cb8dff..22566b75ca50 100644
1627     --- a/drivers/misc/habanalabs/memory.c
1628     +++ b/drivers/misc/habanalabs/memory.c
1629     @@ -965,17 +965,19 @@ init_page_pack_err:
1630     *
1631     * @ctx : current context
1632     * @vaddr : device virtual address to unmap
1633     + * @ctx_free : true if in context free flow, false otherwise.
1634     *
1635     * This function does the following:
1636     * - Unmap the physical pages related to the given virtual address
1637     * - return the device virtual block to the virtual block list
1638     */
1639     -static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
1640     +static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
1641     {
1642     struct hl_device *hdev = ctx->hdev;
1643     struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
1644     struct hl_vm_hash_node *hnode = NULL;
1645     struct hl_userptr *userptr = NULL;
1646     + struct hl_va_range *va_range;
1647     enum vm_type_t *vm_type;
1648     u64 next_vaddr, i;
1649     u32 page_size;
1650     @@ -1003,6 +1005,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
1651    
1652     if (*vm_type == VM_TYPE_USERPTR) {
1653     is_userptr = true;
1654     + va_range = &ctx->host_va_range;
1655     userptr = hnode->ptr;
1656     rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1657     &phys_pg_pack);
1658     @@ -1014,6 +1017,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
1659     }
1660     } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1661     is_userptr = false;
1662     + va_range = &ctx->dram_va_range;
1663     phys_pg_pack = hnode->ptr;
1664     } else {
1665     dev_warn(hdev->dev,
1666     @@ -1052,12 +1056,18 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
1667    
1668     mutex_unlock(&ctx->mmu_lock);
1669    
1670     - if (add_va_block(hdev,
1671     - is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
1672     - vaddr,
1673     - vaddr + phys_pg_pack->total_size - 1))
1674     - dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
1675     - vaddr);
1676     + /*
1677     + * No point in maintaining the free VA block list if the context is
1678     + * closing as the list will be freed anyway
1679     + */
1680     + if (!ctx_free) {
1681     + rc = add_va_block(hdev, va_range, vaddr,
1682     + vaddr + phys_pg_pack->total_size - 1);
1683     + if (rc)
1684     + dev_warn(hdev->dev,
1685     + "add va block failed for vaddr: 0x%llx\n",
1686     + vaddr);
1687     + }
1688    
1689     atomic_dec(&phys_pg_pack->mapping_cnt);
1690     kfree(hnode);
1691     @@ -1189,8 +1199,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
1692     break;
1693    
1694     case HL_MEM_OP_UNMAP:
1695     - rc = unmap_device_va(ctx,
1696     - args->in.unmap.device_virt_addr);
1697     + rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
1698     + false);
1699     break;
1700    
1701     default:
1702     @@ -1620,7 +1630,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
1703     dev_dbg(hdev->dev,
1704     "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
1705     hnode->vaddr, ctx->asid);
1706     - unmap_device_va(ctx, hnode->vaddr);
1707     + unmap_device_va(ctx, hnode->vaddr, true);
1708     }
1709    
1710     spin_lock(&vm->idr_lock);
1711     diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
1712     index 57b582bf73d9..9289bb4d633e 100644
1713     --- a/drivers/mmc/host/sdhci-esdhc.h
1714     +++ b/drivers/mmc/host/sdhci-esdhc.h
1715     @@ -51,6 +51,11 @@
1716     #define ESDHC_CLOCK_HCKEN 0x00000002
1717     #define ESDHC_CLOCK_IPGEN 0x00000001
1718    
1719     +/* System Control 2 Register */
1720     +#define ESDHC_SYSTEM_CONTROL_2 0x3c
1721     +#define ESDHC_SMPCLKSEL 0x00800000
1722     +#define ESDHC_EXTN 0x00400000
1723     +
1724     /* Host Controller Capabilities Register 2 */
1725     #define ESDHC_CAPABILITIES_1 0x114
1726    
1727     @@ -59,7 +64,16 @@
1728     #define ESDHC_HS400_WNDW_ADJUST 0x00000040
1729     #define ESDHC_HS400_MODE 0x00000010
1730     #define ESDHC_TB_EN 0x00000004
1731     +#define ESDHC_TB_MODE_MASK 0x00000003
1732     +#define ESDHC_TB_MODE_SW 0x00000003
1733     +#define ESDHC_TB_MODE_3 0x00000002
1734     +
1735     +#define ESDHC_TBSTAT 0x124
1736     +
1737     #define ESDHC_TBPTR 0x128
1738     +#define ESDHC_WNDW_STRT_PTR_SHIFT 8
1739     +#define ESDHC_WNDW_STRT_PTR_MASK (0x7f << 8)
1740     +#define ESDHC_WNDW_END_PTR_MASK 0x7f
1741    
1742     /* SD Clock Control Register */
1743     #define ESDHC_SDCLKCTL 0x144
1744     diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
1745     index 889ed98ec0e7..fcfb50f84c8b 100644
1746     --- a/drivers/mmc/host/sdhci-of-esdhc.c
1747     +++ b/drivers/mmc/host/sdhci-of-esdhc.c
1748     @@ -77,8 +77,11 @@ struct sdhci_esdhc {
1749     bool quirk_incorrect_hostver;
1750     bool quirk_limited_clk_division;
1751     bool quirk_unreliable_pulse_detection;
1752     - bool quirk_fixup_tuning;
1753     + bool quirk_tuning_erratum_type1;
1754     + bool quirk_tuning_erratum_type2;
1755     bool quirk_ignore_data_inhibit;
1756     + bool quirk_delay_before_data_reset;
1757     + bool in_sw_tuning;
1758     unsigned int peripheral_clock;
1759     const struct esdhc_clk_fixup *clk_fixup;
1760     u32 div_ratio;
1761     @@ -408,6 +411,8 @@ static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
1762    
1763     static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
1764     {
1765     + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1766     + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1767     int base = reg & ~0x3;
1768     u32 value;
1769     u32 ret;
1770     @@ -416,10 +421,24 @@ static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
1771     ret = esdhc_writew_fixup(host, reg, val, value);
1772     if (reg != SDHCI_TRANSFER_MODE)
1773     iowrite32be(ret, host->ioaddr + base);
1774     +
1775     + /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
1776     + * 1us later after ESDHC_EXTN is set.
1777     + */
1778     + if (base == ESDHC_SYSTEM_CONTROL_2) {
1779     + if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
1780     + esdhc->in_sw_tuning) {
1781     + udelay(1);
1782     + ret |= ESDHC_SMPCLKSEL;
1783     + iowrite32be(ret, host->ioaddr + base);
1784     + }
1785     + }
1786     }
1787    
1788     static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
1789     {
1790     + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1791     + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1792     int base = reg & ~0x3;
1793     u32 value;
1794     u32 ret;
1795     @@ -428,6 +447,18 @@ static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
1796     ret = esdhc_writew_fixup(host, reg, val, value);
1797     if (reg != SDHCI_TRANSFER_MODE)
1798     iowrite32(ret, host->ioaddr + base);
1799     +
1800     + /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
1801     + * 1us later after ESDHC_EXTN is set.
1802     + */
1803     + if (base == ESDHC_SYSTEM_CONTROL_2) {
1804     + if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
1805     + esdhc->in_sw_tuning) {
1806     + udelay(1);
1807     + ret |= ESDHC_SMPCLKSEL;
1808     + iowrite32(ret, host->ioaddr + base);
1809     + }
1810     + }
1811     }
1812    
1813     static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
1814     @@ -705,6 +736,11 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
1815     struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1816     u32 val;
1817    
1818     + if (esdhc->quirk_delay_before_data_reset &&
1819     + (mask & SDHCI_RESET_DATA) &&
1820     + (host->flags & SDHCI_REQ_USE_DMA))
1821     + mdelay(5);
1822     +
1823     sdhci_reset(host, mask);
1824    
1825     sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1826     @@ -793,16 +829,21 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
1827     }
1828     }
1829    
1830     -static struct soc_device_attribute soc_fixup_tuning[] = {
1831     +static struct soc_device_attribute soc_tuning_erratum_type1[] = {
1832     + { .family = "QorIQ T1023", .revision = "1.0", },
1833     { .family = "QorIQ T1040", .revision = "1.0", },
1834     { .family = "QorIQ T2080", .revision = "1.0", },
1835     - { .family = "QorIQ T1023", .revision = "1.0", },
1836     { .family = "QorIQ LS1021A", .revision = "1.0", },
1837     - { .family = "QorIQ LS1080A", .revision = "1.0", },
1838     - { .family = "QorIQ LS2080A", .revision = "1.0", },
1839     + { },
1840     +};
1841     +
1842     +static struct soc_device_attribute soc_tuning_erratum_type2[] = {
1843     { .family = "QorIQ LS1012A", .revision = "1.0", },
1844     { .family = "QorIQ LS1043A", .revision = "1.*", },
1845     { .family = "QorIQ LS1046A", .revision = "1.0", },
1846     + { .family = "QorIQ LS1080A", .revision = "1.0", },
1847     + { .family = "QorIQ LS2080A", .revision = "1.0", },
1848     + { .family = "QorIQ LA1575A", .revision = "1.0", },
1849     { },
1850     };
1851    
1852     @@ -826,15 +867,97 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
1853     esdhc_clock_enable(host, true);
1854     }
1855    
1856     +static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
1857     + u8 *window_end)
1858     +{
1859     + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1860     + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1861     + u8 tbstat_15_8, tbstat_7_0;
1862     + u32 val;
1863     +
1864     + if (esdhc->quirk_tuning_erratum_type1) {
1865     + *window_start = 5 * esdhc->div_ratio;
1866     + *window_end = 3 * esdhc->div_ratio;
1867     + return;
1868     + }
1869     +
1870     + /* Write TBCTL[11:8]=4'h8 */
1871     + val = sdhci_readl(host, ESDHC_TBCTL);
1872     + val &= ~(0xf << 8);
1873     + val |= 8 << 8;
1874     + sdhci_writel(host, val, ESDHC_TBCTL);
1875     +
1876     + mdelay(1);
1877     +
1878     + /* Read TBCTL[31:0] register and rewrite again */
1879     + val = sdhci_readl(host, ESDHC_TBCTL);
1880     + sdhci_writel(host, val, ESDHC_TBCTL);
1881     +
1882     + mdelay(1);
1883     +
1884     + /* Read the TBSTAT[31:0] register twice */
1885     + val = sdhci_readl(host, ESDHC_TBSTAT);
1886     + val = sdhci_readl(host, ESDHC_TBSTAT);
1887     +
1888     + /* Reset data lines by setting ESDHCCTL[RSTD] */
1889     + sdhci_reset(host, SDHCI_RESET_DATA);
1890     + /* Write 32'hFFFF_FFFF to IRQSTAT register */
1891     + sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
1892     +
1893     + /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
1894     + * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
1895     + * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
1896     + * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
1897     + */
1898     + tbstat_7_0 = val & 0xff;
1899     + tbstat_15_8 = (val >> 8) & 0xff;
1900     +
1901     + if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
1902     + *window_start = 8 * esdhc->div_ratio;
1903     + *window_end = 4 * esdhc->div_ratio;
1904     + } else {
1905     + *window_start = 5 * esdhc->div_ratio;
1906     + *window_end = 3 * esdhc->div_ratio;
1907     + }
1908     +}
1909     +
1910     +static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
1911     + u8 window_start, u8 window_end)
1912     +{
1913     + struct sdhci_host *host = mmc_priv(mmc);
1914     + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1915     + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1916     + u32 val;
1917     + int ret;
1918     +
1919     + /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
1920     + val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
1921     + ESDHC_WNDW_STRT_PTR_MASK;
1922     + val |= window_end & ESDHC_WNDW_END_PTR_MASK;
1923     + sdhci_writel(host, val, ESDHC_TBPTR);
1924     +
1925     + /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
1926     + val = sdhci_readl(host, ESDHC_TBCTL);
1927     + val &= ~ESDHC_TB_MODE_MASK;
1928     + val |= ESDHC_TB_MODE_SW;
1929     + sdhci_writel(host, val, ESDHC_TBCTL);
1930     +
1931     + esdhc->in_sw_tuning = true;
1932     + ret = sdhci_execute_tuning(mmc, opcode);
1933     + esdhc->in_sw_tuning = false;
1934     + return ret;
1935     +}
1936     +
1937     static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1938     {
1939     struct sdhci_host *host = mmc_priv(mmc);
1940     struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1941     struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1942     + u8 window_start, window_end;
1943     + int ret, retries = 1;
1944     bool hs400_tuning;
1945     unsigned int clk;
1946     u32 val;
1947     - int ret;
1948    
1949     /* For tuning mode, the sd clock divisor value
1950     * must be larger than 3 according to reference manual.
1951     @@ -843,39 +966,73 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1952     if (host->clock > clk)
1953     esdhc_of_set_clock(host, clk);
1954    
1955     - if (esdhc->quirk_limited_clk_division &&
1956     - host->flags & SDHCI_HS400_TUNING)
1957     - esdhc_of_set_clock(host, host->clock);
1958     -
1959     esdhc_tuning_block_enable(host, true);
1960    
1961     hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1962     - ret = sdhci_execute_tuning(mmc, opcode);
1963    
1964     - if (hs400_tuning) {
1965     - val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
1966     - val |= ESDHC_FLW_CTL_BG;
1967     - sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
1968     - }
1969     + do {
1970     + if (esdhc->quirk_limited_clk_division &&
1971     + hs400_tuning)
1972     + esdhc_of_set_clock(host, host->clock);
1973    
1974     - if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {
1975     + /* Do HW tuning */
1976     + val = sdhci_readl(host, ESDHC_TBCTL);
1977     + val &= ~ESDHC_TB_MODE_MASK;
1978     + val |= ESDHC_TB_MODE_3;
1979     + sdhci_writel(host, val, ESDHC_TBCTL);
1980    
1981     - /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
1982     - * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
1983     - */
1984     - val = sdhci_readl(host, ESDHC_TBPTR);
1985     - val = (val & ~((0x7f << 8) | 0x7f)) |
1986     - (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
1987     - sdhci_writel(host, val, ESDHC_TBPTR);
1988     + ret = sdhci_execute_tuning(mmc, opcode);
1989     + if (ret)
1990     + break;
1991    
1992     - /* program the software tuning mode by setting
1993     - * TBCTL[TB_MODE]=2'h3
1994     + /* If HW tuning fails and triggers erratum,
1995     + * try workaround.
1996     */
1997     - val = sdhci_readl(host, ESDHC_TBCTL);
1998     - val |= 0x3;
1999     - sdhci_writel(host, val, ESDHC_TBCTL);
2000     - sdhci_execute_tuning(mmc, opcode);
2001     + ret = host->tuning_err;
2002     + if (ret == -EAGAIN &&
2003     + (esdhc->quirk_tuning_erratum_type1 ||
2004     + esdhc->quirk_tuning_erratum_type2)) {
2005     + /* Recover HS400 tuning flag */
2006     + if (hs400_tuning)
2007     + host->flags |= SDHCI_HS400_TUNING;
2008     + pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
2009     + mmc_hostname(mmc));
2010     + /* Do SW tuning */
2011     + esdhc_prepare_sw_tuning(host, &window_start,
2012     + &window_end);
2013     + ret = esdhc_execute_sw_tuning(mmc, opcode,
2014     + window_start,
2015     + window_end);
2016     + if (ret)
2017     + break;
2018     +
2019     + /* Retry both HW/SW tuning with reduced clock. */
2020     + ret = host->tuning_err;
2021     + if (ret == -EAGAIN && retries) {
2022     + /* Recover HS400 tuning flag */
2023     + if (hs400_tuning)
2024     + host->flags |= SDHCI_HS400_TUNING;
2025     +
2026     + clk = host->max_clk / (esdhc->div_ratio + 1);
2027     + esdhc_of_set_clock(host, clk);
2028     + pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
2029     + mmc_hostname(mmc));
2030     + } else {
2031     + break;
2032     + }
2033     + } else {
2034     + break;
2035     + }
2036     + } while (retries--);
2037     +
2038     + if (ret) {
2039     + esdhc_tuning_block_enable(host, false);
2040     + } else if (hs400_tuning) {
2041     + val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
2042     + val |= ESDHC_FLW_CTL_BG;
2043     + sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
2044     }
2045     +
2046     return ret;
2047     }
2048    
2049     @@ -1046,6 +1203,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
2050     if (match)
2051     esdhc->clk_fixup = match->data;
2052     np = pdev->dev.of_node;
2053     +
2054     + if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
2055     + esdhc->quirk_delay_before_data_reset = true;
2056     +
2057     clk = of_clk_get(np, 0);
2058     if (!IS_ERR(clk)) {
2059     /*
2060     @@ -1111,10 +1272,15 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
2061    
2062     pltfm_host = sdhci_priv(host);
2063     esdhc = sdhci_pltfm_priv(pltfm_host);
2064     - if (soc_device_match(soc_fixup_tuning))
2065     - esdhc->quirk_fixup_tuning = true;
2066     + if (soc_device_match(soc_tuning_erratum_type1))
2067     + esdhc->quirk_tuning_erratum_type1 = true;
2068     + else
2069     + esdhc->quirk_tuning_erratum_type1 = false;
2070     +
2071     + if (soc_device_match(soc_tuning_erratum_type2))
2072     + esdhc->quirk_tuning_erratum_type2 = true;
2073     else
2074     - esdhc->quirk_fixup_tuning = false;
2075     + esdhc->quirk_tuning_erratum_type2 = false;
2076    
2077     if (esdhc->vendor_ver == VENDOR_V_22)
2078     host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
2079     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2080     index face00c622ed..7dcd709f4ac3 100644
2081     --- a/drivers/net/bonding/bond_main.c
2082     +++ b/drivers/net/bonding/bond_main.c
2083     @@ -2225,9 +2225,6 @@ static void bond_miimon_commit(struct bonding *bond)
2084     } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2085     /* make it immediately active */
2086     bond_set_active_slave(slave);
2087     - } else if (slave != primary) {
2088     - /* prevent it from being the active one */
2089     - bond_set_backup_slave(slave);
2090     }
2091    
2092     slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2093     diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
2094     index d264776a95a3..471837cf0b21 100644
2095     --- a/drivers/net/dsa/bcm_sf2_cfp.c
2096     +++ b/drivers/net/dsa/bcm_sf2_cfp.c
2097     @@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
2098     return -EINVAL;
2099     }
2100    
2101     - ip_frag = be32_to_cpu(fs->m_ext.data[0]);
2102     + ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
2103    
2104     /* Locate the first rule available */
2105     if (fs->location == RX_CLS_LOC_ANY)
2106     @@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
2107    
2108     if (rule->fs.flow_type != fs->flow_type ||
2109     rule->fs.ring_cookie != fs->ring_cookie ||
2110     - rule->fs.m_ext.data[0] != fs->m_ext.data[0])
2111     + rule->fs.h_ext.data[0] != fs->h_ext.data[0])
2112     continue;
2113    
2114     switch (fs->flow_type & ~FLOW_EXT) {
2115     @@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
2116     return -EINVAL;
2117     }
2118    
2119     - ip_frag = be32_to_cpu(fs->m_ext.data[0]);
2120     + ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
2121    
2122     layout = &udf_tcpip6_layout;
2123     slice_num = bcm_sf2_get_slice_number(layout, 0);
2124     diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
2125     index 4e5a428ab1a4..7763221286d4 100644
2126     --- a/drivers/net/dsa/sja1105/sja1105_main.c
2127     +++ b/drivers/net/dsa/sja1105/sja1105_main.c
2128     @@ -1560,8 +1560,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
2129    
2130     if (enabled) {
2131     /* Enable VLAN filtering. */
2132     - tpid = ETH_P_8021AD;
2133     - tpid2 = ETH_P_8021Q;
2134     + tpid = ETH_P_8021Q;
2135     + tpid2 = ETH_P_8021AD;
2136     } else {
2137     /* Disable VLAN filtering. */
2138     tpid = ETH_P_SJA1105;
2139     @@ -1570,9 +1570,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
2140    
2141     table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
2142     general_params = table->entries;
2143     - /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
2144     - general_params->tpid = tpid;
2145     /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
2146     + general_params->tpid = tpid;
2147     + /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
2148     general_params->tpid2 = tpid2;
2149     /* When VLAN filtering is on, we need to at least be able to
2150     * decode management traffic through the "backup plan".
2151     diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
2152     index 0d03e13e9909..63d2311817c4 100644
2153     --- a/drivers/net/dsa/sja1105/sja1105_static_config.c
2154     +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
2155     @@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
2156     return size;
2157     }
2158    
2159     +/* TPID and TPID2 are intentionally reversed so that semantic
2160     + * compatibility with E/T is kept.
2161     + */
2162     static size_t
2163     sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
2164     enum packing_op op)
2165     @@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
2166     sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
2167     sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
2168     sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
2169     - sja1105_packing(buf, &entry->tpid, 74, 59, size, op);
2170     + sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
2171     sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
2172     - sja1105_packing(buf, &entry->tpid2, 57, 42, size, op);
2173     + sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
2174     sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
2175     sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
2176     sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
2177     diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2178     index c487d2a7d6dd..b4a145220aba 100644
2179     --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
2180     +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2181     @@ -1238,8 +1238,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
2182     struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
2183     struct ena_ring *tx_ring, *rx_ring;
2184    
2185     - u32 tx_work_done;
2186     - u32 rx_work_done;
2187     + int tx_work_done;
2188     + int rx_work_done = 0;
2189     int tx_budget;
2190     int napi_comp_call = 0;
2191     int ret;
2192     @@ -1256,7 +1256,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
2193     }
2194    
2195     tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
2196     - rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
2197     + /* On netpoll the budget is zero and the handler should only clean the
2198     + * tx completions.
2199     + */
2200     + if (likely(budget))
2201     + rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
2202    
2203     /* If the device is about to reset or down, avoid unmask
2204     * the interrupt and return 0 so NAPI won't reschedule
2205     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2206     index 527e1bf93116..5c75b061243f 100644
2207     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2208     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2209     @@ -1995,6 +1995,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
2210     case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2211     u32 data1 = le32_to_cpu(cmpl->event_data1);
2212    
2213     + if (!bp->fw_health)
2214     + goto async_event_process_exit;
2215     +
2216     bp->fw_reset_timestamp = jiffies;
2217     bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2218     if (!bp->fw_reset_min_dsecs)
2219     @@ -4438,8 +4441,9 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2220     FUNC_DRV_RGTR_REQ_ENABLES_VER);
2221    
2222     req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
2223     - flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
2224     - FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
2225     + flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
2226     + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
2227     + flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
2228     if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
2229     flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
2230     req.flags = cpu_to_le32(flags);
2231     @@ -6174,7 +6178,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
2232     tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
2233     val = clamp_t(u16, tmr, 1,
2234     coal_cap->cmpl_aggr_dma_tmr_during_int_max);
2235     - req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
2236     + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
2237     req->enables |=
2238     cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
2239     }
2240     @@ -7096,14 +7100,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
2241     rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2242     if (rc)
2243     goto err_recovery_out;
2244     - if (!fw_health) {
2245     - fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
2246     - bp->fw_health = fw_health;
2247     - if (!fw_health) {
2248     - rc = -ENOMEM;
2249     - goto err_recovery_out;
2250     - }
2251     - }
2252     fw_health->flags = le32_to_cpu(resp->flags);
2253     if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
2254     !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
2255     @@ -8766,6 +8762,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
2256     }
2257     if (resc_reinit || fw_reset) {
2258     if (fw_reset) {
2259     + bnxt_free_ctx_mem(bp);
2260     + kfree(bp->ctx);
2261     + bp->ctx = NULL;
2262     rc = bnxt_fw_init_one(bp);
2263     if (rc) {
2264     set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
2265     @@ -9954,8 +9953,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
2266     struct bnxt_fw_health *fw_health = bp->fw_health;
2267     u32 val;
2268    
2269     - if (!fw_health || !fw_health->enabled ||
2270     - test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
2271     + if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
2272     return;
2273    
2274     if (fw_health->tmr_counter) {
2275     @@ -10416,6 +10414,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
2276     bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
2277     }
2278    
2279     +static void bnxt_alloc_fw_health(struct bnxt *bp)
2280     +{
2281     + if (bp->fw_health)
2282     + return;
2283     +
2284     + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
2285     + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
2286     + return;
2287     +
2288     + bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
2289     + if (!bp->fw_health) {
2290     + netdev_warn(bp->dev, "Failed to allocate fw_health\n");
2291     + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
2292     + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
2293     + }
2294     +}
2295     +
2296     static int bnxt_fw_init_one_p1(struct bnxt *bp)
2297     {
2298     int rc;
2299     @@ -10462,6 +10477,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
2300     netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
2301     rc);
2302    
2303     + bnxt_alloc_fw_health(bp);
2304     rc = bnxt_hwrm_error_recovery_qcfg(bp);
2305     if (rc)
2306     netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
2307     @@ -10547,6 +10563,12 @@ static int bnxt_fw_init_one(struct bnxt *bp)
2308     rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
2309     if (rc)
2310     return rc;
2311     +
2312     + /* In case fw capabilities have changed, destroy the unneeded
2313     + * reporters and create newly capable ones.
2314     + */
2315     + bnxt_dl_fw_reporters_destroy(bp, false);
2316     + bnxt_dl_fw_reporters_create(bp);
2317     bnxt_fw_init_one_p3(bp);
2318     return 0;
2319     }
2320     @@ -10680,8 +10702,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
2321     bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
2322     return;
2323     case BNXT_FW_RESET_STATE_ENABLE_DEV:
2324     - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
2325     - bp->fw_health) {
2326     + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
2327     u32 val;
2328    
2329     val = bnxt_fw_health_readl(bp,
2330     @@ -11322,11 +11343,11 @@ static void bnxt_remove_one(struct pci_dev *pdev)
2331     struct net_device *dev = pci_get_drvdata(pdev);
2332     struct bnxt *bp = netdev_priv(dev);
2333    
2334     - if (BNXT_PF(bp)) {
2335     + if (BNXT_PF(bp))
2336     bnxt_sriov_disable(bp);
2337     - bnxt_dl_unregister(bp);
2338     - }
2339    
2340     + bnxt_dl_fw_reporters_destroy(bp, true);
2341     + bnxt_dl_unregister(bp);
2342     pci_disable_pcie_error_reporting(pdev);
2343     unregister_netdev(dev);
2344     bnxt_shutdown_tc(bp);
2345     @@ -11341,6 +11362,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
2346     bnxt_dcb_free(bp);
2347     kfree(bp->edev);
2348     bp->edev = NULL;
2349     + kfree(bp->fw_health);
2350     + bp->fw_health = NULL;
2351     bnxt_cleanup_pci(bp);
2352     bnxt_free_ctx_mem(bp);
2353     kfree(bp->ctx);
2354     @@ -11820,8 +11843,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2355     if (rc)
2356     goto init_err_cleanup_tc;
2357    
2358     - if (BNXT_PF(bp))
2359     - bnxt_dl_register(bp);
2360     + bnxt_dl_register(bp);
2361     + bnxt_dl_fw_reporters_create(bp);
2362    
2363     netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
2364     board_info[ent->driver_data].name,
2365     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2366     index 5163bb848618..dc26e3ace43f 100644
2367     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2368     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2369     @@ -1658,6 +1658,7 @@ struct bnxt {
2370     #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
2371     #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
2372     #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
2373     + #define BNXT_FW_CAP_HOT_RESET 0x00200000
2374    
2375     #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
2376     u32 hwrm_spec_code;
2377     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
2378     index 7d2cfea05737..1e236e74ff2f 100644
2379     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
2380     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
2381     @@ -19,11 +19,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
2382     struct devlink_fmsg *fmsg)
2383     {
2384     struct bnxt *bp = devlink_health_reporter_priv(reporter);
2385     - struct bnxt_fw_health *health = bp->fw_health;
2386     u32 val, health_status;
2387     int rc;
2388    
2389     - if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
2390     + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
2391     return 0;
2392    
2393     val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2394     @@ -103,21 +102,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
2395     .recover = bnxt_fw_fatal_recover,
2396     };
2397    
2398     -static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
2399     +void bnxt_dl_fw_reporters_create(struct bnxt *bp)
2400     {
2401     struct bnxt_fw_health *health = bp->fw_health;
2402    
2403     - if (!health)
2404     + if (!bp->dl || !health)
2405     return;
2406    
2407     - health->fw_reporter =
2408     - devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
2409     - 0, false, bp);
2410     - if (IS_ERR(health->fw_reporter)) {
2411     - netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
2412     - PTR_ERR(health->fw_reporter));
2413     - health->fw_reporter = NULL;
2414     - }
2415     + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
2416     + goto err_recovery;
2417    
2418     health->fw_reset_reporter =
2419     devlink_health_reporter_create(bp->dl,
2420     @@ -127,8 +120,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
2421     netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
2422     PTR_ERR(health->fw_reset_reporter));
2423     health->fw_reset_reporter = NULL;
2424     + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
2425     + }
2426     +
2427     +err_recovery:
2428     + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
2429     + return;
2430     +
2431     + if (!health->fw_reporter) {
2432     + health->fw_reporter =
2433     + devlink_health_reporter_create(bp->dl,
2434     + &bnxt_dl_fw_reporter_ops,
2435     + 0, false, bp);
2436     + if (IS_ERR(health->fw_reporter)) {
2437     + netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
2438     + PTR_ERR(health->fw_reporter));
2439     + health->fw_reporter = NULL;
2440     + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
2441     + return;
2442     + }
2443     }
2444    
2445     + if (health->fw_fatal_reporter)
2446     + return;
2447     +
2448     health->fw_fatal_reporter =
2449     devlink_health_reporter_create(bp->dl,
2450     &bnxt_dl_fw_fatal_reporter_ops,
2451     @@ -137,24 +152,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
2452     netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
2453     PTR_ERR(health->fw_fatal_reporter));
2454     health->fw_fatal_reporter = NULL;
2455     + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
2456     }
2457     }
2458    
2459     -static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
2460     +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
2461     {
2462     struct bnxt_fw_health *health = bp->fw_health;
2463    
2464     - if (!health)
2465     + if (!bp->dl || !health)
2466     return;
2467    
2468     - if (health->fw_reporter)
2469     - devlink_health_reporter_destroy(health->fw_reporter);
2470     -
2471     - if (health->fw_reset_reporter)
2472     + if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
2473     + health->fw_reset_reporter) {
2474     devlink_health_reporter_destroy(health->fw_reset_reporter);
2475     + health->fw_reset_reporter = NULL;
2476     + }
2477    
2478     - if (health->fw_fatal_reporter)
2479     + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
2480     + return;
2481     +
2482     + if (health->fw_reporter) {
2483     + devlink_health_reporter_destroy(health->fw_reporter);
2484     + health->fw_reporter = NULL;
2485     + }
2486     +
2487     + if (health->fw_fatal_reporter) {
2488     devlink_health_reporter_destroy(health->fw_fatal_reporter);
2489     + health->fw_fatal_reporter = NULL;
2490     + }
2491     }
2492    
2493     void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
2494     @@ -162,9 +188,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
2495     struct bnxt_fw_health *fw_health = bp->fw_health;
2496     struct bnxt_fw_reporter_ctx fw_reporter_ctx;
2497    
2498     - if (!fw_health)
2499     - return;
2500     -
2501     fw_reporter_ctx.sp_event = event;
2502     switch (event) {
2503     case BNXT_FW_RESET_NOTIFY_SP_EVENT:
2504     @@ -203,6 +226,8 @@ static const struct devlink_ops bnxt_dl_ops = {
2505     #endif /* CONFIG_BNXT_SRIOV */
2506     };
2507    
2508     +static const struct devlink_ops bnxt_vf_dl_ops;
2509     +
2510     enum bnxt_dl_param_id {
2511     BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
2512     BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
2513     @@ -416,7 +441,10 @@ int bnxt_dl_register(struct bnxt *bp)
2514     return -ENOTSUPP;
2515     }
2516    
2517     - dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
2518     + if (BNXT_PF(bp))
2519     + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
2520     + else
2521     + dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
2522     if (!dl) {
2523     netdev_warn(bp->dev, "devlink_alloc failed");
2524     return -ENOMEM;
2525     @@ -435,6 +463,9 @@ int bnxt_dl_register(struct bnxt *bp)
2526     goto err_dl_free;
2527     }
2528    
2529     + if (!BNXT_PF(bp))
2530     + return 0;
2531     +
2532     rc = devlink_params_register(dl, bnxt_dl_params,
2533     ARRAY_SIZE(bnxt_dl_params));
2534     if (rc) {
2535     @@ -462,8 +493,6 @@ int bnxt_dl_register(struct bnxt *bp)
2536    
2537     devlink_params_publish(dl);
2538    
2539     - bnxt_dl_fw_reporters_create(bp);
2540     -
2541     return 0;
2542    
2543     err_dl_port_unreg:
2544     @@ -486,12 +515,14 @@ void bnxt_dl_unregister(struct bnxt *bp)
2545     if (!dl)
2546     return;
2547    
2548     - bnxt_dl_fw_reporters_destroy(bp);
2549     - devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
2550     - ARRAY_SIZE(bnxt_dl_port_params));
2551     - devlink_port_unregister(&bp->dl_port);
2552     - devlink_params_unregister(dl, bnxt_dl_params,
2553     - ARRAY_SIZE(bnxt_dl_params));
2554     + if (BNXT_PF(bp)) {
2555     + devlink_port_params_unregister(&bp->dl_port,
2556     + bnxt_dl_port_params,
2557     + ARRAY_SIZE(bnxt_dl_port_params));
2558     + devlink_port_unregister(&bp->dl_port);
2559     + devlink_params_unregister(dl, bnxt_dl_params,
2560     + ARRAY_SIZE(bnxt_dl_params));
2561     + }
2562     devlink_unregister(dl);
2563     devlink_free(dl);
2564     }
2565     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
2566     index 2f4fd0a7d04b..689c47ab2155 100644
2567     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
2568     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
2569     @@ -57,6 +57,8 @@ struct bnxt_dl_nvm_param {
2570     };
2571    
2572     void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
2573     +void bnxt_dl_fw_reporters_create(struct bnxt *bp);
2574     +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
2575     int bnxt_dl_register(struct bnxt *bp);
2576     void bnxt_dl_unregister(struct bnxt *bp);
2577    
2578     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2579     index 89f95428556e..ece70f61c89a 100644
2580     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2581     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2582     @@ -3064,8 +3064,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
2583     }
2584     }
2585    
2586     - if (info->dest_buf)
2587     - memcpy(info->dest_buf + off, dma_buf, len);
2588     + if (info->dest_buf) {
2589     + if ((info->seg_start + off + len) <=
2590     + BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
2591     + memcpy(info->dest_buf + off, dma_buf, len);
2592     + } else {
2593     + rc = -ENOBUFS;
2594     + break;
2595     + }
2596     + }
2597    
2598     if (cmn_req->req_type ==
2599     cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
2600     @@ -3119,7 +3126,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
2601    
2602     static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
2603     u16 segment_id, u32 *seg_len,
2604     - void *buf, u32 offset)
2605     + void *buf, u32 buf_len, u32 offset)
2606     {
2607     struct hwrm_dbg_coredump_retrieve_input req = {0};
2608     struct bnxt_hwrm_dbg_dma_info info = {NULL};
2609     @@ -3134,8 +3141,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
2610     seq_no);
2611     info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
2612     data_len);
2613     - if (buf)
2614     + if (buf) {
2615     info.dest_buf = buf + offset;
2616     + info.buf_len = buf_len;
2617     + info.seg_start = offset;
2618     + }
2619    
2620     rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
2621     if (!rc)
2622     @@ -3225,14 +3235,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
2623     static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
2624     {
2625     u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
2626     + u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
2627     struct coredump_segment_record *seg_record = NULL;
2628     - u32 offset = 0, seg_hdr_len, seg_record_len;
2629     struct bnxt_coredump_segment_hdr seg_hdr;
2630     struct bnxt_coredump coredump = {NULL};
2631     time64_t start_time;
2632     u16 start_utc;
2633     int rc = 0, i;
2634    
2635     + if (buf)
2636     + buf_len = *dump_len;
2637     +
2638     start_time = ktime_get_real_seconds();
2639     start_utc = sys_tz.tz_minuteswest * 60;
2640     seg_hdr_len = sizeof(seg_hdr);
2641     @@ -3265,6 +3278,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
2642     u32 duration = 0, seg_len = 0;
2643     unsigned long start, end;
2644    
2645     + if (buf && ((offset + seg_hdr_len) >
2646     + BNXT_COREDUMP_BUF_LEN(buf_len))) {
2647     + rc = -ENOBUFS;
2648     + goto err;
2649     + }
2650     +
2651     start = jiffies;
2652    
2653     rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
2654     @@ -3277,9 +3296,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
2655    
2656     /* Write segment data into the buffer */
2657     rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
2658     - &seg_len, buf,
2659     + &seg_len, buf, buf_len,
2660     offset + seg_hdr_len);
2661     - if (rc)
2662     + if (rc && rc == -ENOBUFS)
2663     + goto err;
2664     + else if (rc)
2665     netdev_err(bp->dev,
2666     "Failed to retrieve coredump for seg = %d\n",
2667     seg_record->segment_id);
2668     @@ -3309,7 +3330,8 @@ err:
2669     rc);
2670     kfree(coredump.data);
2671     *dump_len += sizeof(struct bnxt_coredump_record);
2672     -
2673     + if (rc == -ENOBUFS)
2674     + netdev_err(bp->dev, "Firmware returned large coredump buffer");
2675     return rc;
2676     }
2677    
2678     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
2679     index b5b65b3f8534..3998f6e809a9 100644
2680     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
2681     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
2682     @@ -31,6 +31,8 @@ struct bnxt_coredump {
2683     u16 total_segs;
2684     };
2685    
2686     +#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
2687     +
2688     struct bnxt_hwrm_dbg_dma_info {
2689     void *dest_buf;
2690     int dest_buf_size;
2691     @@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info {
2692     u16 seq_off;
2693     u16 data_len_off;
2694     u16 segs;
2695     + u32 seg_start;
2696     + u32 buf_len;
2697     };
2698    
2699     struct hwrm_dbg_cmn_input {
2700     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
2701     index b2c160947fc8..30816ec4fa91 100644
2702     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
2703     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
2704     @@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
2705     {
2706     struct net_device *dev = edev->net;
2707     struct bnxt *bp = netdev_priv(dev);
2708     + struct bnxt_hw_resc *hw_resc;
2709     int max_idx, max_cp_rings;
2710     int avail_msix, idx;
2711     + int total_vecs;
2712     int rc = 0;
2713    
2714     ASSERT_RTNL();
2715     @@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
2716     }
2717     edev->ulp_tbl[ulp_id].msix_base = idx;
2718     edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
2719     - if (bp->total_irqs < (idx + avail_msix)) {
2720     + hw_resc = &bp->hw_resc;
2721     + total_vecs = idx + avail_msix;
2722     + if (bp->total_irqs < total_vecs ||
2723     + (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
2724     if (netif_running(dev)) {
2725     bnxt_close_nic(bp, true, false);
2726     rc = bnxt_open_nic(bp, true, false);
2727     @@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
2728     }
2729    
2730     if (BNXT_NEW_RM(bp)) {
2731     - struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
2732     int resv_msix;
2733    
2734     resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
2735     diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2736     index 1fbb640e896a..4025a683fa7d 100644
2737     --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2738     +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2739     @@ -503,6 +503,7 @@ struct link_config {
2740    
2741     enum cc_pause requested_fc; /* flow control user has requested */
2742     enum cc_pause fc; /* actual link flow control */
2743     + enum cc_pause advertised_fc; /* actual advertised flow control */
2744    
2745     enum cc_fec requested_fec; /* Forward Error Correction: */
2746     enum cc_fec fec; /* requested and actual in use */
2747     diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
2748     index 76538f4cd595..f537be9cb315 100644
2749     --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
2750     +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
2751     @@ -793,8 +793,8 @@ static void get_pauseparam(struct net_device *dev,
2752     struct port_info *p = netdev_priv(dev);
2753    
2754     epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2755     - epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2756     - epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2757     + epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
2758     + epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
2759     }
2760    
2761     static int set_pauseparam(struct net_device *dev,
2762     diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
2763     index f2a7824da42b..3f6813daf3c1 100644
2764     --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
2765     +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
2766     @@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
2767     if (cc_pause & PAUSE_TX)
2768     fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
2769     else
2770     - fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
2771     + fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
2772     + FW_PORT_CAP32_802_3_PAUSE;
2773     } else if (cc_pause & PAUSE_TX) {
2774     fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
2775     }
2776     @@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
2777     void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
2778     {
2779     const struct fw_port_cmd *cmd = (const void *)rpl;
2780     - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
2781     - struct adapter *adapter = pi->adapter;
2782     + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
2783     struct link_config *lc = &pi->link_cfg;
2784     - int link_ok, linkdnrc;
2785     - enum fw_port_type port_type;
2786     + struct adapter *adapter = pi->adapter;
2787     + unsigned int speed, fc, fec, adv_fc;
2788     enum fw_port_module_type mod_type;
2789     - unsigned int speed, fc, fec;
2790     - fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
2791     + int action, link_ok, linkdnrc;
2792     + enum fw_port_type port_type;
2793    
2794     /* Extract the various fields from the Port Information message.
2795     */
2796     + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
2797     switch (action) {
2798     case FW_PORT_ACTION_GET_PORT_INFO: {
2799     u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
2800     @@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
2801     }
2802    
2803     fec = fwcap_to_cc_fec(acaps);
2804     + adv_fc = fwcap_to_cc_pause(acaps);
2805     fc = fwcap_to_cc_pause(linkattr);
2806     speed = fwcap_to_speed(linkattr);
2807    
2808     @@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
2809     }
2810    
2811     if (link_ok != lc->link_ok || speed != lc->speed ||
2812     - fc != lc->fc || fec != lc->fec) { /* something changed */
2813     + fc != lc->fc || adv_fc != lc->advertised_fc ||
2814     + fec != lc->fec) {
2815     + /* something changed */
2816     if (!link_ok && lc->link_ok) {
2817     lc->link_down_rc = linkdnrc;
2818     dev_warn_ratelimited(adapter->pdev_dev,
2819     @@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
2820     }
2821     lc->link_ok = link_ok;
2822     lc->speed = speed;
2823     + lc->advertised_fc = adv_fc;
2824     lc->fc = fc;
2825     lc->fec = fec;
2826    
2827     diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2828     index f6fc0875d5b0..f4d41f968afa 100644
2829     --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2830     +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2831     @@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
2832     struct port_info *pi = netdev_priv(dev);
2833    
2834     pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2835     - pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
2836     - pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
2837     + pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
2838     + pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
2839     }
2840    
2841     /*
2842     diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
2843     index ccca67cf4487..57cfd10a99ec 100644
2844     --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
2845     +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
2846     @@ -135,6 +135,7 @@ struct link_config {
2847    
2848     enum cc_pause requested_fc; /* flow control user has requested */
2849     enum cc_pause fc; /* actual link flow control */
2850     + enum cc_pause advertised_fc; /* actual advertised flow control */
2851    
2852     enum cc_fec auto_fec; /* Forward Error Correction: */
2853     enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */
2854     diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
2855     index 8a389d617a23..9d49ff211cc1 100644
2856     --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
2857     +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
2858     @@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
2859     static void t4vf_handle_get_port_info(struct port_info *pi,
2860     const struct fw_port_cmd *cmd)
2861     {
2862     - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
2863     - struct adapter *adapter = pi->adapter;
2864     + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
2865     struct link_config *lc = &pi->link_cfg;
2866     - int link_ok, linkdnrc;
2867     - enum fw_port_type port_type;
2868     + struct adapter *adapter = pi->adapter;
2869     + unsigned int speed, fc, fec, adv_fc;
2870     enum fw_port_module_type mod_type;
2871     - unsigned int speed, fc, fec;
2872     - fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
2873     + int action, link_ok, linkdnrc;
2874     + enum fw_port_type port_type;
2875    
2876     /* Extract the various fields from the Port Information message. */
2877     + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
2878     switch (action) {
2879     case FW_PORT_ACTION_GET_PORT_INFO: {
2880     u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
2881     @@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
2882     }
2883    
2884     fec = fwcap_to_cc_fec(acaps);
2885     + adv_fc = fwcap_to_cc_pause(acaps);
2886     fc = fwcap_to_cc_pause(linkattr);
2887     speed = fwcap_to_speed(linkattr);
2888    
2889     @@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
2890     }
2891    
2892     if (link_ok != lc->link_ok || speed != lc->speed ||
2893     - fc != lc->fc || fec != lc->fec) { /* something changed */
2894     + fc != lc->fc || adv_fc != lc->advertised_fc ||
2895     + fec != lc->fec) {
2896     + /* something changed */
2897     if (!link_ok && lc->link_ok) {
2898     lc->link_down_rc = linkdnrc;
2899     dev_warn_ratelimited(adapter->pdev_dev,
2900     @@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
2901     }
2902     lc->link_ok = link_ok;
2903     lc->speed = speed;
2904     + lc->advertised_fc = adv_fc;
2905     lc->fc = fc;
2906     lc->fec = fec;
2907    
2908     diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2909     index 111b3b8239e1..ef44c6979a31 100644
2910     --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2911     +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2912     @@ -3674,7 +3674,7 @@ static int mvpp2_open(struct net_device *dev)
2913     valid = true;
2914     }
2915    
2916     - if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
2917     + if (priv->hw_version == MVPP22 && port->link_irq) {
2918     err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
2919     dev->name, port);
2920     if (err) {
2921     diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
2922     index 544344ac4894..79057af4fe99 100644
2923     --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
2924     +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
2925     @@ -6,6 +6,7 @@
2926     #include <linux/kernel.h>
2927     #include <linux/module.h>
2928     #include <linux/netlink.h>
2929     +#include <linux/vmalloc.h>
2930     #include <linux/xz.h>
2931     #include "mlxfw_mfa2.h"
2932     #include "mlxfw_mfa2_file.h"
2933     @@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
2934     comp_size = be32_to_cpu(comp->size);
2935     comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
2936    
2937     - comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
2938     + comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
2939     if (!comp_data)
2940     return ERR_PTR(-ENOMEM);
2941     comp_data->comp.data_size = comp_size;
2942     @@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
2943     comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
2944     return &comp_data->comp;
2945     err_out:
2946     - kfree(comp_data);
2947     + vfree(comp_data);
2948     return ERR_PTR(err);
2949     }
2950    
2951     @@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
2952     const struct mlxfw_mfa2_comp_data *comp_data;
2953    
2954     comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
2955     - kfree(comp_data);
2956     + vfree(comp_data);
2957     }
2958    
2959     void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
2960     diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
2961     index 5494cf93f34c..8e42ebdbd487 100644
2962     --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
2963     +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
2964     @@ -5421,6 +5421,7 @@ enum mlxsw_reg_htgt_trap_group {
2965     MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
2966     MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
2967     MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
2968     + MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
2969    
2970     __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
2971     MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
2972     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2973     index dcf9562bce8a..3ec18fb0d479 100644
2974     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2975     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2976     @@ -4398,8 +4398,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
2977     MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
2978     MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2979     MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
2980     - MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
2981     - MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
2982     + MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
2983     + MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
2984     /* PKT Sample trap */
2985     MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
2986     false, SP_IP2ME, DISCARD),
2987     @@ -4483,6 +4483,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2988     rate = 19 * 1024;
2989     burst_size = 12;
2990     break;
2991     + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
2992     + rate = 360;
2993     + burst_size = 7;
2994     + break;
2995     default:
2996     continue;
2997     }
2998     @@ -4522,6 +4526,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
2999     case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3000     case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
3001     case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
3002     + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
3003     priority = 5;
3004     tc = 5;
3005     break;
3006     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3007     index 210ebc91d3d6..efdf8cb5114c 100644
3008     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3009     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3010     @@ -6985,6 +6985,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
3011    
3012     for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
3013     rif = mlxsw_sp->router->rifs[i];
3014     + if (rif && rif->ops &&
3015     + rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
3016     + continue;
3017     if (rif && rif->dev && rif->dev != dev &&
3018     !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
3019     mlxsw_sp->mac_mask)) {
3020     diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
3021     index 306da8f6b7d5..33ce139f090f 100644
3022     --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
3023     +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
3024     @@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
3025     struct device *dev = dwmac->dev;
3026     const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
3027     struct meson8b_dwmac_clk_configs *clk_configs;
3028     + static const struct clk_div_table div_table[] = {
3029     + { .div = 2, .val = 2, },
3030     + { .div = 3, .val = 3, },
3031     + { .div = 4, .val = 4, },
3032     + { .div = 5, .val = 5, },
3033     + { .div = 6, .val = 6, },
3034     + { .div = 7, .val = 7, },
3035     + };
3036    
3037     clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
3038     if (!clk_configs)
3039     @@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
3040     clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
3041     clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
3042     clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
3043     - clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
3044     - CLK_DIVIDER_ALLOW_ZERO |
3045     - CLK_DIVIDER_ROUND_CLOSEST;
3046     + clk_configs->m250_div.table = div_table;
3047     + clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
3048     + CLK_DIVIDER_ROUND_CLOSEST;
3049     clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
3050     &clk_divider_ops,
3051     &clk_configs->m250_div.hw);
3052     diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
3053     index ecfe26215935..fca471e27f39 100644
3054     --- a/drivers/net/gtp.c
3055     +++ b/drivers/net/gtp.c
3056     @@ -38,7 +38,6 @@ struct pdp_ctx {
3057     struct hlist_node hlist_addr;
3058    
3059     union {
3060     - u64 tid;
3061     struct {
3062     u64 tid;
3063     u16 flow;
3064     @@ -541,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
3065     mtu = dst_mtu(&rt->dst);
3066     }
3067    
3068     - rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
3069     + rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
3070    
3071     if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
3072     mtu < ntohs(iph->tot_len)) {
3073     @@ -641,9 +640,16 @@ static void gtp_link_setup(struct net_device *dev)
3074     }
3075    
3076     static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
3077     -static void gtp_hashtable_free(struct gtp_dev *gtp);
3078     static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
3079    
3080     +static void gtp_destructor(struct net_device *dev)
3081     +{
3082     + struct gtp_dev *gtp = netdev_priv(dev);
3083     +
3084     + kfree(gtp->addr_hash);
3085     + kfree(gtp->tid_hash);
3086     +}
3087     +
3088     static int gtp_newlink(struct net *src_net, struct net_device *dev,
3089     struct nlattr *tb[], struct nlattr *data[],
3090     struct netlink_ext_ack *extack)
3091     @@ -661,10 +667,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
3092     if (err < 0)
3093     return err;
3094    
3095     - if (!data[IFLA_GTP_PDP_HASHSIZE])
3096     + if (!data[IFLA_GTP_PDP_HASHSIZE]) {
3097     hashsize = 1024;
3098     - else
3099     + } else {
3100     hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
3101     + if (!hashsize)
3102     + hashsize = 1024;
3103     + }
3104    
3105     err = gtp_hashtable_new(gtp, hashsize);
3106     if (err < 0)
3107     @@ -678,13 +687,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
3108    
3109     gn = net_generic(dev_net(dev), gtp_net_id);
3110     list_add_rcu(&gtp->list, &gn->gtp_dev_list);
3111     + dev->priv_destructor = gtp_destructor;
3112    
3113     netdev_dbg(dev, "registered new GTP interface\n");
3114    
3115     return 0;
3116    
3117     out_hashtable:
3118     - gtp_hashtable_free(gtp);
3119     + kfree(gtp->addr_hash);
3120     + kfree(gtp->tid_hash);
3121     out_encap:
3122     gtp_encap_disable(gtp);
3123     return err;
3124     @@ -693,8 +704,13 @@ out_encap:
3125     static void gtp_dellink(struct net_device *dev, struct list_head *head)
3126     {
3127     struct gtp_dev *gtp = netdev_priv(dev);
3128     + struct pdp_ctx *pctx;
3129     + int i;
3130     +
3131     + for (i = 0; i < gtp->hash_size; i++)
3132     + hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
3133     + pdp_context_delete(pctx);
3134    
3135     - gtp_hashtable_free(gtp);
3136     list_del_rcu(&gtp->list);
3137     unregister_netdevice_queue(dev, head);
3138     }
3139     @@ -772,20 +788,6 @@ err1:
3140     return -ENOMEM;
3141     }
3142    
3143     -static void gtp_hashtable_free(struct gtp_dev *gtp)
3144     -{
3145     - struct pdp_ctx *pctx;
3146     - int i;
3147     -
3148     - for (i = 0; i < gtp->hash_size; i++)
3149     - hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
3150     - pdp_context_delete(pctx);
3151     -
3152     - synchronize_rcu();
3153     - kfree(gtp->addr_hash);
3154     - kfree(gtp->tid_hash);
3155     -}
3156     -
3157     static struct sock *gtp_encap_enable_socket(int fd, int type,
3158     struct gtp_dev *gtp)
3159     {
3160     @@ -926,24 +928,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
3161     }
3162     }
3163    
3164     -static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
3165     - struct genl_info *info)
3166     +static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
3167     + struct genl_info *info)
3168     {
3169     + struct pdp_ctx *pctx, *pctx_tid = NULL;
3170     struct net_device *dev = gtp->dev;
3171     u32 hash_ms, hash_tid = 0;
3172     - struct pdp_ctx *pctx;
3173     + unsigned int version;
3174     bool found = false;
3175     __be32 ms_addr;
3176    
3177     ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
3178     hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
3179     + version = nla_get_u32(info->attrs[GTPA_VERSION]);
3180    
3181     - hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
3182     - if (pctx->ms_addr_ip4.s_addr == ms_addr) {
3183     - found = true;
3184     - break;
3185     - }
3186     - }
3187     + pctx = ipv4_pdp_find(gtp, ms_addr);
3188     + if (pctx)
3189     + found = true;
3190     + if (version == GTP_V0)
3191     + pctx_tid = gtp0_pdp_find(gtp,
3192     + nla_get_u64(info->attrs[GTPA_TID]));
3193     + else if (version == GTP_V1)
3194     + pctx_tid = gtp1_pdp_find(gtp,
3195     + nla_get_u32(info->attrs[GTPA_I_TEI]));
3196     + if (pctx_tid)
3197     + found = true;
3198    
3199     if (found) {
3200     if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3201     @@ -951,6 +960,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
3202     if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
3203     return -EOPNOTSUPP;
3204    
3205     + if (pctx && pctx_tid)
3206     + return -EEXIST;
3207     + if (!pctx)
3208     + pctx = pctx_tid;
3209     +
3210     ipv4_pdp_fill(pctx, info);
3211    
3212     if (pctx->gtp_version == GTP_V0)
3213     @@ -1074,7 +1088,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
3214     goto out_unlock;
3215     }
3216    
3217     - err = ipv4_pdp_add(gtp, sk, info);
3218     + err = gtp_pdp_add(gtp, sk, info);
3219    
3220     out_unlock:
3221     rcu_read_unlock();
3222     @@ -1232,43 +1246,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
3223     struct netlink_callback *cb)
3224     {
3225     struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
3226     + int i, j, bucket = cb->args[0], skip = cb->args[1];
3227     struct net *net = sock_net(skb->sk);
3228     - struct gtp_net *gn = net_generic(net, gtp_net_id);
3229     - unsigned long tid = cb->args[1];
3230     - int i, k = cb->args[0], ret;
3231     struct pdp_ctx *pctx;
3232     + struct gtp_net *gn;
3233     +
3234     + gn = net_generic(net, gtp_net_id);
3235    
3236     if (cb->args[4])
3237     return 0;
3238    
3239     + rcu_read_lock();
3240     list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
3241     if (last_gtp && last_gtp != gtp)
3242     continue;
3243     else
3244     last_gtp = NULL;
3245    
3246     - for (i = k; i < gtp->hash_size; i++) {
3247     - hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
3248     - if (tid && tid != pctx->u.tid)
3249     - continue;
3250     - else
3251     - tid = 0;
3252     -
3253     - ret = gtp_genl_fill_info(skb,
3254     - NETLINK_CB(cb->skb).portid,
3255     - cb->nlh->nlmsg_seq,
3256     - cb->nlh->nlmsg_type, pctx);
3257     - if (ret < 0) {
3258     + for (i = bucket; i < gtp->hash_size; i++) {
3259     + j = 0;
3260     + hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
3261     + hlist_tid) {
3262     + if (j >= skip &&
3263     + gtp_genl_fill_info(skb,
3264     + NETLINK_CB(cb->skb).portid,
3265     + cb->nlh->nlmsg_seq,
3266     + cb->nlh->nlmsg_type, pctx)) {
3267     cb->args[0] = i;
3268     - cb->args[1] = pctx->u.tid;
3269     + cb->args[1] = j;
3270     cb->args[2] = (unsigned long)gtp;
3271     goto out;
3272     }
3273     + j++;
3274     }
3275     + skip = 0;
3276     }
3277     + bucket = 0;
3278     }
3279     cb->args[4] = 1;
3280     out:
3281     + rcu_read_unlock();
3282     return skb->len;
3283     }
3284    
3285     diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
3286     index 23281aeeb222..71d6629e65c9 100644
3287     --- a/drivers/net/hamradio/6pack.c
3288     +++ b/drivers/net/hamradio/6pack.c
3289     @@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty)
3290     {
3291     struct sixpack *sp;
3292    
3293     - write_lock_bh(&disc_data_lock);
3294     + write_lock_irq(&disc_data_lock);
3295     sp = tty->disc_data;
3296     tty->disc_data = NULL;
3297     - write_unlock_bh(&disc_data_lock);
3298     + write_unlock_irq(&disc_data_lock);
3299     if (!sp)
3300     return;
3301    
3302     diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
3303     index c5bfa19ddb93..deef14215110 100644
3304     --- a/drivers/net/hamradio/mkiss.c
3305     +++ b/drivers/net/hamradio/mkiss.c
3306     @@ -773,10 +773,10 @@ static void mkiss_close(struct tty_struct *tty)
3307     {
3308     struct mkiss *ax;
3309    
3310     - write_lock_bh(&disc_data_lock);
3311     + write_lock_irq(&disc_data_lock);
3312     ax = tty->disc_data;
3313     tty->disc_data = NULL;
3314     - write_unlock_bh(&disc_data_lock);
3315     + write_unlock_irq(&disc_data_lock);
3316    
3317     if (!ax)
3318     return;
3319     diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
3320     index abaf8156d19d..e3d3c9097ff1 100644
3321     --- a/drivers/net/hyperv/rndis_filter.c
3322     +++ b/drivers/net/hyperv/rndis_filter.c
3323     @@ -1165,6 +1165,9 @@ int rndis_set_subchannel(struct net_device *ndev,
3324     wait_event(nvdev->subchan_open,
3325     atomic_read(&nvdev->open_chn) == nvdev->num_chn);
3326    
3327     + for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
3328     + ndev_ctx->tx_table[i] = i % nvdev->num_chn;
3329     +
3330     /* ignore failures from setting rss parameters, still have channels */
3331     if (dev_info)
3332     rndis_filter_set_rss_param(rdev, dev_info->rss_key);
3333     @@ -1174,9 +1177,6 @@ int rndis_set_subchannel(struct net_device *ndev,
3334     netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
3335     netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
3336    
3337     - for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
3338     - ndev_ctx->tx_table[i] = i % nvdev->num_chn;
3339     -
3340     return 0;
3341     }
3342    
3343     diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
3344     index 3b29d381116f..975789d9349d 100644
3345     --- a/drivers/net/phy/aquantia_main.c
3346     +++ b/drivers/net/phy/aquantia_main.c
3347     @@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = {
3348     .config_intr = aqr_config_intr,
3349     .ack_interrupt = aqr_ack_interrupt,
3350     .read_status = aqr_read_status,
3351     + .suspend = aqr107_suspend,
3352     + .resume = aqr107_resume,
3353     },
3354     {
3355     PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
3356     diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
3357     index 536236fdb232..bf5bbb565cf5 100644
3358     --- a/drivers/net/phy/phylink.c
3359     +++ b/drivers/net/phy/phylink.c
3360     @@ -444,8 +444,7 @@ static void phylink_mac_link_up(struct phylink *pl,
3361    
3362     pl->cur_interface = link_state.interface;
3363     pl->ops->mac_link_up(pl->config, pl->link_an_mode,
3364     - pl->phy_state.interface,
3365     - pl->phydev);
3366     + pl->cur_interface, pl->phydev);
3367    
3368     if (ndev)
3369     netif_carrier_on(ndev);
3370     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
3371     index 040cec17d3ad..b0b7eca1754e 100644
3372     --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
3373     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
3374     @@ -1111,18 +1111,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3375    
3376     /* same thing for QuZ... */
3377     if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
3378     - if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
3379     - iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
3380     - else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
3381     - iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
3382     - else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
3383     - iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
3384     - else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
3385     - iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
3386     - else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
3387     - iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
3388     - else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
3389     - iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
3390     + if (cfg == &iwl_ax101_cfg_qu_hr)
3391     + cfg = &iwl_ax101_cfg_quz_hr;
3392     + else if (cfg == &iwl_ax201_cfg_qu_hr)
3393     + cfg = &iwl_ax201_cfg_quz_hr;
3394     + else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
3395     + cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
3396     + else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
3397     + cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
3398     + else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
3399     + cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
3400     + else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
3401     + cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
3402     }
3403    
3404     #endif
3405     diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
3406     index 3e9f45aec8d1..5129543a0473 100644
3407     --- a/drivers/nvdimm/btt.c
3408     +++ b/drivers/nvdimm/btt.c
3409     @@ -1261,11 +1261,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
3410    
3411     ret = btt_data_read(arena, page, off, postmap, cur_len);
3412     if (ret) {
3413     - int rc;
3414     -
3415     /* Media error - set the e_flag */
3416     - rc = btt_map_write(arena, premap, postmap, 0, 1,
3417     - NVDIMM_IO_ATOMIC);
3418     + if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
3419     + dev_warn_ratelimited(to_dev(arena),
3420     + "Error persistently tracking bad blocks at %#x\n",
3421     + premap);
3422     goto out_rtt;
3423     }
3424    
3425     diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
3426     index 92e895d86458..ca7823eef2b4 100644
3427     --- a/drivers/of/unittest.c
3428     +++ b/drivers/of/unittest.c
3429     @@ -1146,8 +1146,10 @@ static void attach_node_and_children(struct device_node *np)
3430     full_name = kasprintf(GFP_KERNEL, "%pOF", np);
3431    
3432     if (!strcmp(full_name, "/__local_fixups__") ||
3433     - !strcmp(full_name, "/__fixups__"))
3434     + !strcmp(full_name, "/__fixups__")) {
3435     + kfree(full_name);
3436     return;
3437     + }
3438    
3439     dup = of_find_node_by_path(full_name);
3440     kfree(full_name);
3441     diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
3442     index 18627bb21e9e..32eab1776cfe 100644
3443     --- a/drivers/pci/hotplug/rpaphp_core.c
3444     +++ b/drivers/pci/hotplug/rpaphp_core.c
3445     @@ -154,11 +154,11 @@ static enum pci_bus_speed get_max_bus_speed(struct slot *slot)
3446     return speed;
3447     }
3448    
3449     -static int get_children_props(struct device_node *dn, const int **drc_indexes,
3450     - const int **drc_names, const int **drc_types,
3451     - const int **drc_power_domains)
3452     +static int get_children_props(struct device_node *dn, const __be32 **drc_indexes,
3453     + const __be32 **drc_names, const __be32 **drc_types,
3454     + const __be32 **drc_power_domains)
3455     {
3456     - const int *indexes, *names, *types, *domains;
3457     + const __be32 *indexes, *names, *types, *domains;
3458    
3459     indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
3460     names = of_get_property(dn, "ibm,drc-names", NULL);
3461     @@ -194,8 +194,8 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
3462     char *drc_type, unsigned int my_index)
3463     {
3464     char *name_tmp, *type_tmp;
3465     - const int *indexes, *names;
3466     - const int *types, *domains;
3467     + const __be32 *indexes, *names;
3468     + const __be32 *types, *domains;
3469     int i, rc;
3470    
3471     rc = get_children_props(dn->parent, &indexes, &names, &types, &domains);
3472     @@ -208,7 +208,7 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
3473    
3474     /* Iterate through parent properties, looking for my-drc-index */
3475     for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
3476     - if ((unsigned int) indexes[i + 1] == my_index)
3477     + if (be32_to_cpu(indexes[i + 1]) == my_index)
3478     break;
3479    
3480     name_tmp += (strlen(name_tmp) + 1);
3481     @@ -239,6 +239,8 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
3482     value = of_prop_next_u32(info, NULL, &entries);
3483     if (!value)
3484     return -EINVAL;
3485     + else
3486     + value++;
3487    
3488     for (j = 0; j < entries; j++) {
3489     of_read_drc_info_cell(&info, &value, &drc);
3490     @@ -246,9 +248,10 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
3491     /* Should now know end of current entry */
3492    
3493     /* Found it */
3494     - if (my_index <= drc.last_drc_index) {
3495     + if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) {
3496     + int index = my_index - drc.drc_index_start;
3497     sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
3498     - my_index);
3499     + drc.drc_name_suffix_start + index);
3500     break;
3501     }
3502     }
3503     @@ -265,7 +268,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
3504     int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
3505     char *drc_type)
3506     {
3507     - const unsigned int *my_index;
3508     + const __be32 *my_index;
3509    
3510     my_index = of_get_property(dn, "ibm,my-drc-index", NULL);
3511     if (!my_index) {
3512     @@ -273,12 +276,12 @@ int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
3513     return -EINVAL;
3514     }
3515    
3516     - if (firmware_has_feature(FW_FEATURE_DRC_INFO))
3517     + if (of_find_property(dn->parent, "ibm,drc-info", NULL))
3518     return rpaphp_check_drc_props_v2(dn, drc_name, drc_type,
3519     - *my_index);
3520     + be32_to_cpu(*my_index));
3521     else
3522     return rpaphp_check_drc_props_v1(dn, drc_name, drc_type,
3523     - *my_index);
3524     + be32_to_cpu(*my_index));
3525     }
3526     EXPORT_SYMBOL_GPL(rpaphp_check_drc_props);
3527    
3528     @@ -309,10 +312,11 @@ static int is_php_type(char *drc_type)
3529     * for built-in pci slots (even when the built-in slots are
3530     * dlparable.)
3531     */
3532     -static int is_php_dn(struct device_node *dn, const int **indexes,
3533     - const int **names, const int **types, const int **power_domains)
3534     +static int is_php_dn(struct device_node *dn, const __be32 **indexes,
3535     + const __be32 **names, const __be32 **types,
3536     + const __be32 **power_domains)
3537     {
3538     - const int *drc_types;
3539     + const __be32 *drc_types;
3540     int rc;
3541    
3542     rc = get_children_props(dn, indexes, names, &drc_types, power_domains);
3543     @@ -347,7 +351,7 @@ int rpaphp_add_slot(struct device_node *dn)
3544     struct slot *slot;
3545     int retval = 0;
3546     int i;
3547     - const int *indexes, *names, *types, *power_domains;
3548     + const __be32 *indexes, *names, *types, *power_domains;
3549     char *name, *type;
3550    
3551     if (!dn->name || strcmp(dn->name, "pci"))
3552     diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
3553     index ae21d08c65e8..1cab99320514 100644
3554     --- a/drivers/platform/x86/Kconfig
3555     +++ b/drivers/platform/x86/Kconfig
3556     @@ -806,7 +806,6 @@ config PEAQ_WMI
3557     tristate "PEAQ 2-in-1 WMI hotkey driver"
3558     depends on ACPI_WMI
3559     depends on INPUT
3560     - select INPUT_POLLDEV
3561     help
3562     Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
3563    
3564     diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
3565     index 94a008efb09b..571b4754477c 100644
3566     --- a/drivers/platform/x86/intel_pmc_core.c
3567     +++ b/drivers/platform/x86/intel_pmc_core.c
3568     @@ -158,8 +158,9 @@ static const struct pmc_reg_map spt_reg_map = {
3569     .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
3570     };
3571    
3572     -/* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
3573     +/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
3574     static const struct pmc_bit_map cnp_pfear_map[] = {
3575     + /* Reserved for Cannon Lake but valid for Comet Lake */
3576     {"PMC", BIT(0)},
3577     {"OPI-DMI", BIT(1)},
3578     {"SPI/eSPI", BIT(2)},
3579     @@ -185,7 +186,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
3580     {"SDX", BIT(4)},
3581     {"SPE", BIT(5)},
3582     {"Fuse", BIT(6)},
3583     - /* Reserved for Cannonlake but valid for Icelake */
3584     + /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
3585     {"SBR8", BIT(7)},
3586    
3587     {"CSME_FSC", BIT(0)},
3588     @@ -229,12 +230,12 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
3589     {"HDA_PGD4", BIT(2)},
3590     {"HDA_PGD5", BIT(3)},
3591     {"HDA_PGD6", BIT(4)},
3592     - /* Reserved for Cannonlake but valid for Icelake */
3593     + /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
3594     {"PSF6", BIT(5)},
3595     {"PSF7", BIT(6)},
3596     {"PSF8", BIT(7)},
3597    
3598     - /* Icelake generation onwards only */
3599     + /* Ice Lake generation onwards only */
3600     {"RES_65", BIT(0)},
3601     {"RES_66", BIT(1)},
3602     {"RES_67", BIT(2)},
3603     @@ -324,7 +325,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
3604     {"ISH", CNP_PMC_LTR_ISH},
3605     {"UFSX2", CNP_PMC_LTR_UFSX2},
3606     {"EMMC", CNP_PMC_LTR_EMMC},
3607     - /* Reserved for Cannonlake but valid for Icelake */
3608     + /* Reserved for Cannon Lake but valid for Ice Lake */
3609     {"WIGIG", ICL_PMC_LTR_WIGIG},
3610     /* Below two cannot be used for LTR_IGNORE */
3611     {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
3612     @@ -813,6 +814,8 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
3613     INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
3614     INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
3615     INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
3616     + INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
3617     + INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
3618     {}
3619     };
3620    
3621     @@ -871,8 +874,8 @@ static int pmc_core_probe(struct platform_device *pdev)
3622     pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;
3623    
3624     /*
3625     - * Coffeelake has CPU ID of Kabylake and Cannonlake PCH. So here
3626     - * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
3627     + * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
3628     + * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
3629     * in this case.
3630     */
3631     if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
3632     diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
3633     index fdeb3624c529..cf9c44c20a82 100644
3634     --- a/drivers/platform/x86/peaq-wmi.c
3635     +++ b/drivers/platform/x86/peaq-wmi.c
3636     @@ -6,7 +6,7 @@
3637    
3638     #include <linux/acpi.h>
3639     #include <linux/dmi.h>
3640     -#include <linux/input-polldev.h>
3641     +#include <linux/input.h>
3642     #include <linux/kernel.h>
3643     #include <linux/module.h>
3644    
3645     @@ -18,8 +18,7 @@
3646    
3647     MODULE_ALIAS("wmi:"PEAQ_DOLBY_BUTTON_GUID);
3648    
3649     -static unsigned int peaq_ignore_events_counter;
3650     -static struct input_polled_dev *peaq_poll_dev;
3651     +static struct input_dev *peaq_poll_dev;
3652    
3653     /*
3654     * The Dolby button (yes really a Dolby button) causes an ACPI variable to get
3655     @@ -28,8 +27,10 @@ static struct input_polled_dev *peaq_poll_dev;
3656     * (if polling after the release) or twice (polling between press and release).
3657     * We ignore events for 0.5s after the first event to avoid reporting 2 presses.
3658     */
3659     -static void peaq_wmi_poll(struct input_polled_dev *dev)
3660     +static void peaq_wmi_poll(struct input_dev *input_dev)
3661     {
3662     + static unsigned long last_event_time;
3663     + static bool had_events;
3664     union acpi_object obj;
3665     acpi_status status;
3666     u32 dummy = 0;
3667     @@ -44,22 +45,25 @@ static void peaq_wmi_poll(struct input_polled_dev *dev)
3668     return;
3669    
3670     if (obj.type != ACPI_TYPE_INTEGER) {
3671     - dev_err(&peaq_poll_dev->input->dev,
3672     + dev_err(&input_dev->dev,
3673     "Error WMBC did not return an integer\n");
3674     return;
3675     }
3676    
3677     - if (peaq_ignore_events_counter && peaq_ignore_events_counter--)
3678     + if (!obj.integer.value)
3679     return;
3680    
3681     - if (obj.integer.value) {
3682     - input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 1);
3683     - input_sync(peaq_poll_dev->input);
3684     - input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 0);
3685     - input_sync(peaq_poll_dev->input);
3686     - peaq_ignore_events_counter = max(1u,
3687     - PEAQ_POLL_IGNORE_MS / peaq_poll_dev->poll_interval);
3688     - }
3689     + if (had_events && time_before(jiffies, last_event_time +
3690     + msecs_to_jiffies(PEAQ_POLL_IGNORE_MS)))
3691     + return;
3692     +
3693     + input_event(input_dev, EV_KEY, KEY_SOUND, 1);
3694     + input_sync(input_dev);
3695     + input_event(input_dev, EV_KEY, KEY_SOUND, 0);
3696     + input_sync(input_dev);
3697     +
3698     + last_event_time = jiffies;
3699     + had_events = true;
3700     }
3701    
3702     /* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */
3703     @@ -75,6 +79,8 @@ static const struct dmi_system_id peaq_dmi_table[] __initconst = {
3704    
3705     static int __init peaq_wmi_init(void)
3706     {
3707     + int err;
3708     +
3709     /* WMI GUID is not unique, also check for a DMI match */
3710     if (!dmi_check_system(peaq_dmi_table))
3711     return -ENODEV;
3712     @@ -82,24 +88,36 @@ static int __init peaq_wmi_init(void)
3713     if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
3714     return -ENODEV;
3715    
3716     - peaq_poll_dev = input_allocate_polled_device();
3717     + peaq_poll_dev = input_allocate_device();
3718     if (!peaq_poll_dev)
3719     return -ENOMEM;
3720    
3721     - peaq_poll_dev->poll = peaq_wmi_poll;
3722     - peaq_poll_dev->poll_interval = PEAQ_POLL_INTERVAL_MS;
3723     - peaq_poll_dev->poll_interval_max = PEAQ_POLL_MAX_MS;
3724     - peaq_poll_dev->input->name = "PEAQ WMI hotkeys";
3725     - peaq_poll_dev->input->phys = "wmi/input0";
3726     - peaq_poll_dev->input->id.bustype = BUS_HOST;
3727     - input_set_capability(peaq_poll_dev->input, EV_KEY, KEY_SOUND);
3728     + peaq_poll_dev->name = "PEAQ WMI hotkeys";
3729     + peaq_poll_dev->phys = "wmi/input0";
3730     + peaq_poll_dev->id.bustype = BUS_HOST;
3731     + input_set_capability(peaq_poll_dev, EV_KEY, KEY_SOUND);
3732     +
3733     + err = input_setup_polling(peaq_poll_dev, peaq_wmi_poll);
3734     + if (err)
3735     + goto err_out;
3736     +
3737     + input_set_poll_interval(peaq_poll_dev, PEAQ_POLL_INTERVAL_MS);
3738     + input_set_max_poll_interval(peaq_poll_dev, PEAQ_POLL_MAX_MS);
3739     +
3740     + err = input_register_device(peaq_poll_dev);
3741     + if (err)
3742     + goto err_out;
3743     +
3744     + return 0;
3745    
3746     - return input_register_polled_device(peaq_poll_dev);
3747     +err_out:
3748     + input_free_device(peaq_poll_dev);
3749     + return err;
3750     }
3751    
3752     static void __exit peaq_wmi_exit(void)
3753     {
3754     - input_unregister_polled_device(peaq_poll_dev);
3755     + input_unregister_device(peaq_poll_dev);
3756     }
3757    
3758     module_init(peaq_wmi_init);
3759     diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
3760     index e60eab7f8a61..61fafe0374ce 100644
3761     --- a/drivers/ptp/ptp_clock.c
3762     +++ b/drivers/ptp/ptp_clock.c
3763     @@ -166,9 +166,9 @@ static struct posix_clock_operations ptp_clock_ops = {
3764     .read = ptp_read,
3765     };
3766    
3767     -static void delete_ptp_clock(struct posix_clock *pc)
3768     +static void ptp_clock_release(struct device *dev)
3769     {
3770     - struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
3771     + struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
3772    
3773     mutex_destroy(&ptp->tsevq_mux);
3774     mutex_destroy(&ptp->pincfg_mux);
3775     @@ -213,7 +213,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
3776     }
3777    
3778     ptp->clock.ops = ptp_clock_ops;
3779     - ptp->clock.release = delete_ptp_clock;
3780     ptp->info = info;
3781     ptp->devid = MKDEV(major, index);
3782     ptp->index = index;
3783     @@ -236,15 +235,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
3784     if (err)
3785     goto no_pin_groups;
3786    
3787     - /* Create a new device in our class. */
3788     - ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
3789     - ptp, ptp->pin_attr_groups,
3790     - "ptp%d", ptp->index);
3791     - if (IS_ERR(ptp->dev)) {
3792     - err = PTR_ERR(ptp->dev);
3793     - goto no_device;
3794     - }
3795     -
3796     /* Register a new PPS source. */
3797     if (info->pps) {
3798     struct pps_source_info pps;
3799     @@ -260,8 +250,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
3800     }
3801     }
3802    
3803     - /* Create a posix clock. */
3804     - err = posix_clock_register(&ptp->clock, ptp->devid);
3805     + /* Initialize a new device of our class in our clock structure. */
3806     + device_initialize(&ptp->dev);
3807     + ptp->dev.devt = ptp->devid;
3808     + ptp->dev.class = ptp_class;
3809     + ptp->dev.parent = parent;
3810     + ptp->dev.groups = ptp->pin_attr_groups;
3811     + ptp->dev.release = ptp_clock_release;
3812     + dev_set_drvdata(&ptp->dev, ptp);
3813     + dev_set_name(&ptp->dev, "ptp%d", ptp->index);
3814     +
3815     + /* Create a posix clock and link it to the device. */
3816     + err = posix_clock_register(&ptp->clock, &ptp->dev);
3817     if (err) {
3818     pr_err("failed to create posix clock\n");
3819     goto no_clock;
3820     @@ -273,8 +273,6 @@ no_clock:
3821     if (ptp->pps_source)
3822     pps_unregister_source(ptp->pps_source);
3823     no_pps:
3824     - device_destroy(ptp_class, ptp->devid);
3825     -no_device:
3826     ptp_cleanup_pin_groups(ptp);
3827     no_pin_groups:
3828     if (ptp->kworker)
3829     @@ -304,7 +302,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
3830     if (ptp->pps_source)
3831     pps_unregister_source(ptp->pps_source);
3832    
3833     - device_destroy(ptp_class, ptp->devid);
3834     ptp_cleanup_pin_groups(ptp);
3835    
3836     posix_clock_unregister(&ptp->clock);
3837     diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
3838     index 9171d42468fd..6b97155148f1 100644
3839     --- a/drivers/ptp/ptp_private.h
3840     +++ b/drivers/ptp/ptp_private.h
3841     @@ -28,7 +28,7 @@ struct timestamp_event_queue {
3842    
3843     struct ptp_clock {
3844     struct posix_clock clock;
3845     - struct device *dev;
3846     + struct device dev;
3847     struct ptp_clock_info *info;
3848     dev_t devid;
3849     int index; /* index into clocks.map */
3850     diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
3851     index f34ee41cbed8..4f4dd9d727c9 100644
3852     --- a/drivers/s390/crypto/zcrypt_error.h
3853     +++ b/drivers/s390/crypto/zcrypt_error.h
3854     @@ -61,6 +61,7 @@ struct error_hdr {
3855     #define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
3856     #define REP82_ERROR_RESERVED_FIELD 0x88
3857     #define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
3858     +#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
3859     #define REP82_ERROR_TRANSPORT_FAIL 0x90
3860     #define REP82_ERROR_PACKET_TRUNCATED 0xA0
3861     #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
3862     @@ -91,6 +92,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
3863     case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
3864     case REP82_ERROR_INVALID_DOMAIN_PENDING:
3865     case REP82_ERROR_INVALID_SPECIAL_CMD:
3866     + case REP82_ERROR_FILTERED_BY_HYPERVISOR:
3867     // REP88_ERROR_INVALID_KEY // '82' CEX2A
3868     // REP88_ERROR_OPERAND // '84' CEX2A
3869     // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
3870     diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
3871     index 536426f25e86..d4401c768a0c 100644
3872     --- a/drivers/scsi/NCR5380.c
3873     +++ b/drivers/scsi/NCR5380.c
3874     @@ -129,6 +129,9 @@
3875     #define NCR5380_release_dma_irq(x)
3876     #endif
3877    
3878     +static unsigned int disconnect_mask = ~0;
3879     +module_param(disconnect_mask, int, 0444);
3880     +
3881     static int do_abort(struct Scsi_Host *);
3882     static void do_reset(struct Scsi_Host *);
3883     static void bus_reset_cleanup(struct Scsi_Host *);
3884     @@ -954,7 +957,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
3885     int err;
3886     bool ret = true;
3887     bool can_disconnect = instance->irq != NO_IRQ &&
3888     - cmd->cmnd[0] != REQUEST_SENSE;
3889     + cmd->cmnd[0] != REQUEST_SENSE &&
3890     + (disconnect_mask & BIT(scmd_id(cmd)));
3891    
3892     NCR5380_dprint(NDEBUG_ARBITRATION, instance);
3893     dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
3894     diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
3895     index e809493d0d06..a82b63a66635 100644
3896     --- a/drivers/scsi/atari_scsi.c
3897     +++ b/drivers/scsi/atari_scsi.c
3898     @@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
3899     atari_scsi_template.sg_tablesize = SG_ALL;
3900     } else {
3901     atari_scsi_template.can_queue = 1;
3902     - atari_scsi_template.sg_tablesize = SG_NONE;
3903     + atari_scsi_template.sg_tablesize = 1;
3904     }
3905    
3906     if (setup_can_queue > 0)
3907     @@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
3908     if (setup_cmd_per_lun > 0)
3909     atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
3910    
3911     - /* Leave sg_tablesize at 0 on a Falcon! */
3912     - if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
3913     + /* Don't increase sg_tablesize on Falcon! */
3914     + if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
3915     atari_scsi_template.sg_tablesize = setup_sg_tablesize;
3916    
3917     if (setup_hostid >= 0) {
3918     diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
3919     index 66e58f0a75dc..23cbe4cda760 100644
3920     --- a/drivers/scsi/csiostor/csio_lnode.c
3921     +++ b/drivers/scsi/csiostor/csio_lnode.c
3922     @@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
3923     struct fc_fdmi_port_name *port_name;
3924     uint8_t buf[64];
3925     uint8_t *fc4_type;
3926     + unsigned long flags;
3927    
3928     if (fdmi_req->wr_status != FW_SUCCESS) {
3929     csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
3930     @@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
3931     len = (uint32_t)(pld - (uint8_t *)cmd);
3932    
3933     /* Submit FDMI RPA request */
3934     - spin_lock_irq(&hw->lock);
3935     + spin_lock_irqsave(&hw->lock, flags);
3936     if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
3937     FCOE_CT, &fdmi_req->dma_buf, len)) {
3938     CSIO_INC_STATS(ln, n_fdmi_err);
3939     csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
3940     }
3941     - spin_unlock_irq(&hw->lock);
3942     + spin_unlock_irqrestore(&hw->lock, flags);
3943     }
3944    
3945     /*
3946     @@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
3947     struct fc_fdmi_rpl *reg_pl;
3948     struct fs_fdmi_attrs *attrib_blk;
3949     uint8_t buf[64];
3950     + unsigned long flags;
3951    
3952     if (fdmi_req->wr_status != FW_SUCCESS) {
3953     csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
3954     @@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
3955     attrib_blk->numattrs = htonl(numattrs);
3956    
3957     /* Submit FDMI RHBA request */
3958     - spin_lock_irq(&hw->lock);
3959     + spin_lock_irqsave(&hw->lock, flags);
3960     if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
3961     FCOE_CT, &fdmi_req->dma_buf, len)) {
3962     CSIO_INC_STATS(ln, n_fdmi_err);
3963     csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
3964     }
3965     - spin_unlock_irq(&hw->lock);
3966     + spin_unlock_irqrestore(&hw->lock, flags);
3967     }
3968    
3969     /*
3970     @@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
3971     void *cmd;
3972     struct fc_fdmi_port_name *port_name;
3973     uint32_t len;
3974     + unsigned long flags;
3975    
3976     if (fdmi_req->wr_status != FW_SUCCESS) {
3977     csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
3978     @@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
3979     len += sizeof(*port_name);
3980    
3981     /* Submit FDMI request */
3982     - spin_lock_irq(&hw->lock);
3983     + spin_lock_irqsave(&hw->lock, flags);
3984     if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
3985     FCOE_CT, &fdmi_req->dma_buf, len)) {
3986     CSIO_INC_STATS(ln, n_fdmi_err);
3987     csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
3988     }
3989     - spin_unlock_irq(&hw->lock);
3990     + spin_unlock_irqrestore(&hw->lock, flags);
3991     }
3992    
3993     /**
3994     diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
3995     index 0847e682797b..633effb09c9c 100644
3996     --- a/drivers/scsi/hisi_sas/hisi_sas_main.c
3997     +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
3998     @@ -587,7 +587,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
3999     dev = hisi_hba->dev;
4000    
4001     if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
4002     - if (in_softirq())
4003     + /*
4004     + * For IOs from upper layer, it may already disable preempt
4005     + * in the IO path, if disable preempt again in down(),
4006     + * function schedule() will report schedule_bug(), so check
4007     + * preemptible() before goto down().
4008     + */
4009     + if (!preemptible())
4010     return -EINVAL;
4011    
4012     down(&hisi_hba->sem);
4013     @@ -2676,6 +2682,7 @@ int hisi_sas_probe(struct platform_device *pdev,
4014     err_out_register_ha:
4015     scsi_remove_host(shost);
4016     err_out_ha:
4017     + hisi_sas_debugfs_exit(hisi_hba);
4018     hisi_sas_free(hisi_hba);
4019     scsi_host_put(shost);
4020     return rc;
4021     diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
4022     index cb8d087762db..ef32ee12f606 100644
4023     --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
4024     +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
4025     @@ -3259,6 +3259,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4026     err_out_register_ha:
4027     scsi_remove_host(shost);
4028     err_out_ha:
4029     + hisi_sas_debugfs_exit(hisi_hba);
4030     scsi_host_put(shost);
4031     err_out_regions:
4032     pci_release_regions(pdev);
4033     diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
4034     index 7bedbe877704..0bc63a7ab41c 100644
4035     --- a/drivers/scsi/iscsi_tcp.c
4036     +++ b/drivers/scsi/iscsi_tcp.c
4037     @@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
4038     {
4039     struct iscsi_conn *conn = task->conn;
4040     unsigned int noreclaim_flag;
4041     + struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
4042     + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
4043     int rc = 0;
4044    
4045     + if (!tcp_sw_conn->sock) {
4046     + iscsi_conn_printk(KERN_ERR, conn,
4047     + "Transport not bound to socket!\n");
4048     + return -EINVAL;
4049     + }
4050     +
4051     noreclaim_flag = memalloc_noreclaim_save();
4052    
4053     while (iscsi_sw_tcp_xmit_qlen(conn)) {
4054     diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
4055     index 25e86706e207..f883fac2d2b1 100644
4056     --- a/drivers/scsi/lpfc/lpfc_ct.c
4057     +++ b/drivers/scsi/lpfc/lpfc_ct.c
4058     @@ -1868,6 +1868,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4059     if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4060     switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
4061     case IOERR_SLI_ABORTED:
4062     + case IOERR_SLI_DOWN:
4063     + /* Driver aborted this IO. No retry as error
4064     + * is likely Offline->Online or some adapter
4065     + * error. Recovery will try again.
4066     + */
4067     + break;
4068     case IOERR_ABORT_IN_PROGRESS:
4069     case IOERR_SEQUENCE_TIMEOUT:
4070     case IOERR_ILLEGAL_FRAME:
4071     diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
4072     index d5303994bfd6..66f8867dd837 100644
4073     --- a/drivers/scsi/lpfc/lpfc_els.c
4074     +++ b/drivers/scsi/lpfc/lpfc_els.c
4075     @@ -2236,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4076     struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4077     IOCB_t *irsp;
4078     struct lpfc_nodelist *ndlp;
4079     + char *mode;
4080    
4081     /* we pass cmdiocb to state machine which needs rspiocb as well */
4082     cmdiocb->context_un.rsp_iocb = rspiocb;
4083     @@ -2273,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4084     goto out;
4085     }
4086    
4087     + /* If we don't send GFT_ID to Fabric, a PRLI error
4088     + * could be expected.
4089     + */
4090     + if ((vport->fc_flag & FC_FABRIC) ||
4091     + (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
4092     + mode = KERN_ERR;
4093     + else
4094     + mode = KERN_INFO;
4095     +
4096     /* PRLI failed */
4097     - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4098     + lpfc_printf_vlog(vport, mode, LOG_ELS,
4099     "2754 PRLI failure DID:%06X Status:x%x/x%x, "
4100     "data: x%x\n",
4101     ndlp->nlp_DID, irsp->ulpStatus,
4102     @@ -4430,7 +4440,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4103     mempool_free(mbox, phba->mbox_mem_pool);
4104     }
4105     out:
4106     - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
4107     + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
4108     spin_lock_irq(shost->host_lock);
4109     ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
4110     spin_unlock_irq(shost->host_lock);
4111     @@ -6455,7 +6465,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4112     uint32_t payload_len, length, nportid, *cmd;
4113     int rscn_cnt;
4114     int rscn_id = 0, hba_id = 0;
4115     - int i;
4116     + int i, tmo;
4117    
4118     pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4119     lp = (uint32_t *) pcmd->virt;
4120     @@ -6561,6 +6571,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4121    
4122     spin_lock_irq(shost->host_lock);
4123     vport->fc_flag |= FC_RSCN_DEFERRED;
4124     +
4125     + /* Restart disctmo if its already running */
4126     + if (vport->fc_flag & FC_DISC_TMO) {
4127     + tmo = ((phba->fc_ratov * 3) + 3);
4128     + mod_timer(&vport->fc_disctmo,
4129     + jiffies + msecs_to_jiffies(1000 * tmo));
4130     + }
4131     if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
4132     !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
4133     vport->fc_flag |= FC_RSCN_MODE;
4134     @@ -7986,20 +8003,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
4135     struct lpfc_sli_ring *pring;
4136     struct lpfc_iocbq *tmp_iocb, *piocb;
4137     IOCB_t *cmd = NULL;
4138     + unsigned long iflags = 0;
4139    
4140     lpfc_fabric_abort_vport(vport);
4141     +
4142     /*
4143     * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
4144     * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
4145     * ultimately grabs the ring_lock, the driver must splice the list into
4146     * a working list and release the locks before calling the abort.
4147     */
4148     - spin_lock_irq(&phba->hbalock);
4149     + spin_lock_irqsave(&phba->hbalock, iflags);
4150     pring = lpfc_phba_elsring(phba);
4151    
4152     /* Bail out if we've no ELS wq, like in PCI error recovery case. */
4153     if (unlikely(!pring)) {
4154     - spin_unlock_irq(&phba->hbalock);
4155     + spin_unlock_irqrestore(&phba->hbalock, iflags);
4156     return;
4157     }
4158    
4159     @@ -8014,6 +8033,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
4160     if (piocb->vport != vport)
4161     continue;
4162    
4163     + if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
4164     + continue;
4165     +
4166     /* On the ELS ring we can have ELS_REQUESTs or
4167     * GEN_REQUESTs waiting for a response.
4168     */
4169     @@ -8037,21 +8059,21 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
4170    
4171     if (phba->sli_rev == LPFC_SLI_REV4)
4172     spin_unlock(&pring->ring_lock);
4173     - spin_unlock_irq(&phba->hbalock);
4174     + spin_unlock_irqrestore(&phba->hbalock, iflags);
4175    
4176     /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
4177     list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
4178     - spin_lock_irq(&phba->hbalock);
4179     + spin_lock_irqsave(&phba->hbalock, iflags);
4180     list_del_init(&piocb->dlist);
4181     lpfc_sli_issue_abort_iotag(phba, pring, piocb);
4182     - spin_unlock_irq(&phba->hbalock);
4183     + spin_unlock_irqrestore(&phba->hbalock, iflags);
4184     }
4185     if (!list_empty(&abort_list))
4186     lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4187     "3387 abort list for txq not empty\n");
4188     INIT_LIST_HEAD(&abort_list);
4189    
4190     - spin_lock_irq(&phba->hbalock);
4191     + spin_lock_irqsave(&phba->hbalock, iflags);
4192     if (phba->sli_rev == LPFC_SLI_REV4)
4193     spin_lock(&pring->ring_lock);
4194    
4195     @@ -8091,7 +8113,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
4196    
4197     if (phba->sli_rev == LPFC_SLI_REV4)
4198     spin_unlock(&pring->ring_lock);
4199     - spin_unlock_irq(&phba->hbalock);
4200     + spin_unlock_irqrestore(&phba->hbalock, iflags);
4201    
4202     /* Cancel all the IOCBs from the completions list */
4203     lpfc_sli_cancel_iocbs(phba, &abort_list,
4204     diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
4205     index 749286acdc17..1286c658ba34 100644
4206     --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
4207     +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
4208     @@ -700,7 +700,10 @@ lpfc_work_done(struct lpfc_hba *phba)
4209     if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
4210     set_bit(LPFC_DATA_READY, &phba->data_flags);
4211     } else {
4212     - if (phba->link_state >= LPFC_LINK_UP ||
4213     + /* Driver could have abort request completed in queue
4214     + * when link goes down. Allow for this transition.
4215     + */
4216     + if (phba->link_state >= LPFC_LINK_DOWN ||
4217     phba->link_flag & LS_MDS_LOOPBACK) {
4218     pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
4219     lpfc_sli_handle_slow_ring_event(phba, pring,
4220     @@ -5405,9 +5408,14 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
4221     /* If we've already received a PLOGI from this NPort
4222     * we don't need to try to discover it again.
4223     */
4224     - if (ndlp->nlp_flag & NLP_RCV_PLOGI)
4225     + if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
4226     + !(ndlp->nlp_type &
4227     + (NLP_FCP_TARGET | NLP_NVME_TARGET)))
4228     return NULL;
4229    
4230     + ndlp->nlp_prev_state = ndlp->nlp_state;
4231     + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4232     +
4233     spin_lock_irq(shost->host_lock);
4234     ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4235     spin_unlock_irq(shost->host_lock);
4236     diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
4237     index fc6e4546d738..696171382558 100644
4238     --- a/drivers/scsi/lpfc/lpfc_nportdisc.c
4239     +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
4240     @@ -484,8 +484,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4241     * single discovery thread, this will cause a huge delay in
4242     * discovery. Also this will cause multiple state machines
4243     * running in parallel for this node.
4244     + * This only applies to a fabric environment.
4245     */
4246     - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
4247     + if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
4248     + (vport->fc_flag & FC_FABRIC)) {
4249     /* software abort outstanding PLOGI */
4250     lpfc_els_abort(phba, ndlp);
4251     }
4252     diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
4253     index ad8ef67a1db3..aa82d538a18a 100644
4254     --- a/drivers/scsi/lpfc/lpfc_scsi.c
4255     +++ b/drivers/scsi/lpfc/lpfc_scsi.c
4256     @@ -4846,20 +4846,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4257     ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4258     abtsiocb, 0);
4259     }
4260     - /* no longer need the lock after this point */
4261     - spin_unlock_irqrestore(&phba->hbalock, flags);
4262    
4263     if (ret_val == IOCB_ERROR) {
4264     /* Indicate the IO is not being aborted by the driver. */
4265     iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
4266     lpfc_cmd->waitq = NULL;
4267     spin_unlock(&lpfc_cmd->buf_lock);
4268     + spin_unlock_irqrestore(&phba->hbalock, flags);
4269     lpfc_sli_release_iocbq(phba, abtsiocb);
4270     ret = FAILED;
4271     goto out;
4272     }
4273    
4274     + /* no longer need the lock after this point */
4275     spin_unlock(&lpfc_cmd->buf_lock);
4276     + spin_unlock_irqrestore(&phba->hbalock, flags);
4277    
4278     if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4279     lpfc_sli_handle_fast_ring_event(phba,
4280     diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
4281     index 5ed4219675eb..2b0e7b32c2df 100644
4282     --- a/drivers/scsi/lpfc/lpfc_sli.c
4283     +++ b/drivers/scsi/lpfc/lpfc_sli.c
4284     @@ -11050,9 +11050,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4285     irsp->ulpStatus, irsp->un.ulpWord[4]);
4286    
4287     spin_unlock_irq(&phba->hbalock);
4288     - if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
4289     - irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
4290     - lpfc_sli_release_iocbq(phba, abort_iocb);
4291     }
4292     release_iocb:
4293     lpfc_sli_release_iocbq(phba, cmdiocb);
4294     @@ -13161,13 +13158,19 @@ send_current_mbox:
4295     phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4296     /* Setting active mailbox pointer need to be in sync to flag clear */
4297     phba->sli.mbox_active = NULL;
4298     + if (bf_get(lpfc_trailer_consumed, mcqe))
4299     + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
4300     spin_unlock_irqrestore(&phba->hbalock, iflags);
4301     /* Wake up worker thread to post the next pending mailbox command */
4302     lpfc_worker_wake_up(phba);
4303     + return workposted;
4304     +
4305     out_no_mqe_complete:
4306     + spin_lock_irqsave(&phba->hbalock, iflags);
4307     if (bf_get(lpfc_trailer_consumed, mcqe))
4308     lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
4309     - return workposted;
4310     + spin_unlock_irqrestore(&phba->hbalock, iflags);
4311     + return false;
4312     }
4313    
4314     /**
4315     @@ -18184,6 +18187,13 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
4316     static void
4317     __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
4318     {
4319     + /*
4320     + * if the rpi value indicates a prior unreg has already
4321     + * been done, skip the unreg.
4322     + */
4323     + if (rpi == LPFC_RPI_ALLOC_ERROR)
4324     + return;
4325     +
4326     if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
4327     phba->sli4_hba.rpi_count--;
4328     phba->sli4_hba.max_cfg_param.rpi_used--;
4329     diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
4330     index 9c5566217ef6..b5dde9d0d054 100644
4331     --- a/drivers/scsi/mac_scsi.c
4332     +++ b/drivers/scsi/mac_scsi.c
4333     @@ -464,7 +464,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
4334     mac_scsi_template.can_queue = setup_can_queue;
4335     if (setup_cmd_per_lun > 0)
4336     mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
4337     - if (setup_sg_tablesize >= 0)
4338     + if (setup_sg_tablesize > 0)
4339     mac_scsi_template.sg_tablesize = setup_sg_tablesize;
4340     if (setup_hostid >= 0)
4341     mac_scsi_template.this_id = setup_hostid & 7;
4342     diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
4343     index 7d696952b376..b95f7d062ea4 100644
4344     --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
4345     +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
4346     @@ -778,6 +778,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
4347     case MPI2_FUNCTION_NVME_ENCAPSULATED:
4348     {
4349     nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
4350     + if (!ioc->pcie_sg_lookup) {
4351     + dtmprintk(ioc, ioc_info(ioc,
4352     + "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
4353     + ));
4354     +
4355     + if (ioc->logging_level & MPT_DEBUG_TM)
4356     + _debug_dump_mf(nvme_encap_request,
4357     + ioc->request_sz/4);
4358     + mpt3sas_base_free_smid(ioc, smid);
4359     + ret = -EINVAL;
4360     + goto out;
4361     + }
4362     /*
4363     * Get the Physical Address of the sense buffer.
4364     * Use Error Response buffer address field to hold the sense
4365     @@ -1584,7 +1596,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
4366     ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
4367     __func__, request_data_sz);
4368     mpt3sas_base_free_smid(ioc, smid);
4369     - return -ENOMEM;
4370     + rc = -ENOMEM;
4371     + goto out;
4372     }
4373     ioc->diag_buffer[buffer_type] = request_data;
4374     ioc->diag_buffer_sz[buffer_type] = request_data_sz;
4375     diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
4376     index 73261902d75d..161bf4760eac 100644
4377     --- a/drivers/scsi/pm8001/pm80xx_hwi.c
4378     +++ b/drivers/scsi/pm8001/pm80xx_hwi.c
4379     @@ -2382,6 +2382,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
4380     pm8001_printk("task 0x%p done with io_status 0x%x"
4381     " resp 0x%x stat 0x%x but aborted by upper layer!\n",
4382     t, status, ts->resp, ts->stat));
4383     + if (t->slow_task)
4384     + complete(&t->slow_task->completion);
4385     pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
4386     } else {
4387     spin_unlock_irqrestore(&t->task_state_lock, flags);
4388     diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
4389     index d323523f5f9d..32965ec76965 100644
4390     --- a/drivers/scsi/scsi_debug.c
4391     +++ b/drivers/scsi/scsi_debug.c
4392     @@ -5263,6 +5263,11 @@ static int __init scsi_debug_init(void)
4393     return -EINVAL;
4394     }
4395    
4396     + if (sdebug_num_tgts < 0) {
4397     + pr_err("num_tgts must be >= 0\n");
4398     + return -EINVAL;
4399     + }
4400     +
4401     if (sdebug_guard > 1) {
4402     pr_err("guard must be 0 or 1\n");
4403     return -EINVAL;
4404     diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
4405     index 0f17e7dac1b0..07a2425ffa2c 100644
4406     --- a/drivers/scsi/scsi_trace.c
4407     +++ b/drivers/scsi/scsi_trace.c
4408     @@ -18,15 +18,18 @@ static const char *
4409     scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
4410     {
4411     const char *ret = trace_seq_buffer_ptr(p);
4412     - sector_t lba = 0, txlen = 0;
4413     + u32 lba = 0, txlen;
4414    
4415     lba |= ((cdb[1] & 0x1F) << 16);
4416     lba |= (cdb[2] << 8);
4417     lba |= cdb[3];
4418     - txlen = cdb[4];
4419     + /*
4420     + * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
4421     + * logical blocks shall be read (READ(6)) or written (WRITE(6)).
4422     + */
4423     + txlen = cdb[4] ? cdb[4] : 256;
4424    
4425     - trace_seq_printf(p, "lba=%llu txlen=%llu",
4426     - (unsigned long long)lba, (unsigned long long)txlen);
4427     + trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
4428     trace_seq_putc(p, 0);
4429    
4430     return ret;
4431     diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
4432     index 955e4c938d49..701b842296f0 100644
4433     --- a/drivers/scsi/sun3_scsi.c
4434     +++ b/drivers/scsi/sun3_scsi.c
4435     @@ -501,7 +501,7 @@ static struct scsi_host_template sun3_scsi_template = {
4436     .eh_host_reset_handler = sun3scsi_host_reset,
4437     .can_queue = 16,
4438     .this_id = 7,
4439     - .sg_tablesize = SG_NONE,
4440     + .sg_tablesize = 1,
4441     .cmd_per_lun = 2,
4442     .dma_boundary = PAGE_SIZE - 1,
4443     .cmd_size = NCR5380_CMD_SIZE,
4444     @@ -523,7 +523,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
4445     sun3_scsi_template.can_queue = setup_can_queue;
4446     if (setup_cmd_per_lun > 0)
4447     sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
4448     - if (setup_sg_tablesize >= 0)
4449     + if (setup_sg_tablesize > 0)
4450     sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
4451     if (setup_hostid >= 0)
4452     sun3_scsi_template.this_id = setup_hostid & 7;
4453     diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
4454     index 969a36b15897..ad2abc96c0f1 100644
4455     --- a/drivers/scsi/ufs/ufs-sysfs.c
4456     +++ b/drivers/scsi/ufs/ufs-sysfs.c
4457     @@ -126,13 +126,16 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4458     return;
4459    
4460     spin_lock_irqsave(hba->host->host_lock, flags);
4461     - if (hba->ahit == ahit)
4462     - goto out_unlock;
4463     - hba->ahit = ahit;
4464     - if (!pm_runtime_suspended(hba->dev))
4465     - ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4466     -out_unlock:
4467     + if (hba->ahit != ahit)
4468     + hba->ahit = ahit;
4469     spin_unlock_irqrestore(hba->host->host_lock, flags);
4470     + if (!pm_runtime_suspended(hba->dev)) {
4471     + pm_runtime_get_sync(hba->dev);
4472     + ufshcd_hold(hba, false);
4473     + ufshcd_auto_hibern8_enable(hba);
4474     + ufshcd_release(hba);
4475     + pm_runtime_put(hba->dev);
4476     + }
4477     }
4478    
4479     /* Convert Auto-Hibernate Idle Timer register value to microseconds */
4480     diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
4481     index 11a87f51c442..25a6a25b17a2 100644
4482     --- a/drivers/scsi/ufs/ufshcd.c
4483     +++ b/drivers/scsi/ufs/ufshcd.c
4484     @@ -2986,10 +2986,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
4485     goto out_unlock;
4486     }
4487    
4488     - hba->dev_cmd.query.descriptor = NULL;
4489     *buf_len = be16_to_cpu(response->upiu_res.length);
4490    
4491     out_unlock:
4492     + hba->dev_cmd.query.descriptor = NULL;
4493     mutex_unlock(&hba->dev_cmd.lock);
4494     out:
4495     ufshcd_release(hba);
4496     @@ -3885,15 +3885,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4497     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4498    
4499     if (ret) {
4500     + int err;
4501     +
4502     dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4503     __func__, ret);
4504    
4505     /*
4506     - * If link recovery fails then return error so that caller
4507     - * don't retry the hibern8 enter again.
4508     + * If link recovery fails then return error code returned from
4509     + * ufshcd_link_recovery().
4510     + * If link recovery succeeds then return -EAGAIN to attempt
4511     + * hibern8 enter retry again.
4512     */
4513     - if (ufshcd_link_recovery(hba))
4514     - ret = -ENOLINK;
4515     + err = ufshcd_link_recovery(hba);
4516     + if (err) {
4517     + dev_err(hba->dev, "%s: link recovery failed", __func__);
4518     + ret = err;
4519     + } else {
4520     + ret = -EAGAIN;
4521     + }
4522     } else
4523     ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4524     POST_CHANGE);
4525     @@ -3907,7 +3916,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4526    
4527     for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
4528     ret = __ufshcd_uic_hibern8_enter(hba);
4529     - if (!ret || ret == -ENOLINK)
4530     + if (!ret)
4531     goto out;
4532     }
4533     out:
4534     @@ -3941,7 +3950,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4535     return ret;
4536     }
4537    
4538     -static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4539     +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4540     {
4541     unsigned long flags;
4542    
4543     @@ -6881,9 +6890,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
4544     /* UniPro link is active now */
4545     ufshcd_set_link_active(hba);
4546    
4547     - /* Enable Auto-Hibernate if configured */
4548     - ufshcd_auto_hibern8_enable(hba);
4549     -
4550     ret = ufshcd_verify_dev_init(hba);
4551     if (ret)
4552     goto out;
4553     @@ -6934,6 +6940,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
4554     /* set the state as operational after switching to desired gear */
4555     hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4556    
4557     + /* Enable Auto-Hibernate if configured */
4558     + ufshcd_auto_hibern8_enable(hba);
4559     +
4560     /*
4561     * If we are in error handling context or in power management callbacks
4562     * context, no need to scan the host
4563     @@ -7950,12 +7959,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
4564     if (hba->clk_scaling.is_allowed)
4565     ufshcd_resume_clkscaling(hba);
4566    
4567     - /* Schedule clock gating in case of no access to UFS device yet */
4568     - ufshcd_release(hba);
4569     -
4570     /* Enable Auto-Hibernate if configured */
4571     ufshcd_auto_hibern8_enable(hba);
4572    
4573     + /* Schedule clock gating in case of no access to UFS device yet */
4574     + ufshcd_release(hba);
4575     +
4576     goto out;
4577    
4578     set_old_link_state:
4579     diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
4580     index c94cfda52829..52c9676a1242 100644
4581     --- a/drivers/scsi/ufs/ufshcd.h
4582     +++ b/drivers/scsi/ufs/ufshcd.h
4583     @@ -916,6 +916,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
4584     int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
4585     enum flag_idn idn, bool *flag_res);
4586    
4587     +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
4588     +
4589     #define SD_ASCII_STD true
4590     #define SD_RAW false
4591     int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
4592     diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
4593     index ca8e3abeb2c7..a23a8e5794f5 100644
4594     --- a/drivers/scsi/zorro_esp.c
4595     +++ b/drivers/scsi/zorro_esp.c
4596     @@ -218,7 +218,14 @@ static int fastlane_esp_irq_pending(struct esp *esp)
4597     static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
4598     u32 dma_len)
4599     {
4600     - return dma_len > 0xFFFF ? 0xFFFF : dma_len;
4601     + return dma_len > (1U << 16) ? (1U << 16) : dma_len;
4602     +}
4603     +
4604     +static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
4605     + u32 dma_len)
4606     +{
4607     + /* The old driver used 0xfffc as limit, so do that here too */
4608     + return dma_len > 0xfffc ? 0xfffc : dma_len;
4609     }
4610    
4611     static void zorro_esp_reset_dma(struct esp *esp)
4612     @@ -604,7 +611,7 @@ static const struct esp_driver_ops fastlane_esp_ops = {
4613     .esp_write8 = zorro_esp_write8,
4614     .esp_read8 = zorro_esp_read8,
4615     .irq_pending = fastlane_esp_irq_pending,
4616     - .dma_length_limit = zorro_esp_dma_length_limit,
4617     + .dma_length_limit = fastlane_esp_dma_length_limit,
4618     .reset_dma = zorro_esp_reset_dma,
4619     .dma_drain = zorro_esp_dma_drain,
4620     .dma_invalidate = fastlane_esp_dma_invalidate,
4621     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
4622     index d19e051f2bc2..f194ffc4699e 100644
4623     --- a/drivers/target/iscsi/iscsi_target.c
4624     +++ b/drivers/target/iscsi/iscsi_target.c
4625     @@ -1165,7 +1165,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
4626     hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
4627     conn->cid);
4628    
4629     - target_get_sess_cmd(&cmd->se_cmd, true);
4630     + if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
4631     + return iscsit_add_reject_cmd(cmd,
4632     + ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
4633    
4634     cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
4635     scsilun_to_int(&hdr->lun));
4636     @@ -2002,7 +2004,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
4637     conn->sess->se_sess, 0, DMA_NONE,
4638     TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
4639    
4640     - target_get_sess_cmd(&cmd->se_cmd, true);
4641     + if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
4642     + return iscsit_add_reject_cmd(cmd,
4643     + ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
4644    
4645     /*
4646     * TASK_REASSIGN for ERL=2 / connection stays inside of
4647     @@ -4232,6 +4236,8 @@ int iscsit_close_connection(
4648     * must wait until they have completed.
4649     */
4650     iscsit_check_conn_usage_count(conn);
4651     + target_sess_cmd_list_set_waiting(sess->se_sess);
4652     + target_wait_for_sess_cmds(sess->se_sess);
4653    
4654     ahash_request_free(conn->conn_tx_hash);
4655     if (conn->conn_rx_hash) {
4656     diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
4657     index 51ddca2033e0..8fe9b12a07a4 100644
4658     --- a/drivers/target/iscsi/iscsi_target_auth.c
4659     +++ b/drivers/target/iscsi/iscsi_target_auth.c
4660     @@ -70,7 +70,7 @@ static int chap_check_algorithm(const char *a_str)
4661     if (!token)
4662     goto out;
4663    
4664     - if (!strncmp(token, "5", 1)) {
4665     + if (!strcmp(token, "5")) {
4666     pr_debug("Selected MD5 Algorithm\n");
4667     kfree(orig);
4668     return CHAP_DIGEST_MD5;
4669     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
4670     index 7f06a62f8661..eda8b4736c15 100644
4671     --- a/drivers/target/target_core_transport.c
4672     +++ b/drivers/target/target_core_transport.c
4673     @@ -584,6 +584,15 @@ void transport_free_session(struct se_session *se_sess)
4674     }
4675     EXPORT_SYMBOL(transport_free_session);
4676    
4677     +static int target_release_res(struct se_device *dev, void *data)
4678     +{
4679     + struct se_session *sess = data;
4680     +
4681     + if (dev->reservation_holder == sess)
4682     + target_release_reservation(dev);
4683     + return 0;
4684     +}
4685     +
4686     void transport_deregister_session(struct se_session *se_sess)
4687     {
4688     struct se_portal_group *se_tpg = se_sess->se_tpg;
4689     @@ -600,6 +609,12 @@ void transport_deregister_session(struct se_session *se_sess)
4690     se_sess->fabric_sess_ptr = NULL;
4691     spin_unlock_irqrestore(&se_tpg->session_lock, flags);
4692    
4693     + /*
4694     + * Since the session is being removed, release SPC-2
4695     + * reservations held by the session that is disappearing.
4696     + */
4697     + target_for_each_device(target_release_res, se_sess);
4698     +
4699     pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
4700     se_tpg->se_tpg_tfo->fabric_name);
4701     /*
4702     diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
4703     index 9f57736fe15e..88a5aa6624b4 100644
4704     --- a/drivers/vhost/vsock.c
4705     +++ b/drivers/vhost/vsock.c
4706     @@ -437,7 +437,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
4707     virtio_transport_deliver_tap_pkt(pkt);
4708    
4709     /* Only accept correctly addressed packets */
4710     - if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
4711     + if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
4712     + le64_to_cpu(pkt->hdr.dst_cid) ==
4713     + vhost_transport_get_local_cid())
4714     virtio_transport_recv_pkt(pkt);
4715     else
4716     virtio_transport_free_pkt(pkt);
4717     diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
4718     index 5ce51026989a..ba5d535a6db2 100644
4719     --- a/drivers/watchdog/imx7ulp_wdt.c
4720     +++ b/drivers/watchdog/imx7ulp_wdt.c
4721     @@ -106,12 +106,28 @@ static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
4722     return 0;
4723     }
4724    
4725     +static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
4726     + unsigned long action, void *data)
4727     +{
4728     + struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
4729     +
4730     + imx7ulp_wdt_enable(wdt->base, true);
4731     + imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
4732     +
4733     + /* wait for wdog to fire */
4734     + while (true)
4735     + ;
4736     +
4737     + return NOTIFY_DONE;
4738     +}
4739     +
4740     static const struct watchdog_ops imx7ulp_wdt_ops = {
4741     .owner = THIS_MODULE,
4742     .start = imx7ulp_wdt_start,
4743     .stop = imx7ulp_wdt_stop,
4744     .ping = imx7ulp_wdt_ping,
4745     .set_timeout = imx7ulp_wdt_set_timeout,
4746     + .restart = imx7ulp_wdt_restart,
4747     };
4748    
4749     static const struct watchdog_info imx7ulp_wdt_info = {
4750     diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
4751     index dbd2ad4c9294..62483a99105c 100644
4752     --- a/drivers/watchdog/watchdog_dev.c
4753     +++ b/drivers/watchdog/watchdog_dev.c
4754     @@ -34,7 +34,6 @@
4755     #include <linux/init.h> /* For __init/__exit/... */
4756     #include <linux/hrtimer.h> /* For hrtimers */
4757     #include <linux/kernel.h> /* For printk/panic/... */
4758     -#include <linux/kref.h> /* For data references */
4759     #include <linux/kthread.h> /* For kthread_work */
4760     #include <linux/miscdevice.h> /* For handling misc devices */
4761     #include <linux/module.h> /* For module stuff/... */
4762     @@ -52,14 +51,14 @@
4763    
4764     /*
4765     * struct watchdog_core_data - watchdog core internal data
4766     - * @kref: Reference count.
4767     + * @dev: The watchdog's internal device
4768     * @cdev: The watchdog's Character device.
4769     * @wdd: Pointer to watchdog device.
4770     * @lock: Lock for watchdog core.
4771     * @status: Watchdog core internal status bits.
4772     */
4773     struct watchdog_core_data {
4774     - struct kref kref;
4775     + struct device dev;
4776     struct cdev cdev;
4777     struct watchdog_device *wdd;
4778     struct mutex lock;
4779     @@ -158,7 +157,8 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd)
4780     ktime_t t = watchdog_next_keepalive(wdd);
4781    
4782     if (t > 0)
4783     - hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
4784     + hrtimer_start(&wd_data->timer, t,
4785     + HRTIMER_MODE_REL_HARD);
4786     } else {
4787     hrtimer_cancel(&wd_data->timer);
4788     }
4789     @@ -177,7 +177,7 @@ static int __watchdog_ping(struct watchdog_device *wdd)
4790     if (ktime_after(earliest_keepalive, now)) {
4791     hrtimer_start(&wd_data->timer,
4792     ktime_sub(earliest_keepalive, now),
4793     - HRTIMER_MODE_REL);
4794     + HRTIMER_MODE_REL_HARD);
4795     return 0;
4796     }
4797    
4798     @@ -839,7 +839,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
4799     file->private_data = wd_data;
4800    
4801     if (!hw_running)
4802     - kref_get(&wd_data->kref);
4803     + get_device(&wd_data->dev);
4804    
4805     /*
4806     * open_timeout only applies for the first open from
4807     @@ -860,11 +860,11 @@ out_clear:
4808     return err;
4809     }
4810    
4811     -static void watchdog_core_data_release(struct kref *kref)
4812     +static void watchdog_core_data_release(struct device *dev)
4813     {
4814     struct watchdog_core_data *wd_data;
4815    
4816     - wd_data = container_of(kref, struct watchdog_core_data, kref);
4817     + wd_data = container_of(dev, struct watchdog_core_data, dev);
4818    
4819     kfree(wd_data);
4820     }
4821     @@ -924,7 +924,7 @@ done:
4822     */
4823     if (!running) {
4824     module_put(wd_data->cdev.owner);
4825     - kref_put(&wd_data->kref, watchdog_core_data_release);
4826     + put_device(&wd_data->dev);
4827     }
4828     return 0;
4829     }
4830     @@ -943,17 +943,22 @@ static struct miscdevice watchdog_miscdev = {
4831     .fops = &watchdog_fops,
4832     };
4833    
4834     +static struct class watchdog_class = {
4835     + .name = "watchdog",
4836     + .owner = THIS_MODULE,
4837     + .dev_groups = wdt_groups,
4838     +};
4839     +
4840     /*
4841     * watchdog_cdev_register: register watchdog character device
4842     * @wdd: watchdog device
4843     - * @devno: character device number
4844     *
4845     * Register a watchdog character device including handling the legacy
4846     * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
4847     * thus we set it up like that.
4848     */
4849    
4850     -static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
4851     +static int watchdog_cdev_register(struct watchdog_device *wdd)
4852     {
4853     struct watchdog_core_data *wd_data;
4854     int err;
4855     @@ -961,7 +966,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
4856     wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
4857     if (!wd_data)
4858     return -ENOMEM;
4859     - kref_init(&wd_data->kref);
4860     mutex_init(&wd_data->lock);
4861    
4862     wd_data->wdd = wdd;
4863     @@ -971,7 +975,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
4864     return -ENODEV;
4865    
4866     kthread_init_work(&wd_data->work, watchdog_ping_work);
4867     - hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4868     + hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
4869     wd_data->timer.function = watchdog_timer_expired;
4870    
4871     if (wdd->id == 0) {
4872     @@ -990,23 +994,33 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
4873     }
4874     }
4875    
4876     + device_initialize(&wd_data->dev);
4877     + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
4878     + wd_data->dev.class = &watchdog_class;
4879     + wd_data->dev.parent = wdd->parent;
4880     + wd_data->dev.groups = wdd->groups;
4881     + wd_data->dev.release = watchdog_core_data_release;
4882     + dev_set_drvdata(&wd_data->dev, wdd);
4883     + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
4884     +
4885     /* Fill in the data structures */
4886     cdev_init(&wd_data->cdev, &watchdog_fops);
4887     - wd_data->cdev.owner = wdd->ops->owner;
4888    
4889     /* Add the device */
4890     - err = cdev_add(&wd_data->cdev, devno, 1);
4891     + err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
4892     if (err) {
4893     pr_err("watchdog%d unable to add device %d:%d\n",
4894     wdd->id, MAJOR(watchdog_devt), wdd->id);
4895     if (wdd->id == 0) {
4896     misc_deregister(&watchdog_miscdev);
4897     old_wd_data = NULL;
4898     - kref_put(&wd_data->kref, watchdog_core_data_release);
4899     + put_device(&wd_data->dev);
4900     }
4901     return err;
4902     }
4903    
4904     + wd_data->cdev.owner = wdd->ops->owner;
4905     +
4906     /* Record time of most recent heartbeat as 'just before now'. */
4907     wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
4908     watchdog_set_open_deadline(wd_data);
4909     @@ -1017,9 +1031,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
4910     */
4911     if (watchdog_hw_running(wdd)) {
4912     __module_get(wdd->ops->owner);
4913     - kref_get(&wd_data->kref);
4914     + get_device(&wd_data->dev);
4915     if (handle_boot_enabled)
4916     - hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
4917     + hrtimer_start(&wd_data->timer, 0,
4918     + HRTIMER_MODE_REL_HARD);
4919     else
4920     pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
4921     wdd->id);
4922     @@ -1040,7 +1055,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
4923     {
4924     struct watchdog_core_data *wd_data = wdd->wd_data;
4925    
4926     - cdev_del(&wd_data->cdev);
4927     + cdev_device_del(&wd_data->cdev, &wd_data->dev);
4928     if (wdd->id == 0) {
4929     misc_deregister(&watchdog_miscdev);
4930     old_wd_data = NULL;
4931     @@ -1059,15 +1074,9 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
4932     hrtimer_cancel(&wd_data->timer);
4933     kthread_cancel_work_sync(&wd_data->work);
4934    
4935     - kref_put(&wd_data->kref, watchdog_core_data_release);
4936     + put_device(&wd_data->dev);
4937     }
4938    
4939     -static struct class watchdog_class = {
4940     - .name = "watchdog",
4941     - .owner = THIS_MODULE,
4942     - .dev_groups = wdt_groups,
4943     -};
4944     -
4945     static int watchdog_reboot_notifier(struct notifier_block *nb,
4946     unsigned long code, void *data)
4947     {
4948     @@ -1098,27 +1107,14 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,
4949    
4950     int watchdog_dev_register(struct watchdog_device *wdd)
4951     {
4952     - struct device *dev;
4953     - dev_t devno;
4954     int ret;
4955    
4956     - devno = MKDEV(MAJOR(watchdog_devt), wdd->id);
4957     -
4958     - ret = watchdog_cdev_register(wdd, devno);
4959     + ret = watchdog_cdev_register(wdd);
4960     if (ret)
4961     return ret;
4962    
4963     - dev = device_create_with_groups(&watchdog_class, wdd->parent,
4964     - devno, wdd, wdd->groups,
4965     - "watchdog%d", wdd->id);
4966     - if (IS_ERR(dev)) {
4967     - watchdog_cdev_unregister(wdd);
4968     - return PTR_ERR(dev);
4969     - }
4970     -
4971     ret = watchdog_register_pretimeout(wdd);
4972     if (ret) {
4973     - device_destroy(&watchdog_class, devno);
4974     watchdog_cdev_unregister(wdd);
4975     return ret;
4976     }
4977     @@ -1126,7 +1122,8 @@ int watchdog_dev_register(struct watchdog_device *wdd)
4978     if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
4979     wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
4980    
4981     - ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
4982     + ret = devm_register_reboot_notifier(&wdd->wd_data->dev,
4983     + &wdd->reboot_nb);
4984     if (ret) {
4985     pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
4986     wdd->id, ret);
4987     @@ -1148,7 +1145,6 @@ int watchdog_dev_register(struct watchdog_device *wdd)
4988     void watchdog_dev_unregister(struct watchdog_device *wdd)
4989     {
4990     watchdog_unregister_pretimeout(wdd);
4991     - device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
4992     watchdog_cdev_unregister(wdd);
4993     }
4994    
4995     diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
4996     index 1a135d1b85bd..07d8ace61f77 100644
4997     --- a/fs/cifs/cifsfs.c
4998     +++ b/fs/cifs/cifsfs.c
4999     @@ -119,6 +119,7 @@ extern mempool_t *cifs_mid_poolp;
5000    
5001     struct workqueue_struct *cifsiod_wq;
5002     struct workqueue_struct *decrypt_wq;
5003     +struct workqueue_struct *fileinfo_put_wq;
5004     struct workqueue_struct *cifsoplockd_wq;
5005     __u32 cifs_lock_secret;
5006    
5007     @@ -1554,11 +1555,18 @@ init_cifs(void)
5008     goto out_destroy_cifsiod_wq;
5009     }
5010    
5011     + fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
5012     + WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
5013     + if (!fileinfo_put_wq) {
5014     + rc = -ENOMEM;
5015     + goto out_destroy_decrypt_wq;
5016     + }
5017     +
5018     cifsoplockd_wq = alloc_workqueue("cifsoplockd",
5019     WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
5020     if (!cifsoplockd_wq) {
5021     rc = -ENOMEM;
5022     - goto out_destroy_decrypt_wq;
5023     + goto out_destroy_fileinfo_put_wq;
5024     }
5025    
5026     rc = cifs_fscache_register();
5027     @@ -1624,6 +1632,8 @@ out_unreg_fscache:
5028     cifs_fscache_unregister();
5029     out_destroy_cifsoplockd_wq:
5030     destroy_workqueue(cifsoplockd_wq);
5031     +out_destroy_fileinfo_put_wq:
5032     + destroy_workqueue(fileinfo_put_wq);
5033     out_destroy_decrypt_wq:
5034     destroy_workqueue(decrypt_wq);
5035     out_destroy_cifsiod_wq:
5036     @@ -1653,6 +1663,7 @@ exit_cifs(void)
5037     cifs_fscache_unregister();
5038     destroy_workqueue(cifsoplockd_wq);
5039     destroy_workqueue(decrypt_wq);
5040     + destroy_workqueue(fileinfo_put_wq);
5041     destroy_workqueue(cifsiod_wq);
5042     cifs_proc_clean();
5043     }
5044     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
5045     index 5d2dd04b55a6..f55e53486e74 100644
5046     --- a/fs/cifs/cifsglob.h
5047     +++ b/fs/cifs/cifsglob.h
5048     @@ -1265,6 +1265,7 @@ struct cifsFileInfo {
5049     struct mutex fh_mutex; /* prevents reopen race after dead ses*/
5050     struct cifs_search_info srch_inf;
5051     struct work_struct oplock_break; /* work for oplock breaks */
5052     + struct work_struct put; /* work for the final part of _put */
5053     };
5054    
5055     struct cifs_io_parms {
5056     @@ -1370,7 +1371,8 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
5057     }
5058    
5059     struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
5060     -void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
5061     +void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr,
5062     + bool offload);
5063     void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
5064    
5065     #define CIFS_CACHE_READ_FLG 1
5066     @@ -1908,6 +1910,7 @@ void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
5067     extern const struct slow_work_ops cifs_oplock_break_ops;
5068     extern struct workqueue_struct *cifsiod_wq;
5069     extern struct workqueue_struct *decrypt_wq;
5070     +extern struct workqueue_struct *fileinfo_put_wq;
5071     extern struct workqueue_struct *cifsoplockd_wq;
5072     extern __u32 cifs_lock_secret;
5073    
5074     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
5075     index 20c70cbab1ad..02451d085ddd 100644
5076     --- a/fs/cifs/connect.c
5077     +++ b/fs/cifs/connect.c
5078     @@ -387,7 +387,7 @@ static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
5079     #ifdef CONFIG_CIFS_DFS_UPCALL
5080     struct super_cb_data {
5081     struct TCP_Server_Info *server;
5082     - struct cifs_sb_info *cifs_sb;
5083     + struct super_block *sb;
5084     };
5085    
5086     /* These functions must be called with server->srv_mutex held */
5087     @@ -398,25 +398,39 @@ static void super_cb(struct super_block *sb, void *arg)
5088     struct cifs_sb_info *cifs_sb;
5089     struct cifs_tcon *tcon;
5090    
5091     - if (d->cifs_sb)
5092     + if (d->sb)
5093     return;
5094    
5095     cifs_sb = CIFS_SB(sb);
5096     tcon = cifs_sb_master_tcon(cifs_sb);
5097     if (tcon->ses->server == d->server)
5098     - d->cifs_sb = cifs_sb;
5099     + d->sb = sb;
5100     }
5101    
5102     -static inline struct cifs_sb_info *
5103     -find_super_by_tcp(struct TCP_Server_Info *server)
5104     +static struct super_block *get_tcp_super(struct TCP_Server_Info *server)
5105     {
5106     struct super_cb_data d = {
5107     .server = server,
5108     - .cifs_sb = NULL,
5109     + .sb = NULL,
5110     };
5111    
5112     iterate_supers_type(&cifs_fs_type, super_cb, &d);
5113     - return d.cifs_sb ? d.cifs_sb : ERR_PTR(-ENOENT);
5114     +
5115     + if (unlikely(!d.sb))
5116     + return ERR_PTR(-ENOENT);
5117     + /*
5118     + * Grab an active reference in order to prevent automounts (DFS links)
5119     + * of expiring and then freeing up our cifs superblock pointer while
5120     + * we're doing failover.
5121     + */
5122     + cifs_sb_active(d.sb);
5123     + return d.sb;
5124     +}
5125     +
5126     +static inline void put_tcp_super(struct super_block *sb)
5127     +{
5128     + if (!IS_ERR_OR_NULL(sb))
5129     + cifs_sb_deactive(sb);
5130     }
5131    
5132     static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
5133     @@ -480,6 +494,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
5134     struct mid_q_entry *mid_entry;
5135     struct list_head retry_list;
5136     #ifdef CONFIG_CIFS_DFS_UPCALL
5137     + struct super_block *sb = NULL;
5138     struct cifs_sb_info *cifs_sb = NULL;
5139     struct dfs_cache_tgt_list tgt_list = {0};
5140     struct dfs_cache_tgt_iterator *tgt_it = NULL;
5141     @@ -489,13 +504,15 @@ cifs_reconnect(struct TCP_Server_Info *server)
5142     server->nr_targets = 1;
5143     #ifdef CONFIG_CIFS_DFS_UPCALL
5144     spin_unlock(&GlobalMid_Lock);
5145     - cifs_sb = find_super_by_tcp(server);
5146     - if (IS_ERR(cifs_sb)) {
5147     - rc = PTR_ERR(cifs_sb);
5148     + sb = get_tcp_super(server);
5149     + if (IS_ERR(sb)) {
5150     + rc = PTR_ERR(sb);
5151     cifs_dbg(FYI, "%s: will not do DFS failover: rc = %d\n",
5152     __func__, rc);
5153     - cifs_sb = NULL;
5154     + sb = NULL;
5155     } else {
5156     + cifs_sb = CIFS_SB(sb);
5157     +
5158     rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list, &tgt_it);
5159     if (rc && (rc != -EOPNOTSUPP)) {
5160     cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
5161     @@ -512,6 +529,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
5162     /* the demux thread will exit normally
5163     next time through the loop */
5164     spin_unlock(&GlobalMid_Lock);
5165     +#ifdef CONFIG_CIFS_DFS_UPCALL
5166     + dfs_cache_free_tgts(&tgt_list);
5167     + put_tcp_super(sb);
5168     +#endif
5169     return rc;
5170     } else
5171     server->tcpStatus = CifsNeedReconnect;
5172     @@ -638,7 +659,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
5173     __func__, rc);
5174     }
5175     dfs_cache_free_tgts(&tgt_list);
5176     +
5177     }
5178     +
5179     + put_tcp_super(sb);
5180     #endif
5181     if (server->tcpStatus == CifsNeedNegotiate)
5182     mod_delayed_work(cifsiod_wq, &server->echo, 0);
5183     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
5184     index c32650f14c9b..969543034b4d 100644
5185     --- a/fs/cifs/file.c
5186     +++ b/fs/cifs/file.c
5187     @@ -288,6 +288,8 @@ cifs_down_write(struct rw_semaphore *sem)
5188     msleep(10);
5189     }
5190    
5191     +static void cifsFileInfo_put_work(struct work_struct *work);
5192     +
5193     struct cifsFileInfo *
5194     cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
5195     struct tcon_link *tlink, __u32 oplock)
5196     @@ -322,6 +324,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
5197     cfile->invalidHandle = false;
5198     cfile->tlink = cifs_get_tlink(tlink);
5199     INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
5200     + INIT_WORK(&cfile->put, cifsFileInfo_put_work);
5201     mutex_init(&cfile->fh_mutex);
5202     spin_lock_init(&cfile->file_info_lock);
5203    
5204     @@ -376,6 +379,41 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
5205     return cifs_file;
5206     }
5207    
5208     +static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
5209     +{
5210     + struct inode *inode = d_inode(cifs_file->dentry);
5211     + struct cifsInodeInfo *cifsi = CIFS_I(inode);
5212     + struct cifsLockInfo *li, *tmp;
5213     + struct super_block *sb = inode->i_sb;
5214     +
5215     + /*
5216     + * Delete any outstanding lock records. We'll lose them when the file
5217     + * is closed anyway.
5218     + */
5219     + cifs_down_write(&cifsi->lock_sem);
5220     + list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
5221     + list_del(&li->llist);
5222     + cifs_del_lock_waiters(li);
5223     + kfree(li);
5224     + }
5225     + list_del(&cifs_file->llist->llist);
5226     + kfree(cifs_file->llist);
5227     + up_write(&cifsi->lock_sem);
5228     +
5229     + cifs_put_tlink(cifs_file->tlink);
5230     + dput(cifs_file->dentry);
5231     + cifs_sb_deactive(sb);
5232     + kfree(cifs_file);
5233     +}
5234     +
5235     +static void cifsFileInfo_put_work(struct work_struct *work)
5236     +{
5237     + struct cifsFileInfo *cifs_file = container_of(work,
5238     + struct cifsFileInfo, put);
5239     +
5240     + cifsFileInfo_put_final(cifs_file);
5241     +}
5242     +
5243     /**
5244     * cifsFileInfo_put - release a reference of file priv data
5245     *
5246     @@ -383,15 +421,15 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
5247     */
5248     void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
5249     {
5250     - _cifsFileInfo_put(cifs_file, true);
5251     + _cifsFileInfo_put(cifs_file, true, true);
5252     }
5253    
5254     /**
5255     * _cifsFileInfo_put - release a reference of file priv data
5256     *
5257     * This may involve closing the filehandle @cifs_file out on the
5258     - * server. Must be called without holding tcon->open_file_lock and
5259     - * cifs_file->file_info_lock.
5260     + * server. Must be called without holding tcon->open_file_lock,
5261     + * cinode->open_file_lock and cifs_file->file_info_lock.
5262     *
5263     * If @wait_for_oplock_handler is true and we are releasing the last
5264     * reference, wait for any running oplock break handler of the file
5265     @@ -399,7 +437,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
5266     * oplock break handler, you need to pass false.
5267     *
5268     */
5269     -void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
5270     +void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
5271     + bool wait_oplock_handler, bool offload)
5272     {
5273     struct inode *inode = d_inode(cifs_file->dentry);
5274     struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
5275     @@ -407,7 +446,6 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
5276     struct cifsInodeInfo *cifsi = CIFS_I(inode);
5277     struct super_block *sb = inode->i_sb;
5278     struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
5279     - struct cifsLockInfo *li, *tmp;
5280     struct cifs_fid fid;
5281     struct cifs_pending_open open;
5282     bool oplock_break_cancelled;
5283     @@ -468,24 +506,10 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
5284    
5285     cifs_del_pending_open(&open);
5286    
5287     - /*
5288     - * Delete any outstanding lock records. We'll lose them when the file
5289     - * is closed anyway.
5290     - */
5291     - cifs_down_write(&cifsi->lock_sem);
5292     - list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
5293     - list_del(&li->llist);
5294     - cifs_del_lock_waiters(li);
5295     - kfree(li);
5296     - }
5297     - list_del(&cifs_file->llist->llist);
5298     - kfree(cifs_file->llist);
5299     - up_write(&cifsi->lock_sem);
5300     -
5301     - cifs_put_tlink(cifs_file->tlink);
5302     - dput(cifs_file->dentry);
5303     - cifs_sb_deactive(sb);
5304     - kfree(cifs_file);
5305     + if (offload)
5306     + queue_work(fileinfo_put_wq, &cifs_file->put);
5307     + else
5308     + cifsFileInfo_put_final(cifs_file);
5309     }
5310    
5311     int cifs_open(struct inode *inode, struct file *file)
5312     @@ -816,7 +840,7 @@ reopen_error_exit:
5313     int cifs_close(struct inode *inode, struct file *file)
5314     {
5315     if (file->private_data != NULL) {
5316     - cifsFileInfo_put(file->private_data);
5317     + _cifsFileInfo_put(file->private_data, true, false);
5318     file->private_data = NULL;
5319     }
5320    
5321     @@ -4688,7 +4712,7 @@ void cifs_oplock_break(struct work_struct *work)
5322     cinode);
5323     cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
5324     }
5325     - _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
5326     + _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
5327     cifs_done_oplock_break(cinode);
5328     }
5329    
5330     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
5331     index 53134e4509b8..8bba6cd5e870 100644
5332     --- a/fs/ext4/inode.c
5333     +++ b/fs/ext4/inode.c
5334     @@ -3532,8 +3532,14 @@ retry:
5335     return ret;
5336     }
5337    
5338     + /*
5339     + * Writes that span EOF might trigger an I/O size update on completion,
5340     + * so consider them to be dirty for the purposes of O_DSYNC, even if
5341     + * there is no other metadata changes being made or are pending here.
5342     + */
5343     iomap->flags = 0;
5344     - if (ext4_inode_datasync_dirty(inode))
5345     + if (ext4_inode_datasync_dirty(inode) ||
5346     + offset + length > i_size_read(inode))
5347     iomap->flags |= IOMAP_F_DIRTY;
5348     iomap->bdev = inode->i_sb->s_bdev;
5349     iomap->dax_dev = sbi->s_daxdev;
5350     @@ -3836,7 +3842,13 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
5351     * writes & truncates and since we take care of writing back page cache,
5352     * we are protected against page writeback as well.
5353     */
5354     - inode_lock_shared(inode);
5355     + if (iocb->ki_flags & IOCB_NOWAIT) {
5356     + if (!inode_trylock_shared(inode))
5357     + return -EAGAIN;
5358     + } else {
5359     + inode_lock_shared(inode);
5360     + }
5361     +
5362     ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
5363     iocb->ki_pos + count - 1);
5364     if (ret)
5365     diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
5366     index 4024790028aa..9046432b87c2 100644
5367     --- a/fs/f2fs/f2fs.h
5368     +++ b/fs/f2fs/f2fs.h
5369     @@ -1289,6 +1289,7 @@ struct f2fs_sb_info {
5370     unsigned int gc_mode; /* current GC state */
5371     unsigned int next_victim_seg[2]; /* next segment in victim section */
5372     /* for skip statistic */
5373     + unsigned int atomic_files; /* # of opened atomic file */
5374     unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
5375     unsigned long long skipped_gc_rwsem; /* FG_GC only */
5376    
5377     @@ -2704,6 +2705,20 @@ static inline void clear_file(struct inode *inode, int type)
5378     f2fs_mark_inode_dirty_sync(inode, true);
5379     }
5380    
5381     +static inline bool f2fs_is_time_consistent(struct inode *inode)
5382     +{
5383     + if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
5384     + return false;
5385     + if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
5386     + return false;
5387     + if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
5388     + return false;
5389     + if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
5390     + &F2FS_I(inode)->i_crtime))
5391     + return false;
5392     + return true;
5393     +}
5394     +
5395     static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
5396     {
5397     bool ret;
5398     @@ -2721,14 +2736,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
5399     i_size_read(inode) & ~PAGE_MASK)
5400     return false;
5401    
5402     - if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
5403     - return false;
5404     - if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
5405     - return false;
5406     - if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
5407     - return false;
5408     - if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
5409     - &F2FS_I(inode)->i_crtime))
5410     + if (!f2fs_is_time_consistent(inode))
5411     return false;
5412    
5413     down_read(&F2FS_I(inode)->i_sem);
5414     diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
5415     index 29bc0a542759..8ed8e4328bd1 100644
5416     --- a/fs/f2fs/file.c
5417     +++ b/fs/f2fs/file.c
5418     @@ -1890,6 +1890,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
5419     spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
5420     if (list_empty(&fi->inmem_ilist))
5421     list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
5422     + sbi->atomic_files++;
5423     spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
5424    
5425     /* add inode in inmem_list first and set atomic_file */
5426     diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
5427     index db4fec30c30d..386ad54c13c3 100644
5428     --- a/fs/f2fs/inode.c
5429     +++ b/fs/f2fs/inode.c
5430     @@ -615,7 +615,11 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
5431     inode->i_ino == F2FS_META_INO(sbi))
5432     return 0;
5433    
5434     - if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
5435     + /*
5436     + * atime could be updated without dirtying f2fs inode in lazytime mode
5437     + */
5438     + if (f2fs_is_time_consistent(inode) &&
5439     + !is_inode_flag_set(inode, FI_DIRTY_INODE))
5440     return 0;
5441    
5442     if (!f2fs_is_checkpoint_ready(sbi))
5443     diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
5444     index 4faf06e8bf89..a1c507b0b4ac 100644
5445     --- a/fs/f2fs/namei.c
5446     +++ b/fs/f2fs/namei.c
5447     @@ -981,7 +981,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
5448     if (!old_dir_entry || whiteout)
5449     file_lost_pino(old_inode);
5450     else
5451     - F2FS_I(old_inode)->i_pino = new_dir->i_ino;
5452     + /* adjust dir's i_pino to pass fsck check */
5453     + f2fs_i_pino_write(old_inode, new_dir->i_ino);
5454     up_write(&F2FS_I(old_inode)->i_sem);
5455    
5456     old_inode->i_ctime = current_time(old_inode);
5457     @@ -1141,7 +1142,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
5458     f2fs_set_link(old_dir, old_entry, old_page, new_inode);
5459    
5460     down_write(&F2FS_I(old_inode)->i_sem);
5461     - file_lost_pino(old_inode);
5462     + if (!old_dir_entry)
5463     + file_lost_pino(old_inode);
5464     + else
5465     + /* adjust dir's i_pino to pass fsck check */
5466     + f2fs_i_pino_write(old_inode, new_dir->i_ino);
5467     up_write(&F2FS_I(old_inode)->i_sem);
5468    
5469     old_dir->i_ctime = current_time(old_dir);
5470     @@ -1156,7 +1161,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
5471     f2fs_set_link(new_dir, new_entry, new_page, old_inode);
5472    
5473     down_write(&F2FS_I(new_inode)->i_sem);
5474     - file_lost_pino(new_inode);
5475     + if (!new_dir_entry)
5476     + file_lost_pino(new_inode);
5477     + else
5478     + /* adjust dir's i_pino to pass fsck check */
5479     + f2fs_i_pino_write(new_inode, old_dir->i_ino);
5480     up_write(&F2FS_I(new_inode)->i_sem);
5481    
5482     new_dir->i_ctime = current_time(new_dir);
5483     diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
5484     index 808709581481..7d8578401267 100644
5485     --- a/fs/f2fs/segment.c
5486     +++ b/fs/f2fs/segment.c
5487     @@ -288,6 +288,8 @@ void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
5488     struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
5489     struct inode *inode;
5490     struct f2fs_inode_info *fi;
5491     + unsigned int count = sbi->atomic_files;
5492     + unsigned int looped = 0;
5493     next:
5494     spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
5495     if (list_empty(head)) {
5496     @@ -296,22 +298,26 @@ next:
5497     }
5498     fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
5499     inode = igrab(&fi->vfs_inode);
5500     + if (inode)
5501     + list_move_tail(&fi->inmem_ilist, head);
5502     spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
5503    
5504     if (inode) {
5505     if (gc_failure) {
5506     - if (fi->i_gc_failures[GC_FAILURE_ATOMIC])
5507     - goto drop;
5508     - goto skip;
5509     + if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
5510     + goto skip;
5511     }
5512     -drop:
5513     set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
5514     f2fs_drop_inmem_pages(inode);
5515     +skip:
5516     iput(inode);
5517     }
5518     -skip:
5519     congestion_wait(BLK_RW_ASYNC, HZ/50);
5520     cond_resched();
5521     + if (gc_failure) {
5522     + if (++looped >= count)
5523     + return;
5524     + }
5525     goto next;
5526     }
5527    
5528     @@ -327,13 +333,16 @@ void f2fs_drop_inmem_pages(struct inode *inode)
5529     mutex_unlock(&fi->inmem_lock);
5530     }
5531    
5532     - clear_inode_flag(inode, FI_ATOMIC_FILE);
5533     fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
5534     stat_dec_atomic_write(inode);
5535    
5536     spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
5537     if (!list_empty(&fi->inmem_ilist))
5538     list_del_init(&fi->inmem_ilist);
5539     + if (f2fs_is_atomic_file(inode)) {
5540     + clear_inode_flag(inode, FI_ATOMIC_FILE);
5541     + sbi->atomic_files--;
5542     + }
5543     spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
5544     }
5545    
5546     diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
5547     index a478df035651..40306c1eab07 100644
5548     --- a/fs/hugetlbfs/inode.c
5549     +++ b/fs/hugetlbfs/inode.c
5550     @@ -1461,28 +1461,43 @@ static int __init init_hugetlbfs_fs(void)
5551     sizeof(struct hugetlbfs_inode_info),
5552     0, SLAB_ACCOUNT, init_once);
5553     if (hugetlbfs_inode_cachep == NULL)
5554     - goto out2;
5555     + goto out;
5556    
5557     error = register_filesystem(&hugetlbfs_fs_type);
5558     if (error)
5559     - goto out;
5560     + goto out_free;
5561    
5562     + /* default hstate mount is required */
5563     + mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
5564     + if (IS_ERR(mnt)) {
5565     + error = PTR_ERR(mnt);
5566     + goto out_unreg;
5567     + }
5568     + hugetlbfs_vfsmount[default_hstate_idx] = mnt;
5569     +
5570     + /* other hstates are optional */
5571     i = 0;
5572     for_each_hstate(h) {
5573     - mnt = mount_one_hugetlbfs(h);
5574     - if (IS_ERR(mnt) && i == 0) {
5575     - error = PTR_ERR(mnt);
5576     - goto out;
5577     + if (i == default_hstate_idx) {
5578     + i++;
5579     + continue;
5580     }
5581     - hugetlbfs_vfsmount[i] = mnt;
5582     +
5583     + mnt = mount_one_hugetlbfs(h);
5584     + if (IS_ERR(mnt))
5585     + hugetlbfs_vfsmount[i] = NULL;
5586     + else
5587     + hugetlbfs_vfsmount[i] = mnt;
5588     i++;
5589     }
5590    
5591     return 0;
5592    
5593     - out:
5594     + out_unreg:
5595     + (void)unregister_filesystem(&hugetlbfs_fs_type);
5596     + out_free:
5597     kmem_cache_destroy(hugetlbfs_inode_cachep);
5598     - out2:
5599     + out:
5600     return error;
5601     }
5602     fs_initcall(init_hugetlbfs_fs)
5603     diff --git a/fs/io_uring.c b/fs/io_uring.c
5604     index a340147387ec..74e786578c77 100644
5605     --- a/fs/io_uring.c
5606     +++ b/fs/io_uring.c
5607     @@ -3773,12 +3773,18 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
5608     ctx->cq_entries = rings->cq_ring_entries;
5609    
5610     size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
5611     - if (size == SIZE_MAX)
5612     + if (size == SIZE_MAX) {
5613     + io_mem_free(ctx->rings);
5614     + ctx->rings = NULL;
5615     return -EOVERFLOW;
5616     + }
5617    
5618     ctx->sq_sqes = io_mem_alloc(size);
5619     - if (!ctx->sq_sqes)
5620     + if (!ctx->sq_sqes) {
5621     + io_mem_free(ctx->rings);
5622     + ctx->rings = NULL;
5623     return -ENOMEM;
5624     + }
5625    
5626     return 0;
5627     }
5628     diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
5629     index fd46ec83cb04..7b5f76efef02 100644
5630     --- a/fs/iomap/direct-io.c
5631     +++ b/fs/iomap/direct-io.c
5632     @@ -318,7 +318,9 @@ zero_tail:
5633     if (pad)
5634     iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
5635     }
5636     - return copied ? copied : ret;
5637     + if (copied)
5638     + return copied;
5639     + return ret;
5640     }
5641    
5642     static loff_t
5643     diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
5644     index 132fb92098c7..c43591cd70f1 100644
5645     --- a/fs/jbd2/commit.c
5646     +++ b/fs/jbd2/commit.c
5647     @@ -727,7 +727,6 @@ start_journal_io:
5648     submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
5649     }
5650     cond_resched();
5651     - stats.run.rs_blocks_logged += bufs;
5652    
5653     /* Force a new descriptor to be generated next
5654     time round the loop. */
5655     @@ -814,6 +813,7 @@ start_journal_io:
5656     if (unlikely(!buffer_uptodate(bh)))
5657     err = -EIO;
5658     jbd2_unfile_log_bh(bh);
5659     + stats.run.rs_blocks_logged++;
5660    
5661     /*
5662     * The list contains temporary buffer heads created by
5663     @@ -859,6 +859,7 @@ start_journal_io:
5664     BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
5665     clear_buffer_jwrite(bh);
5666     jbd2_unfile_log_bh(bh);
5667     + stats.run.rs_blocks_logged++;
5668     __brelse(bh); /* One for getblk */
5669     /* AKPM: bforget here */
5670     }
5671     @@ -880,6 +881,7 @@ start_journal_io:
5672     }
5673     if (cbh)
5674     err = journal_wait_on_commit_record(journal, cbh);
5675     + stats.run.rs_blocks_logged++;
5676     if (jbd2_has_feature_async_commit(journal) &&
5677     journal->j_flags & JBD2_BARRIER) {
5678     blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
5679     diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
5680     index 3e7da392aa6f..bb981ec76456 100644
5681     --- a/fs/ocfs2/acl.c
5682     +++ b/fs/ocfs2/acl.c
5683     @@ -327,8 +327,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
5684     down_read(&OCFS2_I(inode)->ip_xattr_sem);
5685     acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
5686     up_read(&OCFS2_I(inode)->ip_xattr_sem);
5687     - if (IS_ERR(acl) || !acl)
5688     - return PTR_ERR(acl);
5689     + if (IS_ERR_OR_NULL(acl))
5690     + return PTR_ERR_OR_ZERO(acl);
5691     ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
5692     if (ret)
5693     return ret;
5694     diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
5695     index 7f0b39da5022..9b96243de081 100644
5696     --- a/fs/quota/dquot.c
5697     +++ b/fs/quota/dquot.c
5698     @@ -2861,68 +2861,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
5699     static int do_proc_dqstats(struct ctl_table *table, int write,
5700     void __user *buffer, size_t *lenp, loff_t *ppos)
5701     {
5702     - unsigned int type = (int *)table->data - dqstats.stat;
5703     + unsigned int type = (unsigned long *)table->data - dqstats.stat;
5704     + s64 value = percpu_counter_sum(&dqstats.counter[type]);
5705     +
5706     + /* Filter negative values for non-monotonic counters */
5707     + if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
5708     + type == DQST_FREE_DQUOTS))
5709     + value = 0;
5710    
5711     /* Update global table */
5712     - dqstats.stat[type] =
5713     - percpu_counter_sum_positive(&dqstats.counter[type]);
5714     - return proc_dointvec(table, write, buffer, lenp, ppos);
5715     + dqstats.stat[type] = value;
5716     + return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
5717     }
5718    
5719     static struct ctl_table fs_dqstats_table[] = {
5720     {
5721     .procname = "lookups",
5722     .data = &dqstats.stat[DQST_LOOKUPS],
5723     - .maxlen = sizeof(int),
5724     + .maxlen = sizeof(unsigned long),
5725     .mode = 0444,
5726     .proc_handler = do_proc_dqstats,
5727     },
5728     {
5729     .procname = "drops",
5730     .data = &dqstats.stat[DQST_DROPS],
5731     - .maxlen = sizeof(int),
5732     + .maxlen = sizeof(unsigned long),
5733     .mode = 0444,
5734     .proc_handler = do_proc_dqstats,
5735     },
5736     {
5737     .procname = "reads",
5738     .data = &dqstats.stat[DQST_READS],
5739     - .maxlen = sizeof(int),
5740     + .maxlen = sizeof(unsigned long),
5741     .mode = 0444,
5742     .proc_handler = do_proc_dqstats,
5743     },
5744     {
5745     .procname = "writes",
5746     .data = &dqstats.stat[DQST_WRITES],
5747     - .maxlen = sizeof(int),
5748     + .maxlen = sizeof(unsigned long),
5749     .mode = 0444,
5750     .proc_handler = do_proc_dqstats,
5751     },
5752     {
5753     .procname = "cache_hits",
5754     .data = &dqstats.stat[DQST_CACHE_HITS],
5755     - .maxlen = sizeof(int),
5756     + .maxlen = sizeof(unsigned long),
5757     .mode = 0444,
5758     .proc_handler = do_proc_dqstats,
5759     },
5760     {
5761     .procname = "allocated_dquots",
5762     .data = &dqstats.stat[DQST_ALLOC_DQUOTS],
5763     - .maxlen = sizeof(int),
5764     + .maxlen = sizeof(unsigned long),
5765     .mode = 0444,
5766     .proc_handler = do_proc_dqstats,
5767     },
5768     {
5769     .procname = "free_dquots",
5770     .data = &dqstats.stat[DQST_FREE_DQUOTS],
5771     - .maxlen = sizeof(int),
5772     + .maxlen = sizeof(unsigned long),
5773     .mode = 0444,
5774     .proc_handler = do_proc_dqstats,
5775     },
5776     {
5777     .procname = "syncs",
5778     .data = &dqstats.stat[DQST_SYNCS],
5779     - .maxlen = sizeof(int),
5780     + .maxlen = sizeof(unsigned long),
5781     .mode = 0444,
5782     .proc_handler = do_proc_dqstats,
5783     },
5784     diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
5785     index f9fd18670e22..d99d166fd892 100644
5786     --- a/fs/userfaultfd.c
5787     +++ b/fs/userfaultfd.c
5788     @@ -1834,13 +1834,12 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
5789     if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
5790     goto out;
5791     features = uffdio_api.features;
5792     - if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
5793     - memset(&uffdio_api, 0, sizeof(uffdio_api));
5794     - if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
5795     - goto out;
5796     - ret = -EINVAL;
5797     - goto out;
5798     - }
5799     + ret = -EINVAL;
5800     + if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
5801     + goto err_out;
5802     + ret = -EPERM;
5803     + if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
5804     + goto err_out;
5805     /* report all available features and ioctls to userland */
5806     uffdio_api.features = UFFD_API_FEATURES;
5807     uffdio_api.ioctls = UFFD_API_IOCTLS;
5808     @@ -1853,6 +1852,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
5809     ret = 0;
5810     out:
5811     return ret;
5812     +err_out:
5813     + memset(&uffdio_api, 0, sizeof(uffdio_api));
5814     + if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
5815     + ret = -EFAULT;
5816     + goto out;
5817     }
5818    
5819     static long userfaultfd_ioctl(struct file *file, unsigned cmd,
5820     diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
5821     index 641d07f30a27..7b0d9ad8cb1a 100644
5822     --- a/fs/xfs/xfs_log.c
5823     +++ b/fs/xfs/xfs_log.c
5824     @@ -1495,6 +1495,8 @@ out_free_iclog:
5825     prev_iclog = iclog->ic_next;
5826     kmem_free(iclog->ic_data);
5827     kmem_free(iclog);
5828     + if (prev_iclog == log->l_iclog)
5829     + break;
5830     }
5831     out_free_log:
5832     kmem_free(log);
5833     diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
5834     index adf993a3bd58..6a18a97b76a8 100644
5835     --- a/include/linux/dma-direct.h
5836     +++ b/include/linux/dma-direct.h
5837     @@ -3,8 +3,11 @@
5838     #define _LINUX_DMA_DIRECT_H 1
5839    
5840     #include <linux/dma-mapping.h>
5841     +#include <linux/memblock.h> /* for min_low_pfn */
5842     #include <linux/mem_encrypt.h>
5843    
5844     +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
5845     +
5846     #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
5847     #include <asm/dma-direct.h>
5848     #else
5849     @@ -24,11 +27,16 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
5850    
5851     static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
5852     {
5853     + dma_addr_t end = addr + size - 1;
5854     +
5855     if (!dev->dma_mask)
5856     return false;
5857    
5858     - return addr + size - 1 <=
5859     - min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
5860     + if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
5861     + min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
5862     + return false;
5863     +
5864     + return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
5865     }
5866     #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
5867    
5868     diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
5869     index 4a1c4fca475a..4d450672b7d6 100644
5870     --- a/include/linux/dma-mapping.h
5871     +++ b/include/linux/dma-mapping.h
5872     @@ -162,7 +162,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
5873     int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
5874     void *cpu_addr, size_t size, int *ret);
5875    
5876     -void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
5877     +void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
5878     int dma_release_from_global_coherent(int order, void *vaddr);
5879     int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
5880     size_t size, int *ret);
5881     @@ -172,7 +172,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
5882     #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
5883     #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
5884    
5885     -static inline void *dma_alloc_from_global_coherent(ssize_t size,
5886     +static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
5887     dma_addr_t *dma_handle)
5888     {
5889     return NULL;
5890     @@ -583,6 +583,10 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
5891     static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
5892     size_t size, enum dma_data_direction dir, unsigned long attrs)
5893     {
5894     + /* DMA must never operate on areas that might be remapped. */
5895     + if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
5896     + "rejecting DMA map of vmalloc memory\n"))
5897     + return DMA_MAPPING_ERROR;
5898     debug_dma_map_single(dev, ptr, size);
5899     return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
5900     size, dir, attrs);
5901     diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
5902     index 1b9a51a1bccb..1f98b52118f0 100644
5903     --- a/include/linux/hrtimer.h
5904     +++ b/include/linux/hrtimer.h
5905     @@ -456,12 +456,18 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
5906    
5907     extern bool hrtimer_active(const struct hrtimer *timer);
5908    
5909     -/*
5910     - * Helper function to check, whether the timer is on one of the queues
5911     +/**
5912     + * hrtimer_is_queued = check, whether the timer is on one of the queues
5913     + * @timer: Timer to check
5914     + *
5915     + * Returns: True if the timer is queued, false otherwise
5916     + *
5917     + * The function can be used lockless, but it gives only a current snapshot.
5918     */
5919     -static inline int hrtimer_is_queued(struct hrtimer *timer)
5920     +static inline bool hrtimer_is_queued(struct hrtimer *timer)
5921     {
5922     - return timer->state & HRTIMER_STATE_ENQUEUED;
5923     + /* The READ_ONCE pairs with the update functions of timer->state */
5924     + return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
5925     }
5926    
5927     /*
5928     diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
5929     index edb0f0c30904..1adf54aad2df 100644
5930     --- a/include/linux/libfdt_env.h
5931     +++ b/include/linux/libfdt_env.h
5932     @@ -7,6 +7,9 @@
5933    
5934     #include <asm/byteorder.h>
5935    
5936     +#define INT32_MAX S32_MAX
5937     +#define UINT32_MAX U32_MAX
5938     +
5939     typedef __be16 fdt16_t;
5940     typedef __be32 fdt32_t;
5941     typedef __be64 fdt64_t;
5942     diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
5943     index fe6cfdcfbc26..468328b1e1dd 100644
5944     --- a/include/linux/posix-clock.h
5945     +++ b/include/linux/posix-clock.h
5946     @@ -69,29 +69,32 @@ struct posix_clock_operations {
5947     *
5948     * @ops: Functional interface to the clock
5949     * @cdev: Character device instance for this clock
5950     - * @kref: Reference count.
5951     + * @dev: Pointer to the clock's device.
5952     * @rwsem: Protects the 'zombie' field from concurrent access.
5953     * @zombie: If 'zombie' is true, then the hardware has disappeared.
5954     - * @release: A function to free the structure when the reference count reaches
5955     - * zero. May be NULL if structure is statically allocated.
5956     *
5957     * Drivers should embed their struct posix_clock within a private
5958     * structure, obtaining a reference to it during callbacks using
5959     * container_of().
5960     + *
5961     + * Drivers should supply an initialized but not exposed struct device
5962     + * to posix_clock_register(). It is used to manage lifetime of the
5963     + * driver's private structure. It's 'release' field should be set to
5964     + * a release function for this private structure.
5965     */
5966     struct posix_clock {
5967     struct posix_clock_operations ops;
5968     struct cdev cdev;
5969     - struct kref kref;
5970     + struct device *dev;
5971     struct rw_semaphore rwsem;
5972     bool zombie;
5973     - void (*release)(struct posix_clock *clk);
5974     };
5975    
5976     /**
5977     * posix_clock_register() - register a new clock
5978     - * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
5979     - * @devid: Allocated device id
5980     + * @clk: Pointer to the clock. Caller must provide 'ops' field
5981     + * @dev: Pointer to the initialized device. Caller must provide
5982     + * 'release' field
5983     *
5984     * A clock driver calls this function to register itself with the
5985     * clock device subsystem. If 'clk' points to dynamically allocated
5986     @@ -100,7 +103,7 @@ struct posix_clock {
5987     *
5988     * Returns zero on success, non-zero otherwise.
5989     */
5990     -int posix_clock_register(struct posix_clock *clk, dev_t devid);
5991     +int posix_clock_register(struct posix_clock *clk, struct device *dev);
5992    
5993     /**
5994     * posix_clock_unregister() - unregister a clock
5995     diff --git a/include/linux/quota.h b/include/linux/quota.h
5996     index f32dd270b8e3..27aab84fcbaa 100644
5997     --- a/include/linux/quota.h
5998     +++ b/include/linux/quota.h
5999     @@ -263,7 +263,7 @@ enum {
6000     };
6001    
6002     struct dqstats {
6003     - int stat[_DQST_DQSTAT_LAST];
6004     + unsigned long stat[_DQST_DQSTAT_LAST];
6005     struct percpu_counter counter[_DQST_DQSTAT_LAST];
6006     };
6007    
6008     diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
6009     index bc8206a8f30e..61974c4c566b 100644
6010     --- a/include/linux/rculist_nulls.h
6011     +++ b/include/linux/rculist_nulls.h
6012     @@ -100,6 +100,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
6013     first->pprev = &n->next;
6014     }
6015    
6016     +/**
6017     + * hlist_nulls_add_tail_rcu
6018     + * @n: the element to add to the hash list.
6019     + * @h: the list to add to.
6020     + *
6021     + * Description:
6022     + * Adds the specified element to the specified hlist_nulls,
6023     + * while permitting racing traversals.
6024     + *
6025     + * The caller must take whatever precautions are necessary
6026     + * (such as holding appropriate locks) to avoid racing
6027     + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
6028     + * or hlist_nulls_del_rcu(), running on this same list.
6029     + * However, it is perfectly legal to run concurrently with
6030     + * the _rcu list-traversal primitives, such as
6031     + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
6032     + * problems on Alpha CPUs. Regardless of the type of CPU, the
6033     + * list-traversal primitive must be guarded by rcu_read_lock().
6034     + */
6035     +static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
6036     + struct hlist_nulls_head *h)
6037     +{
6038     + struct hlist_nulls_node *i, *last = NULL;
6039     +
6040     + /* Note: write side code, so rcu accessors are not needed. */
6041     + for (i = h->first; !is_a_nulls(i); i = i->next)
6042     + last = i;
6043     +
6044     + if (last) {
6045     + n->next = last->next;
6046     + n->pprev = &last->next;
6047     + rcu_assign_pointer(hlist_next_rcu(last), n);
6048     + } else {
6049     + hlist_nulls_add_head_rcu(n, h);
6050     + }
6051     +}
6052     +
6053     /**
6054     * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
6055     * @tpos: the type * to use as a loop cursor.
6056     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
6057     index 1ba6e2cc2725..6ae88b0c1c31 100644
6058     --- a/include/linux/skbuff.h
6059     +++ b/include/linux/skbuff.h
6060     @@ -1795,7 +1795,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
6061     */
6062     static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
6063     {
6064     - struct sk_buff *skb = list_->prev;
6065     + struct sk_buff *skb = READ_ONCE(list_->prev);
6066    
6067     if (skb == (struct sk_buff *)list_)
6068     skb = NULL;
6069     @@ -1861,7 +1861,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
6070     struct sk_buff *prev, struct sk_buff *next,
6071     struct sk_buff_head *list)
6072     {
6073     - /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
6074     + /* See skb_queue_empty_lockless() and skb_peek_tail()
6075     + * for the opposite READ_ONCE()
6076     + */
6077     WRITE_ONCE(newsk->next, next);
6078     WRITE_ONCE(newsk->prev, prev);
6079     WRITE_ONCE(next->prev, newsk);
6080     diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
6081     index 659a4400517b..e93e249a4e9b 100644
6082     --- a/include/linux/thread_info.h
6083     +++ b/include/linux/thread_info.h
6084     @@ -147,6 +147,8 @@ check_copy_size(const void *addr, size_t bytes, bool is_source)
6085     __bad_copy_to();
6086     return false;
6087     }
6088     + if (WARN_ON_ONCE(bytes > INT_MAX))
6089     + return false;
6090     check_object_size(addr, bytes, is_source);
6091     return true;
6092     }
6093     diff --git a/include/net/dst.h b/include/net/dst.h
6094     index 8224dad2ae94..3448cf865ede 100644
6095     --- a/include/net/dst.h
6096     +++ b/include/net/dst.h
6097     @@ -516,7 +516,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
6098     struct dst_entry *dst = skb_dst(skb);
6099    
6100     if (dst && dst->ops->update_pmtu)
6101     - dst->ops->update_pmtu(dst, NULL, skb, mtu);
6102     + dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
6103     +}
6104     +
6105     +/* update dst pmtu but not do neighbor confirm */
6106     +static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
6107     +{
6108     + struct dst_entry *dst = skb_dst(skb);
6109     +
6110     + if (dst && dst->ops->update_pmtu)
6111     + dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
6112     }
6113    
6114     static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
6115     @@ -526,7 +535,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
6116     u32 encap_mtu = dst_mtu(encap_dst);
6117    
6118     if (skb->len > encap_mtu - headroom)
6119     - skb_dst_update_pmtu(skb, encap_mtu - headroom);
6120     + skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
6121     }
6122    
6123     #endif /* _NET_DST_H */
6124     diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
6125     index 5ec645f27ee3..443863c7b8da 100644
6126     --- a/include/net/dst_ops.h
6127     +++ b/include/net/dst_ops.h
6128     @@ -27,7 +27,8 @@ struct dst_ops {
6129     struct dst_entry * (*negative_advice)(struct dst_entry *);
6130     void (*link_failure)(struct sk_buff *);
6131     void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
6132     - struct sk_buff *skb, u32 mtu);
6133     + struct sk_buff *skb, u32 mtu,
6134     + bool confirm_neigh);
6135     void (*redirect)(struct dst_entry *dst, struct sock *sk,
6136     struct sk_buff *skb);
6137     int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
6138     diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
6139     index af2b4c065a04..d0019d3395cf 100644
6140     --- a/include/net/inet_hashtables.h
6141     +++ b/include/net/inet_hashtables.h
6142     @@ -103,13 +103,19 @@ struct inet_bind_hashbucket {
6143     struct hlist_head chain;
6144     };
6145    
6146     -/*
6147     - * Sockets can be hashed in established or listening table
6148     +/* Sockets can be hashed in established or listening table.
6149     + * We must use different 'nulls' end-of-chain value for all hash buckets :
6150     + * A socket might transition from ESTABLISH to LISTEN state without
6151     + * RCU grace period. A lookup in ehash table needs to handle this case.
6152     */
6153     +#define LISTENING_NULLS_BASE (1U << 29)
6154     struct inet_listen_hashbucket {
6155     spinlock_t lock;
6156     unsigned int count;
6157     - struct hlist_head head;
6158     + union {
6159     + struct hlist_head head;
6160     + struct hlist_nulls_head nulls_head;
6161     + };
6162     };
6163    
6164     /* This is for listening sockets, thus all sockets which possess wildcards. */
6165     diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
6166     index d80acda231ae..47e61956168d 100644
6167     --- a/include/net/sch_generic.h
6168     +++ b/include/net/sch_generic.h
6169     @@ -308,6 +308,7 @@ struct tcf_proto_ops {
6170     int (*delete)(struct tcf_proto *tp, void *arg,
6171     bool *last, bool rtnl_held,
6172     struct netlink_ext_ack *);
6173     + bool (*delete_empty)(struct tcf_proto *tp);
6174     void (*walk)(struct tcf_proto *tp,
6175     struct tcf_walker *arg, bool rtnl_held);
6176     int (*reoffload)(struct tcf_proto *tp, bool add,
6177     @@ -336,6 +337,10 @@ struct tcf_proto_ops {
6178     int flags;
6179     };
6180    
6181     +/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
6182     + * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
6183     + * conditions can occur when filters are inserted/deleted simultaneously.
6184     + */
6185     enum tcf_proto_ops_flags {
6186     TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
6187     };
6188     diff --git a/include/net/sock.h b/include/net/sock.h
6189     index 013396e50b91..e09e2886a836 100644
6190     --- a/include/net/sock.h
6191     +++ b/include/net/sock.h
6192     @@ -723,6 +723,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
6193     hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
6194     }
6195    
6196     +static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
6197     +{
6198     + hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
6199     +}
6200     +
6201     static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
6202     {
6203     sock_hold(sk);
6204     diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
6205     index b71b5c4f418c..533f56733ba8 100644
6206     --- a/include/scsi/iscsi_proto.h
6207     +++ b/include/scsi/iscsi_proto.h
6208     @@ -627,6 +627,7 @@ struct iscsi_reject {
6209     #define ISCSI_REASON_BOOKMARK_INVALID 9
6210     #define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
6211     #define ISCSI_REASON_NEGOTIATION_RESET 11
6212     +#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
6213    
6214     /* Max. number of Key=Value pairs in a text message */
6215     #define MAX_KEY_VALUE_PAIRS 8192
6216     diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
6217     index 545e3869b0e3..551b0eb7028a 100644
6218     --- a/kernel/dma/coherent.c
6219     +++ b/kernel/dma/coherent.c
6220     @@ -123,8 +123,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
6221     return ret;
6222     }
6223    
6224     -static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
6225     - ssize_t size, dma_addr_t *dma_handle)
6226     +static void *__dma_alloc_from_coherent(struct device *dev,
6227     + struct dma_coherent_mem *mem,
6228     + ssize_t size, dma_addr_t *dma_handle)
6229     {
6230     int order = get_order(size);
6231     unsigned long flags;
6232     @@ -143,7 +144,7 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
6233     /*
6234     * Memory was found in the coherent area.
6235     */
6236     - *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
6237     + *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
6238     ret = mem->virt_base + (pageno << PAGE_SHIFT);
6239     spin_unlock_irqrestore(&mem->spinlock, flags);
6240     memset(ret, 0, size);
6241     @@ -175,17 +176,18 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
6242     if (!mem)
6243     return 0;
6244    
6245     - *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
6246     + *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
6247     return 1;
6248     }
6249    
6250     -void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
6251     +void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
6252     + dma_addr_t *dma_handle)
6253     {
6254     if (!dma_coherent_default_memory)
6255     return NULL;
6256    
6257     - return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
6258     - dma_handle);
6259     + return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
6260     + dma_handle);
6261     }
6262    
6263     static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
6264     diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
6265     index 099002d84f46..4ad74f5987ea 100644
6266     --- a/kernel/dma/debug.c
6267     +++ b/kernel/dma/debug.c
6268     @@ -420,6 +420,7 @@ void debug_dma_dump_mappings(struct device *dev)
6269     }
6270    
6271     spin_unlock_irqrestore(&bucket->lock, flags);
6272     + cond_resched();
6273     }
6274     }
6275    
6276     diff --git a/kernel/sysctl.c b/kernel/sysctl.c
6277     index b6f2f35d0bcf..70665934d53e 100644
6278     --- a/kernel/sysctl.c
6279     +++ b/kernel/sysctl.c
6280     @@ -1466,7 +1466,7 @@ static struct ctl_table vm_table[] = {
6281     .procname = "drop_caches",
6282     .data = &sysctl_drop_caches,
6283     .maxlen = sizeof(int),
6284     - .mode = 0644,
6285     + .mode = 0200,
6286     .proc_handler = drop_caches_sysctl_handler,
6287     .extra1 = SYSCTL_ONE,
6288     .extra2 = &four,
6289     diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
6290     index 65605530ee34..7f31932216a1 100644
6291     --- a/kernel/time/hrtimer.c
6292     +++ b/kernel/time/hrtimer.c
6293     @@ -966,7 +966,8 @@ static int enqueue_hrtimer(struct hrtimer *timer,
6294    
6295     base->cpu_base->active_bases |= 1 << base->index;
6296    
6297     - timer->state = HRTIMER_STATE_ENQUEUED;
6298     + /* Pairs with the lockless read in hrtimer_is_queued() */
6299     + WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
6300    
6301     return timerqueue_add(&base->active, &timer->node);
6302     }
6303     @@ -988,7 +989,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
6304     struct hrtimer_cpu_base *cpu_base = base->cpu_base;
6305     u8 state = timer->state;
6306    
6307     - timer->state = newstate;
6308     + /* Pairs with the lockless read in hrtimer_is_queued() */
6309     + WRITE_ONCE(timer->state, newstate);
6310     if (!(state & HRTIMER_STATE_ENQUEUED))
6311     return;
6312    
6313     @@ -1013,8 +1015,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
6314     static inline int
6315     remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
6316     {
6317     - if (hrtimer_is_queued(timer)) {
6318     - u8 state = timer->state;
6319     + u8 state = timer->state;
6320     +
6321     + if (state & HRTIMER_STATE_ENQUEUED) {
6322     int reprogram;
6323    
6324     /*
6325     diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
6326     index ec960bb939fd..200fb2d3be99 100644
6327     --- a/kernel/time/posix-clock.c
6328     +++ b/kernel/time/posix-clock.c
6329     @@ -14,8 +14,6 @@
6330    
6331     #include "posix-timers.h"
6332    
6333     -static void delete_clock(struct kref *kref);
6334     -
6335     /*
6336     * Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
6337     */
6338     @@ -125,7 +123,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
6339     err = 0;
6340    
6341     if (!err) {
6342     - kref_get(&clk->kref);
6343     + get_device(clk->dev);
6344     fp->private_data = clk;
6345     }
6346     out:
6347     @@ -141,7 +139,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
6348     if (clk->ops.release)
6349     err = clk->ops.release(clk);
6350    
6351     - kref_put(&clk->kref, delete_clock);
6352     + put_device(clk->dev);
6353    
6354     fp->private_data = NULL;
6355    
6356     @@ -161,38 +159,35 @@ static const struct file_operations posix_clock_file_operations = {
6357     #endif
6358     };
6359    
6360     -int posix_clock_register(struct posix_clock *clk, dev_t devid)
6361     +int posix_clock_register(struct posix_clock *clk, struct device *dev)
6362     {
6363     int err;
6364    
6365     - kref_init(&clk->kref);
6366     init_rwsem(&clk->rwsem);
6367    
6368     cdev_init(&clk->cdev, &posix_clock_file_operations);
6369     + err = cdev_device_add(&clk->cdev, dev);
6370     + if (err) {
6371     + pr_err("%s unable to add device %d:%d\n",
6372     + dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
6373     + return err;
6374     + }
6375     clk->cdev.owner = clk->ops.owner;
6376     - err = cdev_add(&clk->cdev, devid, 1);
6377     + clk->dev = dev;
6378    
6379     - return err;
6380     + return 0;
6381     }
6382     EXPORT_SYMBOL_GPL(posix_clock_register);
6383    
6384     -static void delete_clock(struct kref *kref)
6385     -{
6386     - struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
6387     -
6388     - if (clk->release)
6389     - clk->release(clk);
6390     -}
6391     -
6392     void posix_clock_unregister(struct posix_clock *clk)
6393     {
6394     - cdev_del(&clk->cdev);
6395     + cdev_device_del(&clk->cdev, clk->dev);
6396    
6397     down_write(&clk->rwsem);
6398     clk->zombie = true;
6399     up_write(&clk->rwsem);
6400    
6401     - kref_put(&clk->kref, delete_clock);
6402     + put_device(clk->dev);
6403     }
6404     EXPORT_SYMBOL_GPL(posix_clock_unregister);
6405    
6406     diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
6407     index af7800103e51..59980ecfc962 100644
6408     --- a/net/bridge/br_netfilter_hooks.c
6409     +++ b/net/bridge/br_netfilter_hooks.c
6410     @@ -662,6 +662,9 @@ static unsigned int br_nf_forward_arp(void *priv,
6411     nf_bridge_pull_encap_header(skb);
6412     }
6413    
6414     + if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
6415     + return NF_DROP;
6416     +
6417     if (arp_hdr(skb)->ar_pln != 4) {
6418     if (is_vlan_arp(skb, state->net))
6419     nf_bridge_push_encap_header(skb);
6420     diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
6421     index 2cdfc5d6c25d..8c69f0c95a8e 100644
6422     --- a/net/bridge/br_nf_core.c
6423     +++ b/net/bridge/br_nf_core.c
6424     @@ -22,7 +22,8 @@
6425     #endif
6426    
6427     static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
6428     - struct sk_buff *skb, u32 mtu)
6429     + struct sk_buff *skb, u32 mtu,
6430     + bool confirm_neigh)
6431     {
6432     }
6433    
6434     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
6435     index 4096d8a74a2b..e1256e03a9a8 100644
6436     --- a/net/bridge/netfilter/ebtables.c
6437     +++ b/net/bridge/netfilter/ebtables.c
6438     @@ -1867,7 +1867,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
6439     }
6440    
6441     static int ebt_buf_add(struct ebt_entries_buf_state *state,
6442     - void *data, unsigned int sz)
6443     + const void *data, unsigned int sz)
6444     {
6445     if (state->buf_kern_start == NULL)
6446     goto count_only;
6447     @@ -1901,7 +1901,7 @@ enum compat_mwt {
6448     EBT_COMPAT_TARGET,
6449     };
6450    
6451     -static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
6452     +static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
6453     enum compat_mwt compat_mwt,
6454     struct ebt_entries_buf_state *state,
6455     const unsigned char *base)
6456     @@ -1979,22 +1979,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
6457     /* return size of all matches, watchers or target, including necessary
6458     * alignment and padding.
6459     */
6460     -static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
6461     +static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
6462     unsigned int size_left, enum compat_mwt type,
6463     struct ebt_entries_buf_state *state, const void *base)
6464     {
6465     + const char *buf = (const char *)match32;
6466     int growth = 0;
6467     - char *buf;
6468    
6469     if (size_left == 0)
6470     return 0;
6471    
6472     - buf = (char *) match32;
6473     -
6474     - while (size_left >= sizeof(*match32)) {
6475     + do {
6476     struct ebt_entry_match *match_kern;
6477     int ret;
6478    
6479     + if (size_left < sizeof(*match32))
6480     + return -EINVAL;
6481     +
6482     match_kern = (struct ebt_entry_match *) state->buf_kern_start;
6483     if (match_kern) {
6484     char *tmp;
6485     @@ -2031,22 +2032,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
6486     if (match_kern)
6487     match_kern->match_size = ret;
6488    
6489     - /* rule should have no remaining data after target */
6490     - if (type == EBT_COMPAT_TARGET && size_left)
6491     - return -EINVAL;
6492     -
6493     match32 = (struct compat_ebt_entry_mwt *) buf;
6494     - }
6495     + } while (size_left);
6496    
6497     return growth;
6498     }
6499    
6500     /* called for all ebt_entry structures. */
6501     -static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
6502     +static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
6503     unsigned int *total,
6504     struct ebt_entries_buf_state *state)
6505     {
6506     - unsigned int i, j, startoff, new_offset = 0;
6507     + unsigned int i, j, startoff, next_expected_off, new_offset = 0;
6508     /* stores match/watchers/targets & offset of next struct ebt_entry: */
6509     unsigned int offsets[4];
6510     unsigned int *offsets_update = NULL;
6511     @@ -2132,11 +2129,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
6512     return ret;
6513     }
6514    
6515     - startoff = state->buf_user_offset - startoff;
6516     + next_expected_off = state->buf_user_offset - startoff;
6517     + if (next_expected_off != entry->next_offset)
6518     + return -EINVAL;
6519    
6520     - if (WARN_ON(*total < startoff))
6521     + if (*total < entry->next_offset)
6522     return -EINVAL;
6523     - *total -= startoff;
6524     + *total -= entry->next_offset;
6525     return 0;
6526     }
6527    
6528     diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
6529     index aea918135ec3..08c3dc45f1a4 100644
6530     --- a/net/decnet/dn_route.c
6531     +++ b/net/decnet/dn_route.c
6532     @@ -110,7 +110,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
6533     static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
6534     static void dn_dst_link_failure(struct sk_buff *);
6535     static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
6536     - struct sk_buff *skb , u32 mtu);
6537     + struct sk_buff *skb , u32 mtu,
6538     + bool confirm_neigh);
6539     static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
6540     struct sk_buff *skb);
6541     static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
6542     @@ -251,7 +252,8 @@ static int dn_dst_gc(struct dst_ops *ops)
6543     * advertise to the other end).
6544     */
6545     static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
6546     - struct sk_buff *skb, u32 mtu)
6547     + struct sk_buff *skb, u32 mtu,
6548     + bool confirm_neigh)
6549     {
6550     struct dn_route *rt = (struct dn_route *) dst;
6551     struct neighbour *n = rt->n;
6552     diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
6553     index 4298aae74e0e..ac95ba78b903 100644
6554     --- a/net/ipv4/icmp.c
6555     +++ b/net/ipv4/icmp.c
6556     @@ -249,10 +249,11 @@ bool icmp_global_allow(void)
6557     bool rc = false;
6558    
6559     /* Check if token bucket is empty and cannot be refilled
6560     - * without taking the spinlock.
6561     + * without taking the spinlock. The READ_ONCE() are paired
6562     + * with the following WRITE_ONCE() in this same function.
6563     */
6564     - if (!icmp_global.credit) {
6565     - delta = min_t(u32, now - icmp_global.stamp, HZ);
6566     + if (!READ_ONCE(icmp_global.credit)) {
6567     + delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
6568     if (delta < HZ / 50)
6569     return false;
6570     }
6571     @@ -262,14 +263,14 @@ bool icmp_global_allow(void)
6572     if (delta >= HZ / 50) {
6573     incr = sysctl_icmp_msgs_per_sec * delta / HZ ;
6574     if (incr)
6575     - icmp_global.stamp = now;
6576     + WRITE_ONCE(icmp_global.stamp, now);
6577     }
6578     credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
6579     if (credit) {
6580     credit--;
6581     rc = true;
6582     }
6583     - icmp_global.credit = credit;
6584     + WRITE_ONCE(icmp_global.credit, credit);
6585     spin_unlock(&icmp_global.lock);
6586     return rc;
6587     }
6588     diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
6589     index eb30fc1770de..ac05e273bc66 100644
6590     --- a/net/ipv4/inet_connection_sock.c
6591     +++ b/net/ipv4/inet_connection_sock.c
6592     @@ -1086,7 +1086,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
6593     if (!dst)
6594     goto out;
6595     }
6596     - dst->ops->update_pmtu(dst, sk, NULL, mtu);
6597     + dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
6598    
6599     dst = __sk_dst_check(sk, 0);
6600     if (!dst)
6601     diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
6602     index 7dc79b973e6e..6a4c82f96e78 100644
6603     --- a/net/ipv4/inet_diag.c
6604     +++ b/net/ipv4/inet_diag.c
6605     @@ -914,11 +914,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
6606    
6607     for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
6608     struct inet_listen_hashbucket *ilb;
6609     + struct hlist_nulls_node *node;
6610    
6611     num = 0;
6612     ilb = &hashinfo->listening_hash[i];
6613     spin_lock(&ilb->lock);
6614     - sk_for_each(sk, &ilb->head) {
6615     + sk_nulls_for_each(sk, node, &ilb->nulls_head) {
6616     struct inet_sock *inet = inet_sk(sk);
6617    
6618     if (!net_eq(sock_net(sk), net))
6619     diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
6620     index 83fb00153018..2bbaaf0c7176 100644
6621     --- a/net/ipv4/inet_hashtables.c
6622     +++ b/net/ipv4/inet_hashtables.c
6623     @@ -516,10 +516,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
6624     struct inet_listen_hashbucket *ilb)
6625     {
6626     struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
6627     + const struct hlist_nulls_node *node;
6628     struct sock *sk2;
6629     kuid_t uid = sock_i_uid(sk);
6630    
6631     - sk_for_each_rcu(sk2, &ilb->head) {
6632     + sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
6633     if (sk2 != sk &&
6634     sk2->sk_family == sk->sk_family &&
6635     ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
6636     @@ -555,9 +556,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
6637     }
6638     if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
6639     sk->sk_family == AF_INET6)
6640     - hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
6641     + __sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
6642     else
6643     - hlist_add_head_rcu(&sk->sk_node, &ilb->head);
6644     + __sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
6645     inet_hash2(hashinfo, sk);
6646     ilb->count++;
6647     sock_set_flag(sk, SOCK_RCU_FREE);
6648     @@ -606,11 +607,9 @@ void inet_unhash(struct sock *sk)
6649     reuseport_detach_sock(sk);
6650     if (ilb) {
6651     inet_unhash2(hashinfo, sk);
6652     - __sk_del_node_init(sk);
6653     - ilb->count--;
6654     - } else {
6655     - __sk_nulls_del_node_init_rcu(sk);
6656     + ilb->count--;
6657     }
6658     + __sk_nulls_del_node_init_rcu(sk);
6659     sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
6660     unlock:
6661     spin_unlock_bh(lock);
6662     @@ -750,7 +749,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
6663    
6664     for (i = 0; i < INET_LHTABLE_SIZE; i++) {
6665     spin_lock_init(&h->listening_hash[i].lock);
6666     - INIT_HLIST_HEAD(&h->listening_hash[i].head);
6667     + INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
6668     + i + LISTENING_NULLS_BASE);
6669     h->listening_hash[i].count = 0;
6670     }
6671    
6672     diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
6673     index be778599bfed..ff327a62c9ce 100644
6674     --- a/net/ipv4/inetpeer.c
6675     +++ b/net/ipv4/inetpeer.c
6676     @@ -160,7 +160,12 @@ static void inet_peer_gc(struct inet_peer_base *base,
6677     base->total / inet_peer_threshold * HZ;
6678     for (i = 0; i < gc_cnt; i++) {
6679     p = gc_stack[i];
6680     - delta = (__u32)jiffies - p->dtime;
6681     +
6682     + /* The READ_ONCE() pairs with the WRITE_ONCE()
6683     + * in inet_putpeer()
6684     + */
6685     + delta = (__u32)jiffies - READ_ONCE(p->dtime);
6686     +
6687     if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
6688     gc_stack[i] = NULL;
6689     }
6690     @@ -237,7 +242,10 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
6691    
6692     void inet_putpeer(struct inet_peer *p)
6693     {
6694     - p->dtime = (__u32)jiffies;
6695     + /* The WRITE_ONCE() pairs with itself (we run lockless)
6696     + * and the READ_ONCE() in inet_peer_gc()
6697     + */
6698     + WRITE_ONCE(p->dtime, (__u32)jiffies);
6699    
6700     if (refcount_dec_and_test(&p->refcnt))
6701     call_rcu(&p->rcu, inetpeer_free_rcu);
6702     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
6703     index 38c02bb62e2c..0fe2a5d3e258 100644
6704     --- a/net/ipv4/ip_tunnel.c
6705     +++ b/net/ipv4/ip_tunnel.c
6706     @@ -505,7 +505,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
6707     mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
6708    
6709     if (skb_valid_dst(skb))
6710     - skb_dst_update_pmtu(skb, mtu);
6711     + skb_dst_update_pmtu_no_confirm(skb, mtu);
6712    
6713     if (skb->protocol == htons(ETH_P_IP)) {
6714     if (!skb_is_gso(skb) &&
6715     diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
6716     index cfb025606793..fb9f6d60c27c 100644
6717     --- a/net/ipv4/ip_vti.c
6718     +++ b/net/ipv4/ip_vti.c
6719     @@ -214,7 +214,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
6720    
6721     mtu = dst_mtu(dst);
6722     if (skb->len > mtu) {
6723     - skb_dst_update_pmtu(skb, mtu);
6724     + skb_dst_update_pmtu_no_confirm(skb, mtu);
6725     if (skb->protocol == htons(ETH_P_IP)) {
6726     icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
6727     htonl(mtu));
6728     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
6729     index 621f83434b24..fe34e9e0912a 100644
6730     --- a/net/ipv4/route.c
6731     +++ b/net/ipv4/route.c
6732     @@ -139,7 +139,8 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst);
6733     static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
6734     static void ipv4_link_failure(struct sk_buff *skb);
6735     static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
6736     - struct sk_buff *skb, u32 mtu);
6737     + struct sk_buff *skb, u32 mtu,
6738     + bool confirm_neigh);
6739     static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
6740     struct sk_buff *skb);
6741     static void ipv4_dst_destroy(struct dst_entry *dst);
6742     @@ -1043,7 +1044,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
6743     }
6744    
6745     static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
6746     - struct sk_buff *skb, u32 mtu)
6747     + struct sk_buff *skb, u32 mtu,
6748     + bool confirm_neigh)
6749     {
6750     struct rtable *rt = (struct rtable *) dst;
6751     struct flowi4 fl4;
6752     @@ -2648,7 +2650,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
6753     }
6754    
6755     static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
6756     - struct sk_buff *skb, u32 mtu)
6757     + struct sk_buff *skb, u32 mtu,
6758     + bool confirm_neigh)
6759     {
6760     }
6761    
6762     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
6763     index 67b2dc7a1727..eda64871f983 100644
6764     --- a/net/ipv4/tcp_ipv4.c
6765     +++ b/net/ipv4/tcp_ipv4.c
6766     @@ -2149,13 +2149,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
6767     struct tcp_iter_state *st = seq->private;
6768     struct net *net = seq_file_net(seq);
6769     struct inet_listen_hashbucket *ilb;
6770     + struct hlist_nulls_node *node;
6771     struct sock *sk = cur;
6772    
6773     if (!sk) {
6774     get_head:
6775     ilb = &tcp_hashinfo.listening_hash[st->bucket];
6776     spin_lock(&ilb->lock);
6777     - sk = sk_head(&ilb->head);
6778     + sk = sk_nulls_head(&ilb->nulls_head);
6779     st->offset = 0;
6780     goto get_sk;
6781     }
6782     @@ -2163,9 +2164,9 @@ get_head:
6783     ++st->num;
6784     ++st->offset;
6785    
6786     - sk = sk_next(sk);
6787     + sk = sk_nulls_next(sk);
6788     get_sk:
6789     - sk_for_each_from(sk) {
6790     + sk_nulls_for_each_from(sk, node) {
6791     if (!net_eq(sock_net(sk), net))
6792     continue;
6793     if (sk->sk_family == afinfo->family)
6794     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
6795     index 762edd800d78..0269584e9cf7 100644
6796     --- a/net/ipv4/tcp_output.c
6797     +++ b/net/ipv4/tcp_output.c
6798     @@ -72,6 +72,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
6799     __skb_unlink(skb, &sk->sk_write_queue);
6800     tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
6801    
6802     + if (tp->highest_sack == NULL)
6803     + tp->highest_sack = skb;
6804     +
6805     tp->packets_out += tcp_skb_pcount(skb);
6806     if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
6807     tcp_rearm_rto(sk);
6808     @@ -2438,6 +2441,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
6809     if (tcp_small_queue_check(sk, skb, 0))
6810     break;
6811    
6812     + /* Argh, we hit an empty skb(), presumably a thread
6813     + * is sleeping in sendmsg()/sk_stream_wait_memory().
6814     + * We do not want to send a pure-ack packet and have
6815     + * a strange looking rtx queue with empty packet(s).
6816     + */
6817     + if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
6818     + break;
6819     +
6820     if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
6821     break;
6822    
6823     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
6824     index 447defbfccdd..7aa4e77161f6 100644
6825     --- a/net/ipv4/udp.c
6826     +++ b/net/ipv4/udp.c
6827     @@ -1475,7 +1475,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
6828     * queue contains some other skb
6829     */
6830     rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
6831     - if (rmem > (size + sk->sk_rcvbuf))
6832     + if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
6833     goto uncharge_drop;
6834    
6835     spin_lock(&list->lock);
6836     diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
6837     index 35b84b52b702..9ebd54752e03 100644
6838     --- a/net/ipv4/xfrm4_policy.c
6839     +++ b/net/ipv4/xfrm4_policy.c
6840     @@ -100,12 +100,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
6841     }
6842    
6843     static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
6844     - struct sk_buff *skb, u32 mtu)
6845     + struct sk_buff *skb, u32 mtu,
6846     + bool confirm_neigh)
6847     {
6848     struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
6849     struct dst_entry *path = xdst->route;
6850    
6851     - path->ops->update_pmtu(path, sk, skb, mtu);
6852     + path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
6853     }
6854    
6855     static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
6856     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
6857     index 34ccef18b40e..f9b5690e94fd 100644
6858     --- a/net/ipv6/addrconf.c
6859     +++ b/net/ipv6/addrconf.c
6860     @@ -5231,16 +5231,16 @@ static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
6861     return -EINVAL;
6862     }
6863    
6864     + if (!netlink_strict_get_check(skb))
6865     + return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
6866     + ifa_ipv6_policy, extack);
6867     +
6868     ifm = nlmsg_data(nlh);
6869     if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
6870     NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
6871     return -EINVAL;
6872     }
6873    
6874     - if (!netlink_strict_get_check(skb))
6875     - return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
6876     - ifa_ipv6_policy, extack);
6877     -
6878     err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
6879     ifa_ipv6_policy, extack);
6880     if (err)
6881     diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
6882     index fe9cb8d1adca..e315526fa244 100644
6883     --- a/net/ipv6/inet6_connection_sock.c
6884     +++ b/net/ipv6/inet6_connection_sock.c
6885     @@ -146,7 +146,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
6886    
6887     if (IS_ERR(dst))
6888     return NULL;
6889     - dst->ops->update_pmtu(dst, sk, NULL, mtu);
6890     + dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
6891    
6892     dst = inet6_csk_route_socket(sk, &fl6);
6893     return IS_ERR(dst) ? NULL : dst;
6894     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
6895     index 923034c52ce4..189de56f5e36 100644
6896     --- a/net/ipv6/ip6_gre.c
6897     +++ b/net/ipv6/ip6_gre.c
6898     @@ -1040,7 +1040,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
6899    
6900     /* TooBig packet may have updated dst->dev's mtu */
6901     if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
6902     - dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
6903     + dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
6904    
6905     err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
6906     NEXTHDR_GRE);
6907     diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
6908     index 754a484d35df..2f376dbc37d5 100644
6909     --- a/net/ipv6/ip6_tunnel.c
6910     +++ b/net/ipv6/ip6_tunnel.c
6911     @@ -640,7 +640,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
6912     if (rel_info > dst_mtu(skb_dst(skb2)))
6913     goto out;
6914    
6915     - skb_dst_update_pmtu(skb2, rel_info);
6916     + skb_dst_update_pmtu_no_confirm(skb2, rel_info);
6917     }
6918    
6919     icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
6920     @@ -1132,7 +1132,7 @@ route_lookup:
6921     mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
6922     IPV6_MIN_MTU : IPV4_MIN_MTU);
6923    
6924     - skb_dst_update_pmtu(skb, mtu);
6925     + skb_dst_update_pmtu_no_confirm(skb, mtu);
6926     if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
6927     *pmtu = mtu;
6928     err = -EMSGSIZE;
6929     diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
6930     index 024db17386d2..6f08b760c2a7 100644
6931     --- a/net/ipv6/ip6_vti.c
6932     +++ b/net/ipv6/ip6_vti.c
6933     @@ -479,7 +479,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
6934    
6935     mtu = dst_mtu(dst);
6936     if (skb->len > mtu) {
6937     - skb_dst_update_pmtu(skb, mtu);
6938     + skb_dst_update_pmtu_no_confirm(skb, mtu);
6939    
6940     if (skb->protocol == htons(ETH_P_IPV6)) {
6941     if (mtu < IPV6_MIN_MTU)
6942     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
6943     index 3f83ea851ebf..e4ed9c7b43b0 100644
6944     --- a/net/ipv6/route.c
6945     +++ b/net/ipv6/route.c
6946     @@ -95,7 +95,8 @@ static int ip6_pkt_prohibit(struct sk_buff *skb);
6947     static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
6948     static void ip6_link_failure(struct sk_buff *skb);
6949     static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
6950     - struct sk_buff *skb, u32 mtu);
6951     + struct sk_buff *skb, u32 mtu,
6952     + bool confirm_neigh);
6953     static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
6954     struct sk_buff *skb);
6955     static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
6956     @@ -264,7 +265,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
6957     }
6958    
6959     static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
6960     - struct sk_buff *skb, u32 mtu)
6961     + struct sk_buff *skb, u32 mtu,
6962     + bool confirm_neigh)
6963     {
6964     }
6965    
6966     @@ -2695,7 +2697,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
6967     }
6968    
6969     static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
6970     - const struct ipv6hdr *iph, u32 mtu)
6971     + const struct ipv6hdr *iph, u32 mtu,
6972     + bool confirm_neigh)
6973     {
6974     const struct in6_addr *daddr, *saddr;
6975     struct rt6_info *rt6 = (struct rt6_info *)dst;
6976     @@ -2713,7 +2716,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
6977     daddr = NULL;
6978     saddr = NULL;
6979     }
6980     - dst_confirm_neigh(dst, daddr);
6981     +
6982     + if (confirm_neigh)
6983     + dst_confirm_neigh(dst, daddr);
6984     +
6985     mtu = max_t(u32, mtu, IPV6_MIN_MTU);
6986     if (mtu >= dst_mtu(dst))
6987     return;
6988     @@ -2767,9 +2773,11 @@ out_unlock:
6989     }
6990    
6991     static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
6992     - struct sk_buff *skb, u32 mtu)
6993     + struct sk_buff *skb, u32 mtu,
6994     + bool confirm_neigh)
6995     {
6996     - __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
6997     + __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
6998     + confirm_neigh);
6999     }
7000    
7001     void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
7002     @@ -2788,7 +2796,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
7003    
7004     dst = ip6_route_output(net, NULL, &fl6);
7005     if (!dst->error)
7006     - __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
7007     + __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
7008     dst_release(dst);
7009     }
7010     EXPORT_SYMBOL_GPL(ip6_update_pmtu);
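
Several hunks in this patch thread a new confirm_neigh flag through the update_pmtu callbacks: in the route.c change above, __ip6_rt_update_pmtu() now calls dst_confirm_neigh() only when the flag is set, while the tunnel call sites elsewhere in the patch switch to skb_dst_update_pmtu_no_confirm() or pass false, presumably so a PMTU update triggered by an incoming ICMP error no longer refreshes the neighbour entry. A minimal standalone sketch of the callback pattern, using toy types rather than the kernel's struct dst_ops:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins for the kernel structures; names are illustrative only. */
    struct toy_dst;

    struct toy_dst_ops {
            /* Mirrors the new signature: the caller now says whether it trusts
             * the source of the update enough to confirm the neighbour. */
            void (*update_pmtu)(struct toy_dst *dst, unsigned int mtu,
                                bool confirm_neigh);
    };

    struct toy_dst {
            const struct toy_dst_ops *ops;
            unsigned int mtu;
            bool neigh_confirmed;
    };

    static void toy_update_pmtu(struct toy_dst *dst, unsigned int mtu,
                                bool confirm_neigh)
    {
            if (confirm_neigh)              /* analogous to the guarded dst_confirm_neigh() */
                    dst->neigh_confirmed = true;
            if (mtu < dst->mtu)
                    dst->mtu = mtu;
    }

    static const struct toy_dst_ops toy_ops = { .update_pmtu = toy_update_pmtu };

    int main(void)
    {
            struct toy_dst dst = { .ops = &toy_ops, .mtu = 1500 };

            /* A locally generated update may confirm the neighbour... */
            dst.ops->update_pmtu(&dst, 1400, true);
            /* ...while a tunnel reacting to an ICMP "packet too big" would not. */
            dst.ops->update_pmtu(&dst, 1280, false);

            printf("mtu=%u confirmed=%d\n", dst.mtu, dst.neigh_confirmed);
            return 0;
    }
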
7011     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
7012     index b2ccbc473127..98954830c40b 100644
7013     --- a/net/ipv6/sit.c
7014     +++ b/net/ipv6/sit.c
7015     @@ -944,7 +944,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
7016     }
7017    
7018     if (tunnel->parms.iph.daddr)
7019     - skb_dst_update_pmtu(skb, mtu);
7020     + skb_dst_update_pmtu_no_confirm(skb, mtu);
7021    
7022     if (skb->len > mtu && !skb_is_gso(skb)) {
7023     icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
7024     diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
7025     index 699e0730ce8e..af7a4b8b1e9c 100644
7026     --- a/net/ipv6/xfrm6_policy.c
7027     +++ b/net/ipv6/xfrm6_policy.c
7028     @@ -98,12 +98,13 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
7029     }
7030    
7031     static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
7032     - struct sk_buff *skb, u32 mtu)
7033     + struct sk_buff *skb, u32 mtu,
7034     + bool confirm_neigh)
7035     {
7036     struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
7037     struct dst_entry *path = xdst->route;
7038    
7039     - path->ops->update_pmtu(path, sk, skb, mtu);
7040     + path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
7041     }
7042    
7043     static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
7044     diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
7045     index 888d3068a492..c62a131a6094 100644
7046     --- a/net/netfilter/ipvs/ip_vs_xmit.c
7047     +++ b/net/netfilter/ipvs/ip_vs_xmit.c
7048     @@ -208,7 +208,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
7049     struct rtable *ort = skb_rtable(skb);
7050    
7051     if (!skb->dev && sk && sk_fullsock(sk))
7052     - ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
7053     + ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true);
7054     }
7055    
7056     static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
7057     diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
7058     index 08923b21e566..f0df0d90b8bd 100644
7059     --- a/net/sched/act_mirred.c
7060     +++ b/net/sched/act_mirred.c
7061     @@ -219,8 +219,10 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
7062     bool use_reinsert;
7063     bool want_ingress;
7064     bool is_redirect;
7065     + bool expects_nh;
7066     int m_eaction;
7067     int mac_len;
7068     + bool at_nh;
7069    
7070     rec_level = __this_cpu_inc_return(mirred_rec_level);
7071     if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
7072     @@ -261,19 +263,19 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
7073     goto out;
7074     }
7075    
7076     - /* If action's target direction differs than filter's direction,
7077     - * and devices expect a mac header on xmit, then mac push/pull is
7078     - * needed.
7079     - */
7080     want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
7081     - if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
7082     - if (!skb_at_tc_ingress(skb)) {
7083     - /* caught at egress, act ingress: pull mac */
7084     - mac_len = skb_network_header(skb) - skb_mac_header(skb);
7085     +
7086     + expects_nh = want_ingress || !m_mac_header_xmit;
7087     + at_nh = skb->data == skb_network_header(skb);
7088     + if (at_nh != expects_nh) {
7089     + mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
7090     + skb_network_header(skb) - skb_mac_header(skb);
7091     + if (expects_nh) {
7092     + /* target device/action expect data at nh */
7093     skb_pull_rcsum(skb2, mac_len);
7094     } else {
7095     - /* caught at ingress, act egress: push mac */
7096     - skb_push_rcsum(skb2, skb->mac_len);
7097     + /* target device/action expect data at mac */
7098     + skb_push_rcsum(skb2, mac_len);
7099     }
7100     }
7101    
7102     diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
7103     index 6a0eacafdb19..76e0d122616a 100644
7104     --- a/net/sched/cls_api.c
7105     +++ b/net/sched/cls_api.c
7106     @@ -308,33 +308,12 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
7107     tcf_proto_destroy(tp, rtnl_held, true, extack);
7108     }
7109    
7110     -static int walker_check_empty(struct tcf_proto *tp, void *fh,
7111     - struct tcf_walker *arg)
7112     +static bool tcf_proto_check_delete(struct tcf_proto *tp)
7113     {
7114     - if (fh) {
7115     - arg->nonempty = true;
7116     - return -1;
7117     - }
7118     - return 0;
7119     -}
7120     -
7121     -static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
7122     -{
7123     - struct tcf_walker walker = { .fn = walker_check_empty, };
7124     -
7125     - if (tp->ops->walk) {
7126     - tp->ops->walk(tp, &walker, rtnl_held);
7127     - return !walker.nonempty;
7128     - }
7129     - return true;
7130     -}
7131     + if (tp->ops->delete_empty)
7132     + return tp->ops->delete_empty(tp);
7133    
7134     -static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
7135     -{
7136     - spin_lock(&tp->lock);
7137     - if (tcf_proto_is_empty(tp, rtnl_held))
7138     - tp->deleting = true;
7139     - spin_unlock(&tp->lock);
7140     + tp->deleting = true;
7141     return tp->deleting;
7142     }
7143    
7144     @@ -1751,7 +1730,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
7145     * concurrently.
7146     * Mark tp for deletion if it is empty.
7147     */
7148     - if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
7149     + if (!tp_iter || !tcf_proto_check_delete(tp)) {
7150     mutex_unlock(&chain->filter_chain_lock);
7151     return;
7152     }
7153     diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
7154     index 4ac110bf19c5..5cf8163710c8 100644
7155     --- a/net/sched/cls_flower.c
7156     +++ b/net/sched/cls_flower.c
7157     @@ -2519,6 +2519,17 @@ static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
7158     f->res.class = cl;
7159     }
7160    
7161     +static bool fl_delete_empty(struct tcf_proto *tp)
7162     +{
7163     + struct cls_fl_head *head = fl_head_dereference(tp);
7164     +
7165     + spin_lock(&tp->lock);
7166     + tp->deleting = idr_is_empty(&head->handle_idr);
7167     + spin_unlock(&tp->lock);
7168     +
7169     + return tp->deleting;
7170     +}
7171     +
7172     static struct tcf_proto_ops cls_fl_ops __read_mostly = {
7173     .kind = "flower",
7174     .classify = fl_classify,
7175     @@ -2528,6 +2539,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
7176     .put = fl_put,
7177     .change = fl_change,
7178     .delete = fl_delete,
7179     + .delete_empty = fl_delete_empty,
7180     .walk = fl_walk,
7181     .reoffload = fl_reoffload,
7182     .hw_add = fl_hw_add,
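
The cls_api.c and cls_flower.c hunks above replace the walker-based emptiness probe with an optional delete_empty op: the core asks the classifier directly, flower answers via idr_is_empty() on its handle IDR under tp->lock, and a classifier without the op is simply marked for deletion. A rough standalone sketch of that optional-callback pattern, with toy names rather than the kernel's tcf_proto_ops:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy classifier with an optional delete_empty() hook, loosely modelled on
     * the tcf_proto_ops change above; names and fields are illustrative only. */
    struct toy_proto;

    struct toy_proto_ops {
            bool (*delete_empty)(struct toy_proto *tp);  /* optional */
    };

    struct toy_proto {
            const struct toy_proto_ops *ops;
            int filter_count;   /* stands in for flower's handle IDR */
            bool deleting;
    };

    static bool toy_flower_delete_empty(struct toy_proto *tp)
    {
            /* flower checks idr_is_empty(&head->handle_idr) under tp->lock;
             * here a simple counter plays that role. */
            tp->deleting = (tp->filter_count == 0);
            return tp->deleting;
    }

    static bool toy_check_delete(struct toy_proto *tp)
    {
            if (tp->ops->delete_empty)
                    return tp->ops->delete_empty(tp);

            /* No hook: like the patched tcf_proto_check_delete(), mark the
             * proto for deletion unconditionally. */
            tp->deleting = true;
            return tp->deleting;
    }

    static const struct toy_proto_ops toy_flower_ops = {
            .delete_empty = toy_flower_delete_empty,
    };

    int main(void)
    {
            struct toy_proto tp = { .ops = &toy_flower_ops, .filter_count = 2 };

            printf("delete now? %d\n", toy_check_delete(&tp)); /* 0: still has filters */
            tp.filter_count = 0;
            printf("delete now? %d\n", toy_check_delete(&tp)); /* 1: empty, mark deleting */
            return 0;
    }
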
7183     diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
7184     index 98dd87ce1510..78ecdf146882 100644
7185     --- a/net/sched/sch_fq.c
7186     +++ b/net/sched/sch_fq.c
7187     @@ -301,6 +301,9 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
7188     f->socket_hash != sk->sk_hash)) {
7189     f->credit = q->initial_quantum;
7190     f->socket_hash = sk->sk_hash;
7191     + if (q->rate_enable)
7192     + smp_store_release(&sk->sk_pacing_status,
7193     + SK_PACING_FQ);
7194     if (fq_flow_is_throttled(f))
7195     fq_flow_unset_throttled(q, f);
7196     f->time_next_packet = 0ULL;
7197     @@ -322,8 +325,12 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
7198    
7199     fq_flow_set_detached(f);
7200     f->sk = sk;
7201     - if (skb->sk == sk)
7202     + if (skb->sk == sk) {
7203     f->socket_hash = sk->sk_hash;
7204     + if (q->rate_enable)
7205     + smp_store_release(&sk->sk_pacing_status,
7206     + SK_PACING_FQ);
7207     + }
7208     f->credit = q->initial_quantum;
7209    
7210     rb_link_node(&f->fq_node, parent, p);
7211     @@ -428,17 +435,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
7212     f->qlen++;
7213     qdisc_qstats_backlog_inc(sch, skb);
7214     if (fq_flow_is_detached(f)) {
7215     - struct sock *sk = skb->sk;
7216     -
7217     fq_flow_add_tail(&q->new_flows, f);
7218     if (time_after(jiffies, f->age + q->flow_refill_delay))
7219     f->credit = max_t(u32, f->credit, q->quantum);
7220     - if (sk && q->rate_enable) {
7221     - if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
7222     - SK_PACING_FQ))
7223     - smp_store_release(&sk->sk_pacing_status,
7224     - SK_PACING_FQ);
7225     - }
7226     q->inactive_flows--;
7227     }
7228    
7229     diff --git a/net/sctp/stream.c b/net/sctp/stream.c
7230     index 6a30392068a0..c1a100d2fed3 100644
7231     --- a/net/sctp/stream.c
7232     +++ b/net/sctp/stream.c
7233     @@ -84,10 +84,8 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
7234     return 0;
7235    
7236     ret = genradix_prealloc(&stream->out, outcnt, gfp);
7237     - if (ret) {
7238     - genradix_free(&stream->out);
7239     + if (ret)
7240     return ret;
7241     - }
7242    
7243     stream->outcnt = outcnt;
7244     return 0;
7245     @@ -102,10 +100,8 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
7246     return 0;
7247    
7248     ret = genradix_prealloc(&stream->in, incnt, gfp);
7249     - if (ret) {
7250     - genradix_free(&stream->in);
7251     + if (ret)
7252     return ret;
7253     - }
7254    
7255     stream->incnt = incnt;
7256     return 0;
7257     @@ -123,7 +119,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
7258     * a new one with new outcnt to save memory if needed.
7259     */
7260     if (outcnt == stream->outcnt)
7261     - goto in;
7262     + goto handle_in;
7263    
7264     /* Filter out chunks queued on streams that won't exist anymore */
7265     sched->unsched_all(stream);
7266     @@ -132,24 +128,28 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
7267    
7268     ret = sctp_stream_alloc_out(stream, outcnt, gfp);
7269     if (ret)
7270     - goto out;
7271     + goto out_err;
7272    
7273     for (i = 0; i < stream->outcnt; i++)
7274     SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
7275    
7276     -in:
7277     +handle_in:
7278     sctp_stream_interleave_init(stream);
7279     if (!incnt)
7280     goto out;
7281    
7282     ret = sctp_stream_alloc_in(stream, incnt, gfp);
7283     - if (ret) {
7284     - sched->free(stream);
7285     - genradix_free(&stream->out);
7286     - stream->outcnt = 0;
7287     - goto out;
7288     - }
7289     + if (ret)
7290     + goto in_err;
7291     +
7292     + goto out;
7293    
7294     +in_err:
7295     + sched->free(stream);
7296     + genradix_free(&stream->in);
7297     +out_err:
7298     + genradix_free(&stream->out);
7299     + stream->outcnt = 0;
7300     out:
7301     return ret;
7302     }
7303     diff --git a/net/sctp/transport.c b/net/sctp/transport.c
7304     index 7235a6032671..3bbe1a58ec87 100644
7305     --- a/net/sctp/transport.c
7306     +++ b/net/sctp/transport.c
7307     @@ -263,7 +263,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
7308    
7309     pf->af->from_sk(&addr, sk);
7310     pf->to_sk_daddr(&t->ipaddr, sk);
7311     - dst->ops->update_pmtu(dst, sk, NULL, pmtu);
7312     + dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
7313     pf->to_sk_daddr(&addr, sk);
7314    
7315     dst = sctp_transport_dst_check(t);
7316     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
7317     index 737b49909a7a..6a6d3b2aa5a9 100644
7318     --- a/net/smc/af_smc.c
7319     +++ b/net/smc/af_smc.c
7320     @@ -854,6 +854,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
7321     goto out;
7322    
7323     sock_hold(&smc->sk); /* sock put in passive closing */
7324     + if (smc->use_fallback)
7325     + goto out;
7326     if (flags & O_NONBLOCK) {
7327     if (schedule_work(&smc->connect_work))
7328     smc->connect_nonblock = 1;
7329     @@ -1716,8 +1718,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
7330     sk->sk_err = smc->clcsock->sk->sk_err;
7331     sk->sk_error_report(sk);
7332     }
7333     - if (rc)
7334     - return rc;
7335    
7336     if (optlen < sizeof(int))
7337     return -EINVAL;
7338     @@ -1725,6 +1725,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
7339     return -EFAULT;
7340    
7341     lock_sock(sk);
7342     + if (rc || smc->use_fallback)
7343     + goto out;
7344     switch (optname) {
7345     case TCP_ULP:
7346     case TCP_FASTOPEN:
7347     @@ -1736,15 +1738,14 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
7348     smc_switch_to_fallback(smc);
7349     smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
7350     } else {
7351     - if (!smc->use_fallback)
7352     - rc = -EINVAL;
7353     + rc = -EINVAL;
7354     }
7355     break;
7356     case TCP_NODELAY:
7357     if (sk->sk_state != SMC_INIT &&
7358     sk->sk_state != SMC_LISTEN &&
7359     sk->sk_state != SMC_CLOSED) {
7360     - if (val && !smc->use_fallback)
7361     + if (val)
7362     mod_delayed_work(system_wq, &smc->conn.tx_work,
7363     0);
7364     }
7365     @@ -1753,7 +1754,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
7366     if (sk->sk_state != SMC_INIT &&
7367     sk->sk_state != SMC_LISTEN &&
7368     sk->sk_state != SMC_CLOSED) {
7369     - if (!val && !smc->use_fallback)
7370     + if (!val)
7371     mod_delayed_work(system_wq, &smc->conn.tx_work,
7372     0);
7373     }
7374     @@ -1764,6 +1765,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
7375     default:
7376     break;
7377     }
7378     +out:
7379     release_sock(sk);
7380    
7381     return rc;
7382     diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
7383     index 82160808765c..b5a5b1c548c9 100644
7384     --- a/scripts/dtc/Makefile
7385     +++ b/scripts/dtc/Makefile
7386     @@ -11,7 +11,7 @@ dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o
7387     # Source files need to get at the userspace version of libfdt_env.h to compile
7388     HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt
7389    
7390     -ifeq ($(wildcard /usr/include/yaml.h),)
7391     +ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),)
7392     ifneq ($(CHECK_DTBS),)
7393     $(error dtc needs libyaml for DT schema validation support. \
7394     Install the necessary libyaml development package.)
7395     @@ -19,7 +19,7 @@ endif
7396     HOST_EXTRACFLAGS += -DNO_YAML
7397     else
7398     dtc-objs += yamltree.o
7399     -HOSTLDLIBS_dtc := -lyaml
7400     +HOSTLDLIBS_dtc := $(shell pkg-config yaml-0.1 --libs)
7401     endif
7402    
7403     # Generated files need one more search path to include headers in source tree
7404     diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
7405     index ae6504d07fd6..fb15f09e0e38 100644
7406     --- a/scripts/kallsyms.c
7407     +++ b/scripts/kallsyms.c
7408     @@ -489,6 +489,8 @@ static void build_initial_tok_table(void)
7409     table[pos] = table[i];
7410     learn_symbol(table[pos].sym, table[pos].len);
7411     pos++;
7412     + } else {
7413     + free(table[i].sym);
7414     }
7415     }
7416     table_cnt = pos;
7417     diff --git a/security/apparmor/label.c b/security/apparmor/label.c
7418     index 59f1cc2557a7..470693239e64 100644
7419     --- a/security/apparmor/label.c
7420     +++ b/security/apparmor/label.c
7421     @@ -1458,11 +1458,13 @@ static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
7422     /* helper macro for snprint routines */
7423     #define update_for_len(total, len, size, str) \
7424     do { \
7425     + size_t ulen = len; \
7426     + \
7427     AA_BUG(len < 0); \
7428     - total += len; \
7429     - len = min(len, size); \
7430     - size -= len; \
7431     - str += len; \
7432     + total += ulen; \
7433     + ulen = min(ulen, size); \
7434     + size -= ulen; \
7435     + str += ulen; \
7436     } while (0)
7437    
7438     /**
7439     @@ -1597,7 +1599,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
7440     struct aa_ns *prev_ns = NULL;
7441     struct label_it i;
7442     int count = 0, total = 0;
7443     - size_t len;
7444     + ssize_t len;
7445    
7446     AA_BUG(!str && size != 0);
7447     AA_BUG(!label);
7448     diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
7449     index e7832448d721..bf38fc1b59b2 100644
7450     --- a/security/tomoyo/realpath.c
7451     +++ b/security/tomoyo/realpath.c
7452     @@ -217,31 +217,6 @@ out:
7453     return ERR_PTR(-ENOMEM);
7454     }
7455    
7456     -/**
7457     - * tomoyo_get_socket_name - Get the name of a socket.
7458     - *
7459     - * @path: Pointer to "struct path".
7460     - * @buffer: Pointer to buffer to return value in.
7461     - * @buflen: Sizeof @buffer.
7462     - *
7463     - * Returns the buffer.
7464     - */
7465     -static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
7466     - const int buflen)
7467     -{
7468     - struct inode *inode = d_backing_inode(path->dentry);
7469     - struct socket *sock = inode ? SOCKET_I(inode) : NULL;
7470     - struct sock *sk = sock ? sock->sk : NULL;
7471     -
7472     - if (sk) {
7473     - snprintf(buffer, buflen, "socket:[family=%u:type=%u:protocol=%u]",
7474     - sk->sk_family, sk->sk_type, sk->sk_protocol);
7475     - } else {
7476     - snprintf(buffer, buflen, "socket:[unknown]");
7477     - }
7478     - return buffer;
7479     -}
7480     -
7481     /**
7482     * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
7483     *
7484     @@ -279,12 +254,7 @@ char *tomoyo_realpath_from_path(const struct path *path)
7485     break;
7486     /* To make sure that pos is '\0' terminated. */
7487     buf[buf_len - 1] = '\0';
7488     - /* Get better name for socket. */
7489     - if (sb->s_magic == SOCKFS_MAGIC) {
7490     - pos = tomoyo_get_socket_name(path, buf, buf_len - 1);
7491     - goto encode;
7492     - }
7493     - /* For "pipe:[\$]". */
7494     + /* For "pipe:[\$]" and "socket:[\$]". */
7495     if (dentry->d_op && dentry->d_op->d_dname) {
7496     pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
7497     goto encode;
7498     diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
7499     index c37a78677955..265682296836 100644
7500     --- a/tools/perf/builtin-diff.c
7501     +++ b/tools/perf/builtin-diff.c
7502     @@ -575,8 +575,8 @@ static int64_t block_cycles_diff_cmp(struct hist_entry *left,
7503     if (!pairs_left && !pairs_right)
7504     return 0;
7505    
7506     - l = labs(left->diff.cycles);
7507     - r = labs(right->diff.cycles);
7508     + l = llabs(left->diff.cycles);
7509     + r = llabs(right->diff.cycles);
7510     return r - l;
7511     }
7512    
7513     diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
7514     index 6dba8b728d23..3983d6ccd14d 100644
7515     --- a/tools/perf/builtin-script.c
7516     +++ b/tools/perf/builtin-script.c
7517     @@ -448,7 +448,7 @@ static int perf_evsel__check_attr(struct evsel *evsel,
7518     "selected. Hence, no address to lookup the source line number.\n");
7519     return -EINVAL;
7520     }
7521     - if (PRINT_FIELD(BRSTACKINSN) &&
7522     + if (PRINT_FIELD(BRSTACKINSN) && !allow_user_set &&
7523     !(perf_evlist__combined_branch_type(session->evlist) &
7524     PERF_SAMPLE_BRANCH_ANY)) {
7525     pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
7526     diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
7527     index 47fe34e5f7d5..ec7640cc4c91 100644
7528     --- a/tools/perf/util/perf_regs.h
7529     +++ b/tools/perf/util/perf_regs.h
7530     @@ -41,7 +41,7 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
7531    
7532     static inline const char *perf_reg_name(int id __maybe_unused)
7533     {
7534     - return NULL;
7535     + return "unknown";
7536     }
7537    
7538     static inline int perf_reg_value(u64 *valp __maybe_unused,
7539     diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
7540     index 2a9890c8395a..21fcfe621d3a 100644
7541     --- a/tools/power/x86/intel-speed-select/isst-config.c
7542     +++ b/tools/power/x86/intel-speed-select/isst-config.c
7543     @@ -169,7 +169,7 @@ int get_topo_max_cpus(void)
7544     static void set_cpu_online_offline(int cpu, int state)
7545     {
7546     char buffer[128];
7547     - int fd;
7548     + int fd, ret;
7549    
7550     snprintf(buffer, sizeof(buffer),
7551     "/sys/devices/system/cpu/cpu%d/online", cpu);
7552     @@ -179,9 +179,12 @@ static void set_cpu_online_offline(int cpu, int state)
7553     err(-1, "%s open failed", buffer);
7554    
7555     if (state)
7556     - write(fd, "1\n", 2);
7557     + ret = write(fd, "1\n", 2);
7558     else
7559     - write(fd, "0\n", 2);
7560     + ret = write(fd, "0\n", 2);
7561     +
7562     + if (ret == -1)
7563     + perror("Online/Offline: Operation failed\n");
7564    
7565     close(fd);
7566     }
7567     diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
7568     index 6dee5332c9d3..fde3f9cefc6d 100644
7569     --- a/tools/power/x86/intel-speed-select/isst-core.c
7570     +++ b/tools/power/x86/intel-speed-select/isst-core.c
7571     @@ -553,7 +553,6 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
7572     i);
7573     ctdp_level = &pkg_dev->ctdp_level[i];
7574    
7575     - ctdp_level->processed = 1;
7576     ctdp_level->level = i;
7577     ctdp_level->control_cpu = cpu;
7578     ctdp_level->pkg_id = get_physical_package_id(cpu);
7579     @@ -561,7 +560,10 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
7580    
7581     ret = isst_get_ctdp_control(cpu, i, ctdp_level);
7582     if (ret)
7583     - return ret;
7584     + continue;
7585     +
7586     + pkg_dev->processed = 1;
7587     + ctdp_level->processed = 1;
7588    
7589     ret = isst_get_tdp_info(cpu, i, ctdp_level);
7590     if (ret)
7591     @@ -614,8 +616,6 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
7592     }
7593     }
7594    
7595     - pkg_dev->processed = 1;
7596     -
7597     return 0;
7598     }
7599    
7600     diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
7601     index 40346d534f78..b11575c3e886 100644
7602     --- a/tools/power/x86/intel-speed-select/isst-display.c
7603     +++ b/tools/power/x86/intel-speed-select/isst-display.c
7604     @@ -314,7 +314,8 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
7605     char value[256];
7606     int i, base_level = 1;
7607    
7608     - print_package_info(cpu, outf);
7609     + if (pkg_dev->processed)
7610     + print_package_info(cpu, outf);
7611    
7612     for (i = 0; i <= pkg_dev->levels; ++i) {
7613     struct isst_pkg_ctdp_level_info *ctdp_level;
7614     diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
7615     index 25e23e73c72e..2ecfa1158e2b 100644
7616     --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
7617     +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
7618     @@ -73,7 +73,7 @@ trans:
7619     [sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1),
7620     [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2),
7621     [tar_3]"i"(TAR_3), [dscr_3]"i"(DSCR_3)
7622     - : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
7623     + : "memory", "r0", "r3", "r4", "r5", "r6", "lr"
7624     );
7625    
7626     /* TM failed, analyse */
7627     diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
7628     index f603fe5a445b..6f7fb51f0809 100644
7629     --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
7630     +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
7631     @@ -74,8 +74,8 @@ trans:
7632     "3: ;"
7633     : [res] "=r" (result), [texasr] "=r" (texasr)
7634     : [sprn_texasr] "i" (SPRN_TEXASR)
7635     - : "memory", "r0", "r1", "r3", "r4",
7636     - "r7", "r8", "r9", "r10", "r11"
7637     + : "memory", "r0", "r3", "r4",
7638     + "r7", "r8", "r9", "r10", "r11", "lr"
7639     );
7640    
7641     if (result) {
7642     diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
7643     index e0d37f07bdeb..46ef378a15ec 100644
7644     --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
7645     +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
7646     @@ -62,7 +62,7 @@ trans:
7647     [sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR),
7648     [tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2),
7649     [dscr_2]"i"(DSCR_2), [cptr1] "b" (&cptr[1])
7650     - : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
7651     + : "memory", "r0", "r3", "r4", "r5", "r6"
7652     );
7653    
7654     /* TM failed, analyse */
7655     diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
7656     index 8027457b97b7..70ca01234f79 100644
7657     --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
7658     +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
7659     @@ -62,8 +62,8 @@ trans:
7660     "3: ;"
7661     : [res] "=r" (result), [texasr] "=r" (texasr)
7662     : [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "b" (&cptr[1])
7663     - : "memory", "r0", "r1", "r3", "r4",
7664     - "r7", "r8", "r9", "r10", "r11"
7665     + : "memory", "r0", "r3", "r4",
7666     + "r7", "r8", "r9", "r10", "r11", "lr"
7667     );
7668    
7669     if (result) {
7670     diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
7671     index 56fbf9f6bbf3..07c388147b75 100644
7672     --- a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
7673     +++ b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
7674     @@ -10,10 +10,12 @@
7675     */
7676    
7677     #define _GNU_SOURCE
7678     +#include <stdio.h>
7679     #include <stdlib.h>
7680     #include <signal.h>
7681    
7682     #include "utils.h"
7683     +#include "tm.h"
7684    
7685     void trap_signal_handler(int signo, siginfo_t *si, void *uc)
7686     {
7687     @@ -29,6 +31,8 @@ int tm_signal_sigreturn_nt(void)
7688     {
7689     struct sigaction trap_sa;
7690    
7691     + SKIP_IF(!have_htm());
7692     +
7693     trap_sa.sa_flags = SA_SIGINFO;
7694     trap_sa.sa_sigaction = trap_signal_handler;
7695    
7696     diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
7697     index 1c0d76cb5adf..93b90a9b1eeb 100644
7698     --- a/tools/testing/selftests/vm/config
7699     +++ b/tools/testing/selftests/vm/config
7700     @@ -1,2 +1,3 @@
7701     CONFIG_SYSVIPC=y
7702     CONFIG_USERFAULTFD=y
7703     +CONFIG_TEST_VMALLOC=m