Magellan Linux

Annotation of /trunk/kernel-magellan/patches-5.3/0104-5.3.5-all-fixes.patch

Revision 3458
Thu Oct 17 06:32:39 2019 UTC by niro
File size: 206694 bytes
-linux-5.3.5
1 niro 3458 diff --git a/Makefile b/Makefile
2     index fa11c1d89acf..bf03c110ed9b 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 3
9     -SUBLEVEL = 4
10     +SUBLEVEL = 5
11     EXTRAVERSION =
12     NAME = Bobtail Squid
13    
14     @@ -751,6 +751,11 @@ else
15     # These warnings generated too much noise in a regular build.
16     # Use make W=1 to enable them (see scripts/Makefile.extrawarn)
17     KBUILD_CFLAGS += -Wno-unused-but-set-variable
18     +
19     +# Warn about unmarked fall-throughs in switch statement.
20     +# Disabled for clang while comment to attribute conversion happens and
21     +# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
22     +KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
23     endif
24    
25     KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
26     @@ -845,9 +850,6 @@ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
27     # warn about C99 declaration after statement
28     KBUILD_CFLAGS += -Wdeclaration-after-statement
29    
30     -# Warn about unmarked fall-throughs in switch statement.
31     -KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
32     -
33     # Variable Length Arrays (VLAs) should not be used anywhere in the kernel
34     KBUILD_CFLAGS += -Wvla
35    
36     diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
37     index 24360211534a..b587a3b3939a 100644
38     --- a/arch/arm/Kconfig
39     +++ b/arch/arm/Kconfig
40     @@ -82,7 +82,7 @@ config ARM
41     select HAVE_FAST_GUP if ARM_LPAE
42     select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
43     select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
44     - select HAVE_FUNCTION_TRACER if !XIP_KERNEL
45     + select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000)
46     select HAVE_GCC_PLUGINS
47     select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
48     select HAVE_IDE if PCI || ISA || PCMCIA
49     @@ -1572,8 +1572,9 @@ config ARM_PATCH_IDIV
50     code to do integer division.
51    
52     config AEABI
53     - bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
54     - default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
55     + bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
56     + !CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
57     + default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
58     help
59     This option allows for the kernel to be compiled using the latest
60     ARM ABI (aka EABI). This is only useful if you are using a user
61     diff --git a/arch/arm/Makefile b/arch/arm/Makefile
62     index c3624ca6c0bc..9b3d4deca9e4 100644
63     --- a/arch/arm/Makefile
64     +++ b/arch/arm/Makefile
65     @@ -112,6 +112,10 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
66     CFLAGS_ABI +=-funwind-tables
67     endif
68    
69     +ifeq ($(CONFIG_CC_IS_CLANG),y)
70     +CFLAGS_ABI += -meabi gnu
71     +endif
72     +
73     # Accept old syntax despite ".syntax unified"
74     AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
75    
76     diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
77     index bfaa2de63a10..e2030ba16512 100644
78     --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
79     +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
80     @@ -72,7 +72,6 @@
81     reg = <0>;
82     /* 50 ns min period = 20 MHz */
83     spi-max-frequency = <20000000>;
84     - spi-cpol; /* Clock active low */
85     vcc-supply = <&vdisp>;
86     iovcc-supply = <&vdisp>;
87     vci-supply = <&vdisp>;
88     diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
89     index 890eeaac3cbb..bd0f4821f7e1 100644
90     --- a/arch/arm/mm/fault.c
91     +++ b/arch/arm/mm/fault.c
92     @@ -191,7 +191,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
93     {
94     unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
95    
96     - if (fsr & FSR_WRITE)
97     + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
98     mask = VM_WRITE;
99     if (fsr & FSR_LNX_PF)
100     mask = VM_EXEC;
101     @@ -262,7 +262,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
102    
103     if (user_mode(regs))
104     flags |= FAULT_FLAG_USER;
105     - if (fsr & FSR_WRITE)
106     + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
107     flags |= FAULT_FLAG_WRITE;
108    
109     /*
110     diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
111     index c063708fa503..9ecc2097a87a 100644
112     --- a/arch/arm/mm/fault.h
113     +++ b/arch/arm/mm/fault.h
114     @@ -6,6 +6,7 @@
115     * Fault status register encodings. We steal bit 31 for our own purposes.
116     */
117     #define FSR_LNX_PF (1 << 31)
118     +#define FSR_CM (1 << 13)
119     #define FSR_WRITE (1 << 11)
120     #define FSR_FS4 (1 << 10)
121     #define FSR_FS3_0 (15)
122     diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
123     index f866870db749..0b94b674aa91 100644
124     --- a/arch/arm/mm/mmap.c
125     +++ b/arch/arm/mm/mmap.c
126     @@ -18,8 +18,9 @@
127     (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
128    
129     /* gap between mmap and stack */
130     -#define MIN_GAP (128*1024*1024UL)
131     -#define MAX_GAP ((TASK_SIZE)/6*5)
132     +#define MIN_GAP (128*1024*1024UL)
133     +#define MAX_GAP ((STACK_TOP)/6*5)
134     +#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
135    
136     static int mmap_is_legacy(struct rlimit *rlim_stack)
137     {
138     @@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
139     static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
140     {
141     unsigned long gap = rlim_stack->rlim_cur;
142     + unsigned long pad = stack_guard_gap;
143     +
144     + /* Account for stack randomization if necessary */
145     + if (current->flags & PF_RANDOMIZE)
146     + pad += (STACK_RND_MASK << PAGE_SHIFT);
147     +
148     + /* Values close to RLIM_INFINITY can overflow. */
149     + if (gap + pad > gap)
150     + gap += pad;
151    
152     if (gap < MIN_GAP)
153     gap = MIN_GAP;
154     else if (gap > MAX_GAP)
155     gap = MAX_GAP;
156    
157     - return PAGE_ALIGN(TASK_SIZE - gap - rnd);
158     + return PAGE_ALIGN(STACK_TOP - gap - rnd);
159     }
160    
161     /*
162     diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
163     index d9a0038774a6..d5e0b908f0ba 100644
164     --- a/arch/arm/mm/mmu.c
165     +++ b/arch/arm/mm/mmu.c
166     @@ -1177,6 +1177,22 @@ void __init adjust_lowmem_bounds(void)
167     */
168     vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
169    
170     + /*
171     + * The first usable region must be PMD aligned. Mark its start
172     + * as MEMBLOCK_NOMAP if it isn't
173     + */
174     + for_each_memblock(memory, reg) {
175     + if (!memblock_is_nomap(reg)) {
176     + if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
177     + phys_addr_t len;
178     +
179     + len = round_up(reg->base, PMD_SIZE) - reg->base;
180     + memblock_mark_nomap(reg->base, len);
181     + }
182     + break;
183     + }
184     + }
185     +
186     for_each_memblock(memory, reg) {
187     phys_addr_t block_start = reg->base;
188     phys_addr_t block_end = reg->base + reg->size;
189     diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
190     index 7a299a20f6dc..7a8b8bc69e8d 100644
191     --- a/arch/arm64/include/asm/cmpxchg.h
192     +++ b/arch/arm64/include/asm/cmpxchg.h
193     @@ -63,7 +63,7 @@ __XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory")
194     #undef __XCHG_CASE
195    
196     #define __XCHG_GEN(sfx) \
197     -static inline unsigned long __xchg##sfx(unsigned long x, \
198     +static __always_inline unsigned long __xchg##sfx(unsigned long x, \
199     volatile void *ptr, \
200     int size) \
201     { \
202     @@ -105,7 +105,7 @@ __XCHG_GEN(_mb)
203     #define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
204    
205     #define __CMPXCHG_GEN(sfx) \
206     -static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
207     +static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
208     unsigned long old, \
209     unsigned long new, \
210     int size) \
211     @@ -212,7 +212,7 @@ __CMPWAIT_CASE( , , 64);
212     #undef __CMPWAIT_CASE
213    
214     #define __CMPWAIT_GEN(sfx) \
215     -static inline void __cmpwait##sfx(volatile void *ptr, \
216     +static __always_inline void __cmpwait##sfx(volatile void *ptr, \
217     unsigned long val, \
218     int size) \
219     { \
220     diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
221     index b050641b5139..8dac7110f0cb 100644
222     --- a/arch/arm64/mm/mmap.c
223     +++ b/arch/arm64/mm/mmap.c
224     @@ -54,7 +54,11 @@ unsigned long arch_mmap_rnd(void)
225     static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
226     {
227     unsigned long gap = rlim_stack->rlim_cur;
228     - unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
229     + unsigned long pad = stack_guard_gap;
230     +
231     + /* Account for stack randomization if necessary */
232     + if (current->flags & PF_RANDOMIZE)
233     + pad += (STACK_RND_MASK << PAGE_SHIFT);
234    
235     /* Values close to RLIM_INFINITY can overflow. */
236     if (gap + pad > gap)
237     diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
238     index 9a82dd11c0e9..bb8658cc7f12 100644
239     --- a/arch/mips/include/asm/atomic.h
240     +++ b/arch/mips/include/asm/atomic.h
241     @@ -68,7 +68,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
242     "\t" __scbeqz " %0, 1b \n" \
243     " .set pop \n" \
244     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
245     - : "Ir" (i)); \
246     + : "Ir" (i) : __LLSC_CLOBBER); \
247     } else { \
248     unsigned long flags; \
249     \
250     @@ -98,7 +98,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
251     " .set pop \n" \
252     : "=&r" (result), "=&r" (temp), \
253     "+" GCC_OFF_SMALL_ASM() (v->counter) \
254     - : "Ir" (i)); \
255     + : "Ir" (i) : __LLSC_CLOBBER); \
256     } else { \
257     unsigned long flags; \
258     \
259     @@ -132,7 +132,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
260     " move %0, %1 \n" \
261     : "=&r" (result), "=&r" (temp), \
262     "+" GCC_OFF_SMALL_ASM() (v->counter) \
263     - : "Ir" (i)); \
264     + : "Ir" (i) : __LLSC_CLOBBER); \
265     } else { \
266     unsigned long flags; \
267     \
268     @@ -193,6 +193,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
269     if (kernel_uses_llsc) {
270     int temp;
271    
272     + loongson_llsc_mb();
273     __asm__ __volatile__(
274     " .set push \n"
275     " .set "MIPS_ISA_LEVEL" \n"
276     @@ -200,16 +201,16 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
277     " .set pop \n"
278     " subu %0, %1, %3 \n"
279     " move %1, %0 \n"
280     - " bltz %0, 1f \n"
281     + " bltz %0, 2f \n"
282     " .set push \n"
283     " .set "MIPS_ISA_LEVEL" \n"
284     " sc %1, %2 \n"
285     "\t" __scbeqz " %1, 1b \n"
286     - "1: \n"
287     + "2: \n"
288     " .set pop \n"
289     : "=&r" (result), "=&r" (temp),
290     "+" GCC_OFF_SMALL_ASM() (v->counter)
291     - : "Ir" (i));
292     + : "Ir" (i) : __LLSC_CLOBBER);
293     } else {
294     unsigned long flags;
295    
296     @@ -269,7 +270,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
297     "\t" __scbeqz " %0, 1b \n" \
298     " .set pop \n" \
299     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
300     - : "Ir" (i)); \
301     + : "Ir" (i) : __LLSC_CLOBBER); \
302     } else { \
303     unsigned long flags; \
304     \
305     @@ -299,7 +300,7 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
306     " .set pop \n" \
307     : "=&r" (result), "=&r" (temp), \
308     "+" GCC_OFF_SMALL_ASM() (v->counter) \
309     - : "Ir" (i)); \
310     + : "Ir" (i) : __LLSC_CLOBBER); \
311     } else { \
312     unsigned long flags; \
313     \
314     @@ -333,7 +334,7 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
315     " .set pop \n" \
316     : "=&r" (result), "=&r" (temp), \
317     "+" GCC_OFF_SMALL_ASM() (v->counter) \
318     - : "Ir" (i)); \
319     + : "Ir" (i) : __LLSC_CLOBBER); \
320     } else { \
321     unsigned long flags; \
322     \
323     diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
324     index b865e317a14f..9228f7386220 100644
325     --- a/arch/mips/include/asm/barrier.h
326     +++ b/arch/mips/include/asm/barrier.h
327     @@ -211,14 +211,22 @@
328     #define __smp_wmb() barrier()
329     #endif
330    
331     +/*
332     + * When LL/SC does imply order, it must also be a compiler barrier to avoid the
333     + * compiler from reordering where the CPU will not. When it does not imply
334     + * order, the compiler is also free to reorder across the LL/SC loop and
335     + * ordering will be done by smp_llsc_mb() and friends.
336     + */
337     #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
338     #define __WEAK_LLSC_MB " sync \n"
339     +#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
340     +#define __LLSC_CLOBBER
341     #else
342     #define __WEAK_LLSC_MB " \n"
343     +#define smp_llsc_mb() do { } while (0)
344     +#define __LLSC_CLOBBER "memory"
345     #endif
346    
347     -#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
348     -
349     #ifdef CONFIG_CPU_CAVIUM_OCTEON
350     #define smp_mb__before_llsc() smp_wmb()
351     #define __smp_mb__before_llsc() __smp_wmb()
352     @@ -238,36 +246,40 @@
353    
354     /*
355     * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
356     - * store or pref) in between an ll & sc can cause the sc instruction to
357     + * store or prefetch) in between an LL & SC can cause the SC instruction to
358     * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
359     * containing such sequences, this bug bites harder than we might otherwise
360     * expect due to reordering & speculation:
361     *
362     - * 1) A memory access appearing prior to the ll in program order may actually
363     - * be executed after the ll - this is the reordering case.
364     + * 1) A memory access appearing prior to the LL in program order may actually
365     + * be executed after the LL - this is the reordering case.
366     *
367     - * In order to avoid this we need to place a memory barrier (ie. a sync
368     - * instruction) prior to every ll instruction, in between it & any earlier
369     - * memory access instructions. Many of these cases are already covered by
370     - * smp_mb__before_llsc() but for the remaining cases, typically ones in
371     - * which multiple CPUs may operate on a memory location but ordering is not
372     - * usually guaranteed, we use loongson_llsc_mb() below.
373     + * In order to avoid this we need to place a memory barrier (ie. a SYNC
374     + * instruction) prior to every LL instruction, in between it and any earlier
375     + * memory access instructions.
376     *
377     * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
378     *
379     - * 2) If a conditional branch exists between an ll & sc with a target outside
380     - * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
381     + * 2) If a conditional branch exists between an LL & SC with a target outside
382     + * of the LL-SC loop, for example an exit upon value mismatch in cmpxchg()
383     * or similar, then misprediction of the branch may allow speculative
384     - * execution of memory accesses from outside of the ll-sc loop.
385     + * execution of memory accesses from outside of the LL-SC loop.
386     *
387     - * In order to avoid this we need a memory barrier (ie. a sync instruction)
388     + * In order to avoid this we need a memory barrier (ie. a SYNC instruction)
389     * at each affected branch target, for which we also use loongson_llsc_mb()
390     * defined below.
391     *
392     * This case affects all current Loongson 3 CPUs.
393     + *
394     + * The above described cases cause an error in the cache coherence protocol;
395     + * such that the Invalidate of a competing LL-SC goes 'missing' and SC
396     + * erroneously observes its core still has Exclusive state and lets the SC
397     + * proceed.
398     + *
399     + * Therefore the error only occurs on SMP systems.
400     */
401     #ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
402     -#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
403     +#define loongson_llsc_mb() __asm__ __volatile__("sync" : : :"memory")
404     #else
405     #define loongson_llsc_mb() do { } while (0)
406     #endif
407     diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
408     index 9a466dde9b96..985d6a02f9ea 100644
409     --- a/arch/mips/include/asm/bitops.h
410     +++ b/arch/mips/include/asm/bitops.h
411     @@ -66,7 +66,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
412     " beqzl %0, 1b \n"
413     " .set pop \n"
414     : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
415     - : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
416     + : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
417     + : __LLSC_CLOBBER);
418     #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
419     } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
420     loongson_llsc_mb();
421     @@ -76,7 +77,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
422     " " __INS "%0, %3, %2, 1 \n"
423     " " __SC "%0, %1 \n"
424     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
425     - : "ir" (bit), "r" (~0));
426     + : "ir" (bit), "r" (~0)
427     + : __LLSC_CLOBBER);
428     } while (unlikely(!temp));
429     #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
430     } else if (kernel_uses_llsc) {
431     @@ -90,7 +92,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
432     " " __SC "%0, %1 \n"
433     " .set pop \n"
434     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
435     - : "ir" (1UL << bit));
436     + : "ir" (1UL << bit)
437     + : __LLSC_CLOBBER);
438     } while (unlikely(!temp));
439     } else
440     __mips_set_bit(nr, addr);
441     @@ -122,7 +125,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
442     " beqzl %0, 1b \n"
443     " .set pop \n"
444     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
445     - : "ir" (~(1UL << bit)));
446     + : "ir" (~(1UL << bit))
447     + : __LLSC_CLOBBER);
448     #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
449     } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
450     loongson_llsc_mb();
451     @@ -132,7 +136,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
452     " " __INS "%0, $0, %2, 1 \n"
453     " " __SC "%0, %1 \n"
454     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
455     - : "ir" (bit));
456     + : "ir" (bit)
457     + : __LLSC_CLOBBER);
458     } while (unlikely(!temp));
459     #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
460     } else if (kernel_uses_llsc) {
461     @@ -146,7 +151,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
462     " " __SC "%0, %1 \n"
463     " .set pop \n"
464     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
465     - : "ir" (~(1UL << bit)));
466     + : "ir" (~(1UL << bit))
467     + : __LLSC_CLOBBER);
468     } while (unlikely(!temp));
469     } else
470     __mips_clear_bit(nr, addr);
471     @@ -192,7 +198,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
472     " beqzl %0, 1b \n"
473     " .set pop \n"
474     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
475     - : "ir" (1UL << bit));
476     + : "ir" (1UL << bit)
477     + : __LLSC_CLOBBER);
478     } else if (kernel_uses_llsc) {
479     unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
480     unsigned long temp;
481     @@ -207,7 +214,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
482     " " __SC "%0, %1 \n"
483     " .set pop \n"
484     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
485     - : "ir" (1UL << bit));
486     + : "ir" (1UL << bit)
487     + : __LLSC_CLOBBER);
488     } while (unlikely(!temp));
489     } else
490     __mips_change_bit(nr, addr);
491     @@ -244,11 +252,12 @@ static inline int test_and_set_bit(unsigned long nr,
492     " .set pop \n"
493     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
494     : "r" (1UL << bit)
495     - : "memory");
496     + : __LLSC_CLOBBER);
497     } else if (kernel_uses_llsc) {
498     unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
499     unsigned long temp;
500    
501     + loongson_llsc_mb();
502     do {
503     __asm__ __volatile__(
504     " .set push \n"
505     @@ -259,7 +268,7 @@ static inline int test_and_set_bit(unsigned long nr,
506     " .set pop \n"
507     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
508     : "r" (1UL << bit)
509     - : "memory");
510     + : __LLSC_CLOBBER);
511     } while (unlikely(!res));
512    
513     res = temp & (1UL << bit);
514     @@ -300,11 +309,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
515     " .set pop \n"
516     : "=&r" (temp), "+m" (*m), "=&r" (res)
517     : "r" (1UL << bit)
518     - : "memory");
519     + : __LLSC_CLOBBER);
520     } else if (kernel_uses_llsc) {
521     unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
522     unsigned long temp;
523    
524     + loongson_llsc_mb();
525     do {
526     __asm__ __volatile__(
527     " .set push \n"
528     @@ -315,7 +325,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
529     " .set pop \n"
530     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
531     : "r" (1UL << bit)
532     - : "memory");
533     + : __LLSC_CLOBBER);
534     } while (unlikely(!res));
535    
536     res = temp & (1UL << bit);
537     @@ -358,12 +368,13 @@ static inline int test_and_clear_bit(unsigned long nr,
538     " .set pop \n"
539     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
540     : "r" (1UL << bit)
541     - : "memory");
542     + : __LLSC_CLOBBER);
543     #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
544     } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
545     unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
546     unsigned long temp;
547    
548     + loongson_llsc_mb();
549     do {
550     __asm__ __volatile__(
551     " " __LL "%0, %1 # test_and_clear_bit \n"
552     @@ -372,13 +383,14 @@ static inline int test_and_clear_bit(unsigned long nr,
553     " " __SC "%0, %1 \n"
554     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
555     : "ir" (bit)
556     - : "memory");
557     + : __LLSC_CLOBBER);
558     } while (unlikely(!temp));
559     #endif
560     } else if (kernel_uses_llsc) {
561     unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
562     unsigned long temp;
563    
564     + loongson_llsc_mb();
565     do {
566     __asm__ __volatile__(
567     " .set push \n"
568     @@ -390,7 +402,7 @@ static inline int test_and_clear_bit(unsigned long nr,
569     " .set pop \n"
570     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
571     : "r" (1UL << bit)
572     - : "memory");
573     + : __LLSC_CLOBBER);
574     } while (unlikely(!res));
575    
576     res = temp & (1UL << bit);
577     @@ -433,11 +445,12 @@ static inline int test_and_change_bit(unsigned long nr,
578     " .set pop \n"
579     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
580     : "r" (1UL << bit)
581     - : "memory");
582     + : __LLSC_CLOBBER);
583     } else if (kernel_uses_llsc) {
584     unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
585     unsigned long temp;
586    
587     + loongson_llsc_mb();
588     do {
589     __asm__ __volatile__(
590     " .set push \n"
591     @@ -448,7 +461,7 @@ static inline int test_and_change_bit(unsigned long nr,
592     " .set pop \n"
593     : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
594     : "r" (1UL << bit)
595     - : "memory");
596     + : __LLSC_CLOBBER);
597     } while (unlikely(!res));
598    
599     res = temp & (1UL << bit);
600     diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
601     index f345a873742d..c8a47d18f628 100644
602     --- a/arch/mips/include/asm/cmpxchg.h
603     +++ b/arch/mips/include/asm/cmpxchg.h
604     @@ -46,6 +46,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
605     __typeof(*(m)) __ret; \
606     \
607     if (kernel_uses_llsc) { \
608     + loongson_llsc_mb(); \
609     __asm__ __volatile__( \
610     " .set push \n" \
611     " .set noat \n" \
612     @@ -60,7 +61,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
613     " .set pop \n" \
614     : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
615     : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
616     - : "memory"); \
617     + : __LLSC_CLOBBER); \
618     } else { \
619     unsigned long __flags; \
620     \
621     @@ -117,6 +118,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
622     __typeof(*(m)) __ret; \
623     \
624     if (kernel_uses_llsc) { \
625     + loongson_llsc_mb(); \
626     __asm__ __volatile__( \
627     " .set push \n" \
628     " .set noat \n" \
629     @@ -132,8 +134,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
630     " .set pop \n" \
631     "2: \n" \
632     : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
633     - : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
634     - : "memory"); \
635     + : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
636     + : __LLSC_CLOBBER); \
637     + loongson_llsc_mb(); \
638     } else { \
639     unsigned long __flags; \
640     \
641     @@ -229,6 +232,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
642     */
643     local_irq_save(flags);
644    
645     + loongson_llsc_mb();
646     asm volatile(
647     " .set push \n"
648     " .set " MIPS_ISA_ARCH_LEVEL " \n"
649     @@ -274,6 +278,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
650     "r" (old),
651     "r" (new)
652     : "memory");
653     + loongson_llsc_mb();
654    
655     local_irq_restore(flags);
656     return ret;
657     diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
658     index 1e6966e8527e..bdbdc19a2b8f 100644
659     --- a/arch/mips/include/asm/mipsregs.h
660     +++ b/arch/mips/include/asm/mipsregs.h
661     @@ -689,6 +689,9 @@
662     #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
663     #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
664    
665     +/* Ingenic Config7 bits */
666     +#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
667     +
668     /* Config7 Bits specific to MIPS Technologies. */
669    
670     /* Performance counters implemented Per TC */
671     @@ -2813,6 +2816,7 @@ __BUILD_SET_C0(status)
672     __BUILD_SET_C0(cause)
673     __BUILD_SET_C0(config)
674     __BUILD_SET_C0(config5)
675     +__BUILD_SET_C0(config7)
676     __BUILD_SET_C0(intcontrol)
677     __BUILD_SET_C0(intctl)
678     __BUILD_SET_C0(srsmap)
679     diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
680     index 1db29957a931..2c38f75d87ff 100644
681     --- a/arch/mips/kernel/branch.c
682     +++ b/arch/mips/kernel/branch.c
683     @@ -58,6 +58,7 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
684     unsigned long *contpc)
685     {
686     union mips_instruction insn = (union mips_instruction)dec_insn.insn;
687     + int __maybe_unused bc_false = 0;
688    
689     if (!cpu_has_mmips)
690     return 0;
691     @@ -139,7 +140,6 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
692     #ifdef CONFIG_MIPS_FP_SUPPORT
693     case mm_bc2f_op:
694     case mm_bc1f_op: {
695     - int bc_false = 0;
696     unsigned int fcr31;
697     unsigned int bit;
698    
699     diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
700     index 9635c1db3ae6..e654ffc1c8a0 100644
701     --- a/arch/mips/kernel/cpu-probe.c
702     +++ b/arch/mips/kernel/cpu-probe.c
703     @@ -1964,6 +1964,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
704     c->cputype = CPU_JZRISC;
705     c->writecombine = _CACHE_UNCACHED_ACCELERATED;
706     __cpu_name[cpu] = "Ingenic JZRISC";
707     + /*
708     + * The XBurst core by default attempts to avoid branch target
709     + * buffer lookups by detecting & special casing loops. This
710     + * feature will cause BogoMIPS and lpj calculate in error.
711     + * Set cp0 config7 bit 4 to disable this feature.
712     + */
713     + set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
714     break;
715     default:
716     panic("Unknown Ingenic Processor ID!");
717     diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
718     index b6dc78ad5d8c..b0e25e913bdb 100644
719     --- a/arch/mips/kernel/syscall.c
720     +++ b/arch/mips/kernel/syscall.c
721     @@ -132,6 +132,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
722     [efault] "i" (-EFAULT)
723     : "memory");
724     } else if (cpu_has_llsc) {
725     + loongson_llsc_mb();
726     __asm__ __volatile__ (
727     " .set push \n"
728     " .set "MIPS_ISA_ARCH_LEVEL" \n"
729     diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
730     index d79f2b432318..f5c778113384 100644
731     --- a/arch/mips/mm/mmap.c
732     +++ b/arch/mips/mm/mmap.c
733     @@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
734     EXPORT_SYMBOL(shm_align_mask);
735    
736     /* gap between mmap and stack */
737     -#define MIN_GAP (128*1024*1024UL)
738     -#define MAX_GAP ((TASK_SIZE)/6*5)
739     +#define MIN_GAP (128*1024*1024UL)
740     +#define MAX_GAP ((TASK_SIZE)/6*5)
741     +#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
742    
743     static int mmap_is_legacy(struct rlimit *rlim_stack)
744     {
745     @@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
746     static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
747     {
748     unsigned long gap = rlim_stack->rlim_cur;
749     + unsigned long pad = stack_guard_gap;
750     +
751     + /* Account for stack randomization if necessary */
752     + if (current->flags & PF_RANDOMIZE)
753     + pad += (STACK_RND_MASK << PAGE_SHIFT);
754     +
755     + /* Values close to RLIM_INFINITY can overflow. */
756     + if (gap + pad > gap)
757     + gap += pad;
758    
759     if (gap < MIN_GAP)
760     gap = MIN_GAP;
761     diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
762     index 144ceb0fba88..bece1264d1c5 100644
763     --- a/arch/mips/mm/tlbex.c
764     +++ b/arch/mips/mm/tlbex.c
765     @@ -631,7 +631,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
766     return;
767     }
768    
769     - if (cpu_has_rixi && _PAGE_NO_EXEC) {
770     + if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
771     if (fill_includes_sw_bits) {
772     UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
773     } else {
774     diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
775     index 3a6aa57b9d90..eea28ca679db 100644
776     --- a/arch/powerpc/include/asm/futex.h
777     +++ b/arch/powerpc/include/asm/futex.h
778     @@ -60,8 +60,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
779    
780     pagefault_enable();
781    
782     - if (!ret)
783     - *oval = oldval;
784     + *oval = oldval;
785    
786     prevent_write_to_user(uaddr, sizeof(*uaddr));
787     return ret;
788     diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
789     index 89623962c727..fe0c32fb9f96 100644
790     --- a/arch/powerpc/kernel/eeh_driver.c
791     +++ b/arch/powerpc/kernel/eeh_driver.c
792     @@ -744,6 +744,33 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
793     */
794     #define MAX_WAIT_FOR_RECOVERY 300
795    
796     +
797     +/* Walks the PE tree after processing an event to remove any stale PEs.
798     + *
799     + * NB: This needs to be recursive to ensure the leaf PEs get removed
800     + * before their parents do. Although this is possible to do recursively
801     + * we don't since this is easier to read and we need to garantee
802     + * the leaf nodes will be handled first.
803     + */
804     +static void eeh_pe_cleanup(struct eeh_pe *pe)
805     +{
806     + struct eeh_pe *child_pe, *tmp;
807     +
808     + list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
809     + eeh_pe_cleanup(child_pe);
810     +
811     + if (pe->state & EEH_PE_KEEP)
812     + return;
813     +
814     + if (!(pe->state & EEH_PE_INVALID))
815     + return;
816     +
817     + if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
818     + list_del(&pe->child);
819     + kfree(pe);
820     + }
821     +}
822     +
823     /**
824     * eeh_handle_normal_event - Handle EEH events on a specific PE
825     * @pe: EEH PE - which should not be used after we return, as it may
826     @@ -782,8 +809,6 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
827     return;
828     }
829    
830     - eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
831     -
832     eeh_pe_update_time_stamp(pe);
833     pe->freeze_count++;
834     if (pe->freeze_count > eeh_max_freezes) {
835     @@ -793,6 +818,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
836     result = PCI_ERS_RESULT_DISCONNECT;
837     }
838    
839     + eeh_for_each_pe(pe, tmp_pe)
840     + eeh_pe_for_each_dev(tmp_pe, edev, tmp)
841     + edev->mode &= ~EEH_DEV_NO_HANDLER;
842     +
843     /* Walk the various device drivers attached to this slot through
844     * a reset sequence, giving each an opportunity to do what it needs
845     * to accomplish the reset. Each child gets a report of the
846     @@ -969,6 +998,12 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
847     return;
848     }
849     }
850     +
851     + /*
852     + * Clean up any PEs without devices. While marked as EEH_PE_RECOVERYING
853     + * we don't want to modify the PE tree structure so we do it here.
854     + */
855     + eeh_pe_cleanup(pe);
856     eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
857     }
858    
859     @@ -981,7 +1016,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
860     */
861     void eeh_handle_special_event(void)
862     {
863     - struct eeh_pe *pe, *phb_pe;
864     + struct eeh_pe *pe, *phb_pe, *tmp_pe;
865     + struct eeh_dev *edev, *tmp_edev;
866     struct pci_bus *bus;
867     struct pci_controller *hose;
868     unsigned long flags;
869     @@ -1040,6 +1076,7 @@ void eeh_handle_special_event(void)
870     */
871     if (rc == EEH_NEXT_ERR_FROZEN_PE ||
872     rc == EEH_NEXT_ERR_FENCED_PHB) {
873     + eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
874     eeh_handle_normal_event(pe);
875     } else {
876     pci_lock_rescan_remove();
877     @@ -1050,6 +1087,10 @@ void eeh_handle_special_event(void)
878     (phb_pe->state & EEH_PE_RECOVERING))
879     continue;
880    
881     + eeh_for_each_pe(pe, tmp_pe)
882     + eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
883     + edev->mode &= ~EEH_DEV_NO_HANDLER;
884     +
885     /* Notify all devices to be down */
886     eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
887     eeh_set_channel_state(pe, pci_channel_io_perm_failure);
888     diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
889     index 64cfbe41174b..e36653e5f76b 100644
890     --- a/arch/powerpc/kernel/eeh_event.c
891     +++ b/arch/powerpc/kernel/eeh_event.c
892     @@ -121,6 +121,14 @@ int __eeh_send_failure_event(struct eeh_pe *pe)
893     }
894     event->pe = pe;
895    
896     + /*
897     + * Mark the PE as recovering before inserting it in the queue.
898     + * This prevents the PE from being free()ed by a hotplug driver
899     + * while the PE is sitting in the event queue.
900     + */
901     + if (pe)
902     + eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
903     +
904     /* We may or may not be called in an interrupt context */
905     spin_lock_irqsave(&eeh_eventlist_lock, flags);
906     list_add(&event->list, &eeh_eventlist);
907     diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
908     index 854cef7b18f4..f0813d50e0b1 100644
909     --- a/arch/powerpc/kernel/eeh_pe.c
910     +++ b/arch/powerpc/kernel/eeh_pe.c
911     @@ -491,6 +491,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
912     int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
913     {
914     struct eeh_pe *pe, *parent, *child;
915     + bool keep, recover;
916     int cnt;
917     struct pci_dn *pdn = eeh_dev_to_pdn(edev);
918    
919     @@ -516,10 +517,21 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
920     */
921     while (1) {
922     parent = pe->parent;
923     +
924     + /* PHB PEs should never be removed */
925     if (pe->type & EEH_PE_PHB)
926     break;
927    
928     - if (!(pe->state & EEH_PE_KEEP)) {
929     + /*
930     + * XXX: KEEP is set while resetting a PE. I don't think it's
931     + * ever set without RECOVERING also being set. I could
932     + * be wrong though so catch that with a WARN.
933     + */
934     + keep = !!(pe->state & EEH_PE_KEEP);
935     + recover = !!(pe->state & EEH_PE_RECOVERING);
936     + WARN_ON(keep && !recover);
937     +
938     + if (!keep && !recover) {
939     if (list_empty(&pe->edevs) &&
940     list_empty(&pe->child_list)) {
941     list_del(&pe->child);
942     @@ -528,6 +540,15 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
943     break;
944     }
945     } else {
946     + /*
947     + * Mark the PE as invalid. At the end of the recovery
948     + * process any invalid PEs will be garbage collected.
949     + *
950     + * We need to delay the free()ing of them since we can
951     + * remove edev's while traversing the PE tree which
952     + * might trigger the removal of a PE and we can't
953     + * deal with that (yet).
954     + */
955     if (list_empty(&pe->edevs)) {
956     cnt = 0;
957     list_for_each_entry(child, &pe->child_list, child) {
958     diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
959     index 6ba3cc2ef8ab..36c8a3652cf3 100644
960     --- a/arch/powerpc/kernel/exceptions-64s.S
961     +++ b/arch/powerpc/kernel/exceptions-64s.S
962     @@ -1211,6 +1211,10 @@ FTR_SECTION_ELSE
963     ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
964     9:
965     /* Deliver the machine check to host kernel in V mode. */
966     +BEGIN_FTR_SECTION
967     + ld r10,ORIG_GPR3(r1)
968     + mtspr SPRN_CFAR,r10
969     +END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
970     MACHINE_CHECK_HANDLER_WINDUP
971     EXCEPTION_PROLOG_0 PACA_EXMC
972     b machine_check_pSeries_0
973     diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
974     index 5faf0a64c92b..05824eb4323b 100644
975     --- a/arch/powerpc/kernel/rtas.c
976     +++ b/arch/powerpc/kernel/rtas.c
977     @@ -871,15 +871,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
978     return 0;
979    
980     for_each_cpu(cpu, cpus) {
981     + struct device *dev = get_cpu_device(cpu);
982     +
983     switch (state) {
984     case DOWN:
985     - cpuret = cpu_down(cpu);
986     + cpuret = device_offline(dev);
987     break;
988     case UP:
989     - cpuret = cpu_up(cpu);
990     + cpuret = device_online(dev);
991     break;
992     }
993     - if (cpuret) {
994     + if (cpuret < 0) {
995     pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
996     __func__,
997     ((state == UP) ? "up" : "down"),
998     @@ -968,6 +970,8 @@ int rtas_ibm_suspend_me(u64 handle)
999     data.token = rtas_token("ibm,suspend-me");
1000     data.complete = &done;
1001    
1002     + lock_device_hotplug();
1003     +
1004     /* All present CPUs must be online */
1005     cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
1006     cpuret = rtas_online_cpus_mask(offline_mask);
1007     @@ -1006,6 +1010,7 @@ out_hotplug_enable:
1008     __func__);
1009    
1010     out:
1011     + unlock_device_hotplug();
1012     free_cpumask_var(offline_mask);
1013     return atomic_read(&data.error);
1014     }
1015     diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
1016     index 11caa0291254..82f43535e686 100644
1017     --- a/arch/powerpc/kernel/traps.c
1018     +++ b/arch/powerpc/kernel/traps.c
1019     @@ -472,6 +472,7 @@ void system_reset_exception(struct pt_regs *regs)
1020     if (debugger(regs))
1021     goto out;
1022    
1023     + kmsg_dump(KMSG_DUMP_OOPS);
1024     /*
1025     * A system reset is a request to dump, so we always send
1026     * it through the crashdump code (if fadump or kdump are
1027     diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
1028     index b4ca9e95e678..c5cc16ab1954 100644
1029     --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
1030     +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
1031     @@ -902,7 +902,7 @@ int __meminit radix__create_section_mapping(unsigned long start, unsigned long e
1032     return -1;
1033     }
1034    
1035     - return create_physical_mapping(start, end, nid);
1036     + return create_physical_mapping(__pa(start), __pa(end), nid);
1037     }
1038    
1039     int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
1040     diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
1041     index 6a88a9f585d4..5d6111a9ee0e 100644
1042     --- a/arch/powerpc/mm/ptdump/ptdump.c
1043     +++ b/arch/powerpc/mm/ptdump/ptdump.c
1044     @@ -299,17 +299,15 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
1045    
1046     static void walk_pagetables(struct pg_state *st)
1047     {
1048     - pgd_t *pgd = pgd_offset_k(0UL);
1049     unsigned int i;
1050     - unsigned long addr;
1051     -
1052     - addr = st->start_address;
1053     + unsigned long addr = st->start_address & PGDIR_MASK;
1054     + pgd_t *pgd = pgd_offset_k(addr);
1055    
1056     /*
1057     * Traverse the linux pagetable structure and dump pages that are in
1058     * the hash pagetable.
1059     */
1060     - for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
1061     + for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
1062     if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
1063     /* pgd exists */
1064     walk_pud(st, pgd, addr);
1065     diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
1066     index dea243185ea4..cb50a9e1fd2d 100644
1067     --- a/arch/powerpc/perf/imc-pmu.c
1068     +++ b/arch/powerpc/perf/imc-pmu.c
1069     @@ -577,6 +577,7 @@ static int core_imc_mem_init(int cpu, int size)
1070     {
1071     int nid, rc = 0, core_id = (cpu / threads_per_core);
1072     struct imc_mem_info *mem_info;
1073     + struct page *page;
1074    
1075     /*
1076     * alloc_pages_node() will allocate memory for core in the
1077     @@ -587,11 +588,12 @@ static int core_imc_mem_init(int cpu, int size)
1078     mem_info->id = core_id;
1079    
1080     /* We need only vbase for core counters */
1081     - mem_info->vbase = page_address(alloc_pages_node(nid,
1082     - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
1083     - __GFP_NOWARN, get_order(size)));
1084     - if (!mem_info->vbase)
1085     + page = alloc_pages_node(nid,
1086     + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
1087     + __GFP_NOWARN, get_order(size));
1088     + if (!page)
1089     return -ENOMEM;
1090     + mem_info->vbase = page_address(page);
1091    
1092     /* Init the mutex */
1093     core_imc_refc[core_id].id = core_id;
1094     @@ -849,15 +851,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
1095     int nid = cpu_to_node(cpu_id);
1096    
1097     if (!local_mem) {
1098     + struct page *page;
1099     /*
1100     * This case could happen only once at start, since we dont
1101     * free the memory in cpu offline path.
1102     */
1103     - local_mem = page_address(alloc_pages_node(nid,
1104     + page = alloc_pages_node(nid,
1105     GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
1106     - __GFP_NOWARN, get_order(size)));
1107     - if (!local_mem)
1108     + __GFP_NOWARN, get_order(size));
1109     + if (!page)
1110     return -ENOMEM;
1111     + local_mem = page_address(page);
1112    
1113     per_cpu(thread_imc_mem, cpu_id) = local_mem;
1114     }
1115     @@ -1095,11 +1099,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
1116     int core_id = (cpu_id / threads_per_core);
1117    
1118     if (!local_mem) {
1119     - local_mem = page_address(alloc_pages_node(phys_id,
1120     - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
1121     - __GFP_NOWARN, get_order(size)));
1122     - if (!local_mem)
1123     + struct page *page;
1124     +
1125     + page = alloc_pages_node(phys_id,
1126     + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
1127     + __GFP_NOWARN, get_order(size));
1128     + if (!page)
1129     return -ENOMEM;
1130     + local_mem = page_address(page);
1131     per_cpu(trace_imc_mem, cpu_id) = local_mem;
1132    
1133     /* Initialise the counters for trace mode */
1134     diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1135     index e28f03e1eb5e..c75ec37bf0cd 100644
1136     --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1137     +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
1138     @@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
1139     struct page *tce_mem = NULL;
1140     __be64 *addr;
1141    
1142     - tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
1143     + tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
1144     + shift - PAGE_SHIFT);
1145     if (!tce_mem) {
1146     pr_err("Failed to allocate a TCE memory, level shift=%d\n",
1147     shift);
1148     @@ -161,6 +162,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
1149    
1150     if (ptce)
1151     *ptce = cpu_to_be64(0);
1152     + else
1153     + /* Skip the rest of the level */
1154     + i |= tbl->it_level_size - 1;
1155     }
1156     }
1157    
1158     @@ -260,7 +264,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
1159     unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
1160     PAGE_SHIFT);
1161     const unsigned long tce_table_size = 1UL << table_shift;
1162     - unsigned int tmplevels = levels;
1163    
1164     if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
1165     return -EINVAL;
1166     @@ -268,9 +271,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
1167     if (!is_power_of_2(window_size))
1168     return -EINVAL;
1169    
1170     - if (alloc_userspace_copy && (window_size > (1ULL << 32)))
1171     - tmplevels = 1;
1172     -
1173     /* Adjust direct table size from window_size and levels */
1174     entries_shift = (entries_shift + levels - 1) / levels;
1175     level_shift = entries_shift + 3;
1176     @@ -281,7 +281,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
1177    
1178     /* Allocate TCE table */
1179     addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
1180     - tmplevels, tce_table_size, &offset, &total_allocated);
1181     + 1, tce_table_size, &offset, &total_allocated);
1182    
1183     /* addr==NULL means that the first level allocation failed */
1184     if (!addr)
1185     @@ -292,18 +292,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
1186     * we did not allocate as much as we wanted,
1187     * release partially allocated table.
1188     */
1189     - if (tmplevels == levels && offset < tce_table_size)
1190     + if (levels == 1 && offset < tce_table_size)
1191     goto free_tces_exit;
1192    
1193     /* Allocate userspace view of the TCE table */
1194     if (alloc_userspace_copy) {
1195     offset = 0;
1196     uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
1197     - tmplevels, tce_table_size, &offset,
1198     + 1, tce_table_size, &offset,
1199     &total_allocated_uas);
1200     if (!uas)
1201     goto free_tces_exit;
1202     - if (tmplevels == levels && (offset < tce_table_size ||
1203     + if (levels == 1 && (offset < tce_table_size ||
1204     total_allocated_uas != total_allocated))
1205     goto free_uas_exit;
1206     }
1207     @@ -318,7 +318,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
1208    
1209     pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
1210     window_size, tce_table_size, bus_offset, tbl->it_base,
1211     - tbl->it_userspace, tmplevels, levels);
1212     + tbl->it_userspace, 1, levels);
1213    
1214     return 0;
1215    
1216     diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
1217     index 469c24463247..f914f0b14e4e 100644
1218     --- a/arch/powerpc/platforms/powernv/pci.h
1219     +++ b/arch/powerpc/platforms/powernv/pci.h
1220     @@ -219,7 +219,7 @@ extern struct iommu_table_group *pnv_npu_compound_attach(
1221     struct pnv_ioda_pe *pe);
1222    
1223     /* pci-ioda-tce.c */
1224     -#define POWERNV_IOMMU_DEFAULT_LEVELS 1
1225     +#define POWERNV_IOMMU_DEFAULT_LEVELS 2
1226     #define POWERNV_IOMMU_MAX_LEVELS 5
1227    
1228     extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
1229     diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
1230     index fe812bebdf5e..b571285f6c14 100644
1231     --- a/arch/powerpc/platforms/pseries/mobility.c
1232     +++ b/arch/powerpc/platforms/pseries/mobility.c
1233     @@ -9,6 +9,7 @@
1234     #include <linux/cpu.h>
1235     #include <linux/kernel.h>
1236     #include <linux/kobject.h>
1237     +#include <linux/sched.h>
1238     #include <linux/smp.h>
1239     #include <linux/stat.h>
1240     #include <linux/completion.h>
1241     @@ -207,7 +208,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
1242    
1243     prop_data += vd;
1244     }
1245     +
1246     + cond_resched();
1247     }
1248     +
1249     + cond_resched();
1250     } while (rtas_rc == 1);
1251    
1252     of_node_put(dn);
1253     @@ -310,8 +315,12 @@ int pseries_devicetree_update(s32 scope)
1254     add_dt_node(phandle, drc_index);
1255     break;
1256     }
1257     +
1258     + cond_resched();
1259     }
1260     }
1261     +
1262     + cond_resched();
1263     } while (rc == 1);
1264    
1265     kfree(rtas_buf);
1266     diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
1267     index f5940cc71c37..63462e96cf0e 100644
1268     --- a/arch/powerpc/platforms/pseries/setup.c
1269     +++ b/arch/powerpc/platforms/pseries/setup.c
1270     @@ -316,6 +316,9 @@ static void pseries_lpar_idle(void)
1271     * low power mode by ceding processor to hypervisor
1272     */
1273    
1274     + if (!prep_irq_for_idle())
1275     + return;
1276     +
1277     /* Indicate to hypervisor that we are idle. */
1278     get_lppaca()->idle = 1;
1279    
1280     diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
1281     index 14e56c25879f..25d4adccf750 100644
1282     --- a/arch/powerpc/xmon/xmon.c
1283     +++ b/arch/powerpc/xmon/xmon.c
1284     @@ -2534,13 +2534,16 @@ static void dump_pacas(void)
1285     static void dump_one_xive(int cpu)
1286     {
1287     unsigned int hwid = get_hard_smp_processor_id(cpu);
1288     -
1289     - opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
1290     - opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
1291     - opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
1292     - opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
1293     - opal_xive_dump(XIVE_DUMP_VP, hwid);
1294     - opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
1295     + bool hv = cpu_has_feature(CPU_FTR_HVMODE);
1296     +
1297     + if (hv) {
1298     + opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
1299     + opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
1300     + opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
1301     + opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
1302     + opal_xive_dump(XIVE_DUMP_VP, hwid);
1303     + opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
1304     + }
1305    
1306     if (setjmp(bus_error_jmp) != 0) {
1307     catch_memory_errors = 0;
1308     diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
1309     index ccad1398abd4..b5cfcad953c2 100644
1310     --- a/arch/s390/hypfs/inode.c
1311     +++ b/arch/s390/hypfs/inode.c
1312     @@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
1313     static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
1314     {
1315     struct inode *root_inode;
1316     - struct dentry *root_dentry;
1317     + struct dentry *root_dentry, *update_file;
1318     int rc = 0;
1319     struct hypfs_sb_info *sbi;
1320    
1321     @@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
1322     rc = hypfs_diag_create_files(root_dentry);
1323     if (rc)
1324     return rc;
1325     - sbi->update_file = hypfs_create_update_file(root_dentry);
1326     - if (IS_ERR(sbi->update_file))
1327     - return PTR_ERR(sbi->update_file);
1328     + update_file = hypfs_create_update_file(root_dentry);
1329     + if (IS_ERR(update_file))
1330     + return PTR_ERR(update_file);
1331     + sbi->update_file = update_file;
1332     hypfs_update_update(sb);
1333     pr_info("Hypervisor filesystem mounted\n");
1334     return 0;
1335     diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
1336     index fff790a3f4ee..c0867b0aae3e 100644
1337     --- a/arch/x86/kvm/hyperv.c
1338     +++ b/arch/x86/kvm/hyperv.c
1339     @@ -645,7 +645,9 @@ static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
1340     .vector = stimer->config.apic_vector
1341     };
1342    
1343     - return !kvm_apic_set_irq(vcpu, &irq, NULL);
1344     + if (lapic_in_kernel(vcpu))
1345     + return !kvm_apic_set_irq(vcpu, &irq, NULL);
1346     + return 0;
1347     }
1348    
1349     static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
1350     @@ -1852,7 +1854,13 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1351    
1352     ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
1353     ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
1354     - ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
1355     +
1356     + /*
1357     + * Direct Synthetic timers only make sense with in-kernel
1358     + * LAPIC
1359     + */
1360     + if (lapic_in_kernel(vcpu))
1361     + ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
1362    
1363     break;
1364    
1365     diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1366     index b33be928d164..70bcbd02edcb 100644
1367     --- a/block/bfq-iosched.c
1368     +++ b/block/bfq-iosched.c
1369     @@ -5809,12 +5809,14 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
1370     */
1371     if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
1372     tot_time_ns < bfqq->last_serv_time_ns) {
1373     + if (bfqq->last_serv_time_ns == 0) {
1374     + /*
1375     + * Now we certainly have a base value: make sure we
1376     + * start trying injection.
1377     + */
1378     + bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
1379     + }
1380     bfqq->last_serv_time_ns = tot_time_ns;
1381     - /*
1382     - * Now we certainly have a base value: make sure we
1383     - * start trying injection.
1384     - */
1385     - bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
1386     } else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
1387     /*
1388     * No I/O injected and no request still in service in
1389     diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
1390     index 024060165afa..76457003f140 100644
1391     --- a/drivers/block/pktcdvd.c
1392     +++ b/drivers/block/pktcdvd.c
1393     @@ -2594,7 +2594,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
1394     if (ret)
1395     return ret;
1396     if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
1397     - WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
1398     blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
1399     return -EINVAL;
1400     }
1401     diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1402     index da5b6723329a..28693dbcb0c3 100644
1403     --- a/drivers/char/ipmi/ipmi_si_intf.c
1404     +++ b/drivers/char/ipmi/ipmi_si_intf.c
1405     @@ -221,6 +221,9 @@ struct smi_info {
1406     */
1407     bool irq_enable_broken;
1408    
1409     + /* Is the driver in maintenance mode? */
1410     + bool in_maintenance_mode;
1411     +
1412     /*
1413     * Did we get an attention that we did not handle?
1414     */
1415     @@ -1007,11 +1010,20 @@ static int ipmi_thread(void *data)
1416     spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1417     busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
1418     &busy_until);
1419     - if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1420     + if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
1421     ; /* do nothing */
1422     - else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
1423     - schedule();
1424     - else if (smi_result == SI_SM_IDLE) {
1425     + } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
1426     + /*
1427     + * In maintenance mode we run as fast as
1428     + * possible to allow firmware updates to
1429     + * complete as fast as possible, but normally
1430     + * don't bang on the scheduler.
1431     + */
1432     + if (smi_info->in_maintenance_mode)
1433     + schedule();
1434     + else
1435     + usleep_range(100, 200);
1436     + } else if (smi_result == SI_SM_IDLE) {
1437     if (atomic_read(&smi_info->need_watch)) {
1438     schedule_timeout_interruptible(100);
1439     } else {
1440     @@ -1019,8 +1031,9 @@ static int ipmi_thread(void *data)
1441     __set_current_state(TASK_INTERRUPTIBLE);
1442     schedule();
1443     }
1444     - } else
1445     + } else {
1446     schedule_timeout_interruptible(1);
1447     + }
1448     }
1449     return 0;
1450     }
1451     @@ -1198,6 +1211,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
1452    
1453     if (!enable)
1454     atomic_set(&smi_info->req_events, 0);
1455     + smi_info->in_maintenance_mode = enable;
1456     }
1457    
1458     static void shutdown_smi(void *send_info);
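
The behavioral core of the ipmi_si hunks: calling schedule() from a TASK_RUNNING kthread returns immediately when nothing else is runnable, so the old SI_SM_CALL_WITH_DELAY branch degenerated into a busy loop on an idle CPU. usleep_range(100, 200) really gives the CPU up for a bounded interval; the tight loop is kept only while the new in_maintenance_mode flag is set, where firmware-update latency matters more than CPU time. The new branch, restated:

    if (smi_info->in_maintenance_mode)
        schedule();                /* spin hard: minimize firmware-update latency */
    else
        usleep_range(100, 200);    /* normal operation: actually yield the CPU */
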
1459     diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
1460     index 32dd29e0a37e..4de97cc7cb54 100644
1461     --- a/drivers/clk/actions/owl-common.c
1462     +++ b/drivers/clk/actions/owl-common.c
1463     @@ -68,16 +68,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
1464     struct clk_hw *hw;
1465    
1466     for (i = 0; i < hw_clks->num; i++) {
1467     + const char *name;
1468    
1469     hw = hw_clks->hws[i];
1470     -
1471     if (IS_ERR_OR_NULL(hw))
1472     continue;
1473    
1474     + name = hw->init->name;
1475     ret = devm_clk_hw_register(dev, hw);
1476     if (ret) {
1477     dev_err(dev, "Couldn't register clock %d - %s\n",
1478     - i, hw->init->name);
1479     + i, name);
1480     return ret;
1481     }
1482     }
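
This hunk is the first of several in this patch (see also axg-audio, clk-sirf, sprd/common, sunxi-ng/ccu_common and clk-zx296718 below) fixing the same use-after-free: the clk core takes ownership of hw->init during registration and may free it, so dereferencing hw->init->name in the failure path reads freed memory. The shared fix, distilled from the hunk:

    const char *name = hw->init->name;    /* capture before the core consumes init */

    ret = devm_clk_hw_register(dev, hw);
    if (ret)
        dev_err(dev, "Couldn't register clock %d - %s\n", i, name);
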
1483     diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
1484     index f607ee702c83..311cea0c3ae2 100644
1485     --- a/drivers/clk/at91/clk-main.c
1486     +++ b/drivers/clk/at91/clk-main.c
1487     @@ -21,6 +21,10 @@
1488    
1489     #define MOR_KEY_MASK (0xff << 16)
1490    
1491     +#define clk_main_parent_select(s) (((s) & \
1492     + (AT91_PMC_MOSCEN | \
1493     + AT91_PMC_OSCBYPASS)) ? 1 : 0)
1494     +
1495     struct clk_main_osc {
1496     struct clk_hw hw;
1497     struct regmap *regmap;
1498     @@ -113,7 +117,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
1499    
1500     regmap_read(regmap, AT91_PMC_SR, &status);
1501    
1502     - return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
1503     + return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
1504     }
1505    
1506     static const struct clk_ops main_osc_ops = {
1507     @@ -450,7 +454,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
1508    
1509     regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
1510    
1511     - return status & AT91_PMC_MOSCEN ? 1 : 0;
1512     + return clk_main_parent_select(status);
1513     }
1514    
1515     static const struct clk_ops sam9x5_main_ops = {
1516     @@ -492,7 +496,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
1517     clkmain->hw.init = &init;
1518     clkmain->regmap = regmap;
1519     regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
1520     - clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
1521     + clkmain->parent = clk_main_parent_select(status);
1522    
1523     hw = &clkmain->hw;
1524     ret = clk_hw_register(NULL, &clkmain->hw);
1525     diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
1526     index 524bf9a53098..e9e16425c739 100644
1527     --- a/drivers/clk/clk-bulk.c
1528     +++ b/drivers/clk/clk-bulk.c
1529     @@ -18,10 +18,13 @@ static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
1530     int ret;
1531     int i;
1532    
1533     - for (i = 0; i < num_clks; i++)
1534     + for (i = 0; i < num_clks; i++) {
1535     + clks[i].id = NULL;
1536     clks[i].clk = NULL;
1537     + }
1538    
1539     for (i = 0; i < num_clks; i++) {
1540     + of_property_read_string_index(np, "clock-names", i, &clks[i].id);
1541     clks[i].clk = of_clk_get(np, i);
1542     if (IS_ERR(clks[i].clk)) {
1543     ret = PTR_ERR(clks[i].clk);
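
The clk-bulk hunk makes two independent hardening changes: pre-initializing both fields keeps the array consistent if the second loop fails part-way (cleanup can then safely walk all num_clks entries), and reading the optional clock-names property gives each entry a consumer id for error reporting. The of_property_read_string_index() return value is deliberately ignored, so clks[i].id simply stays NULL when the property or index is absent. Restated, with the unwinding elided:

    for (i = 0; i < num_clks; i++) {
        clks[i].id  = NULL;
        clks[i].clk = NULL;
    }

    for (i = 0; i < num_clks; i++) {
        /* best effort: id stays NULL if "clock-names" is missing */
        of_property_read_string_index(np, "clock-names", i, &clks[i].id);
        clks[i].clk = of_clk_get(np, i);
        /* error unwinding as in the hunk above */
    }
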
1544     diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
1545     index 07f3b252f3e0..bed140f7375f 100644
1546     --- a/drivers/clk/clk-qoriq.c
1547     +++ b/drivers/clk/clk-qoriq.c
1548     @@ -686,7 +686,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
1549     .guts_compat = "fsl,qoriq-device-config-1.0",
1550     .init_periph = p5020_init_periph,
1551     .cmux_groups = {
1552     - &p2041_cmux_grp1, &p2041_cmux_grp2
1553     + &p5020_cmux_grp1, &p5020_cmux_grp2
1554     },
1555     .cmux_to_group = {
1556     0, 1, -1
1557     diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
1558     index d407a07e7e6d..e07c69afc359 100644
1559     --- a/drivers/clk/imx/clk-imx8mq.c
1560     +++ b/drivers/clk/imx/clk-imx8mq.c
1561     @@ -406,7 +406,8 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
1562     clks[IMX8MQ_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
1563    
1564     /* AHB */
1565     - clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite("ahb", imx8mq_ahb_sels, base + 0x9000);
1566     + /* AHB clock is used by the AHB bus therefore marked as critical */
1567     + clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
1568     clks[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
1569    
1570     /* IPG */
1571     diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
1572     index b7213023b238..7a815ec76aa5 100644
1573     --- a/drivers/clk/imx/clk-pll14xx.c
1574     +++ b/drivers/clk/imx/clk-pll14xx.c
1575     @@ -191,6 +191,10 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
1576     tmp &= ~RST_MASK;
1577     writel_relaxed(tmp, pll->base);
1578    
1579     + /* Enable BYPASS */
1580     + tmp |= BYPASS_MASK;
1581     + writel(tmp, pll->base);
1582     +
1583     div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
1584     (rate->sdiv << SDIV_SHIFT);
1585     writel_relaxed(div_val, pll->base + 0x4);
1586     @@ -250,6 +254,10 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
1587     tmp &= ~RST_MASK;
1588     writel_relaxed(tmp, pll->base);
1589    
1590     + /* Enable BYPASS */
1591     + tmp |= BYPASS_MASK;
1592     + writel_relaxed(tmp, pll->base);
1593     +
1594     div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
1595     (rate->sdiv << SDIV_SHIFT);
1596     writel_relaxed(div_val, pll->base + 0x4);
1597     @@ -283,16 +291,28 @@ static int clk_pll14xx_prepare(struct clk_hw *hw)
1598     {
1599     struct clk_pll14xx *pll = to_clk_pll14xx(hw);
1600     u32 val;
1601     + int ret;
1602    
1603     /*
1604     * RESETB = 1 from 0, PLL starts its normal
1605     * operation after lock time
1606     */
1607     val = readl_relaxed(pll->base + GNRL_CTL);
1608     + if (val & RST_MASK)
1609     + return 0;
1610     + val |= BYPASS_MASK;
1611     + writel_relaxed(val, pll->base + GNRL_CTL);
1612     val |= RST_MASK;
1613     writel_relaxed(val, pll->base + GNRL_CTL);
1614    
1615     - return clk_pll14xx_wait_lock(pll);
1616     + ret = clk_pll14xx_wait_lock(pll);
1617     + if (ret)
1618     + return ret;
1619     +
1620     + val &= ~BYPASS_MASK;
1621     + writel_relaxed(val, pll->base + GNRL_CTL);
1622     +
1623     + return 0;
1624     }
1625    
1626     static int clk_pll14xx_is_prepared(struct clk_hw *hw)
1627     @@ -348,6 +368,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
1628     struct clk_pll14xx *pll;
1629     struct clk *clk;
1630     struct clk_init_data init;
1631     + u32 val;
1632    
1633     pll = kzalloc(sizeof(*pll), GFP_KERNEL);
1634     if (!pll)
1635     @@ -379,6 +400,10 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
1636     pll->rate_table = pll_clk->rate_table;
1637     pll->rate_count = pll_clk->rate_count;
1638    
1639     + val = readl_relaxed(pll->base + GNRL_CTL);
1640     + val &= ~BYPASS_MASK;
1641     + writel_relaxed(val, pll->base + GNRL_CTL);
1642     +
1643     clk = clk_register(NULL, &pll->hw);
1644     if (IS_ERR(clk)) {
1645     pr_err("%s: failed to register pll %s %lu\n",
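
All pll14xx hunks implement one sequence: park consumers on the bypass path while the PLL relocks, instead of letting them run from an unlocked output. Order matters — raise BYPASS before deasserting reset, and drop it only after the lock poll succeeds; the prepare hunk also returns early when RST_MASK is already set (the PLL is already running), and the probe-time hunk clears a BYPASS bit possibly left set by the bootloader. Condensed sketch:

    val |= BYPASS_MASK;                        /* 1. consumers follow the reference */
    writel_relaxed(val, pll->base + GNRL_CTL);
    val |= RST_MASK;                           /* 2. release reset, PLL starts locking */
    writel_relaxed(val, pll->base + GNRL_CTL);
    ret = clk_pll14xx_wait_lock(pll);
    if (ret)
        return ret;
    val &= ~BYPASS_MASK;                       /* 3. locked: switch back to PLL output */
    writel_relaxed(val, pll->base + GNRL_CTL);
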
1646     diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
1647     index 4c0a20949c2c..9b27d75d9485 100644
1648     --- a/drivers/clk/ingenic/jz4740-cgu.c
1649     +++ b/drivers/clk/ingenic/jz4740-cgu.c
1650     @@ -53,6 +53,10 @@ static const u8 jz4740_cgu_cpccr_div_table[] = {
1651     1, 2, 3, 4, 6, 8, 12, 16, 24, 32,
1652     };
1653    
1654     +static const u8 jz4740_cgu_pll_half_div_table[] = {
1655     + 2, 1,
1656     +};
1657     +
1658     static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
1659    
1660     /* External clocks */
1661     @@ -86,7 +90,10 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
1662     [JZ4740_CLK_PLL_HALF] = {
1663     "pll half", CGU_CLK_DIV,
1664     .parents = { JZ4740_CLK_PLL, -1, -1, -1 },
1665     - .div = { CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1 },
1666     + .div = {
1667     + CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1,
1668     + jz4740_cgu_pll_half_div_table,
1669     + },
1670     },
1671    
1672     [JZ4740_CLK_CCLK] = {
1673     diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
1674     index 8028ff6f6610..db0b73d53551 100644
1675     --- a/drivers/clk/meson/axg-audio.c
1676     +++ b/drivers/clk/meson/axg-audio.c
1677     @@ -992,15 +992,18 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
1678    
1679     /* Take care to skip the registered input clocks */
1680     for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) {
1681     + const char *name;
1682     +
1683     hw = data->hw_onecell_data->hws[i];
1684     /* array might be sparse */
1685     if (!hw)
1686     continue;
1687    
1688     + name = hw->init->name;
1689     +
1690     ret = devm_clk_hw_register(dev, hw);
1691     if (ret) {
1692     - dev_err(dev, "failed to register clock %s\n",
1693     - hw->init->name);
1694     + dev_err(dev, "failed to register clock %s\n", name);
1695     return ret;
1696     }
1697     }
1698     diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
1699     index 7131dcf9b060..95be125c3bdd 100644
1700     --- a/drivers/clk/qcom/gcc-sdm845.c
1701     +++ b/drivers/clk/qcom/gcc-sdm845.c
1702     @@ -685,7 +685,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
1703     .name = "gcc_sdcc2_apps_clk_src",
1704     .parent_names = gcc_parent_names_10,
1705     .num_parents = 5,
1706     - .ops = &clk_rcg2_ops,
1707     + .ops = &clk_rcg2_floor_ops,
1708     },
1709     };
1710    
1711     @@ -709,7 +709,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
1712     .name = "gcc_sdcc4_apps_clk_src",
1713     .parent_names = gcc_parent_names_0,
1714     .num_parents = 4,
1715     - .ops = &clk_rcg2_ops,
1716     + .ops = &clk_rcg2_floor_ops,
1717     },
1718     };
1719    
1720     diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
1721     index 2db9093546c6..e326e6dc09fc 100644
1722     --- a/drivers/clk/renesas/clk-mstp.c
1723     +++ b/drivers/clk/renesas/clk-mstp.c
1724     @@ -334,7 +334,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
1725     return;
1726    
1727     pd->name = np->name;
1728     - pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
1729     + pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1730     + GENPD_FLAG_ACTIVE_WAKEUP;
1731     pd->attach_dev = cpg_mstp_attach_dev;
1732     pd->detach_dev = cpg_mstp_detach_dev;
1733     pm_genpd_init(pd, &pm_domain_always_on_gov, false);
1734     diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
1735     index d4075b130674..132cc96895e3 100644
1736     --- a/drivers/clk/renesas/renesas-cpg-mssr.c
1737     +++ b/drivers/clk/renesas/renesas-cpg-mssr.c
1738     @@ -551,7 +551,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
1739    
1740     genpd = &pd->genpd;
1741     genpd->name = np->name;
1742     - genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
1743     + genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1744     + GENPD_FLAG_ACTIVE_WAKEUP;
1745     genpd->attach_dev = cpg_mssr_attach_dev;
1746     genpd->detach_dev = cpg_mssr_detach_dev;
1747     pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
1748     diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
1749     index ad7951b6b285..dcf4e25a0216 100644
1750     --- a/drivers/clk/sirf/clk-common.c
1751     +++ b/drivers/clk/sirf/clk-common.c
1752     @@ -297,9 +297,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
1753     {
1754     struct clk_dmn *clk = to_dmnclk(hw);
1755     u32 cfg = clkc_readl(clk->regofs);
1756     + const char *name = clk_hw_get_name(hw);
1757    
1758     /* parent of io domain can only be pll3 */
1759     - if (strcmp(hw->init->name, "io") == 0)
1760     + if (strcmp(name, "io") == 0)
1761     return 4;
1762    
1763     WARN_ON((cfg & (BIT(3) - 1)) > 4);
1764     @@ -311,9 +312,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
1765     {
1766     struct clk_dmn *clk = to_dmnclk(hw);
1767     u32 cfg = clkc_readl(clk->regofs);
1768     + const char *name = clk_hw_get_name(hw);
1769    
1770     /* parent of io domain can only be pll3 */
1771     - if (strcmp(hw->init->name, "io") == 0)
1772     + if (strcmp(name, "io") == 0)
1773     return -EINVAL;
1774    
1775     cfg &= ~(BIT(3) - 1);
1776     @@ -353,7 +355,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1777     {
1778     unsigned long fin;
1779     unsigned ratio, wait, hold;
1780     - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
1781     + const char *name = clk_hw_get_name(hw);
1782     + unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
1783    
1784     fin = *parent_rate;
1785     ratio = fin / rate;
1786     @@ -375,7 +378,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
1787     struct clk_dmn *clk = to_dmnclk(hw);
1788     unsigned long fin;
1789     unsigned ratio, wait, hold, reg;
1790     - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
1791     + const char *name = clk_hw_get_name(hw);
1792     + unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
1793    
1794     fin = parent_rate;
1795     ratio = fin / rate;
1796     diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
1797     index a5bdca1de5d0..9d56eac43832 100644
1798     --- a/drivers/clk/sprd/common.c
1799     +++ b/drivers/clk/sprd/common.c
1800     @@ -76,16 +76,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw)
1801     struct clk_hw *hw;
1802    
1803     for (i = 0; i < clkhw->num; i++) {
1804     + const char *name;
1805    
1806     hw = clkhw->hws[i];
1807     -
1808     if (!hw)
1809     continue;
1810    
1811     + name = hw->init->name;
1812     ret = devm_clk_hw_register(dev, hw);
1813     if (ret) {
1814     dev_err(dev, "Couldn't register clock %d - %s\n",
1815     - i, hw->init->name);
1816     + i, name);
1817     return ret;
1818     }
1819     }
1820     diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
1821     index 36b4402bf09e..640270f51aa5 100644
1822     --- a/drivers/clk/sprd/pll.c
1823     +++ b/drivers/clk/sprd/pll.c
1824     @@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
1825     k2 + refin * nint * CLK_PLL_1M;
1826     }
1827    
1828     + kfree(cfg);
1829     return rate;
1830     }
1831    
1832     @@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll,
1833     if (!ret)
1834     udelay(pll->udelay);
1835    
1836     + kfree(cfg);
1837     return ret;
1838     }
1839    
1840     diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
1841     index 9b3939fc7faa..5ca4d34b4094 100644
1842     --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
1843     +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
1844     @@ -502,6 +502,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
1845     [CLK_MMC1] = &mmc1_clk.common.hw,
1846     [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
1847     [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
1848     + [CLK_MMC2] = &mmc2_clk.common.hw,
1849     + [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
1850     + [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
1851     [CLK_CE] = &ce_clk.common.hw,
1852     [CLK_SPI0] = &spi0_clk.common.hw,
1853     [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
1854     diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
1855     index 7fe3ac980e5f..2e20e650b6c0 100644
1856     --- a/drivers/clk/sunxi-ng/ccu_common.c
1857     +++ b/drivers/clk/sunxi-ng/ccu_common.c
1858     @@ -97,14 +97,15 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
1859    
1860     for (i = 0; i < desc->hw_clks->num ; i++) {
1861     struct clk_hw *hw = desc->hw_clks->hws[i];
1862     + const char *name;
1863    
1864     if (!hw)
1865     continue;
1866    
1867     + name = hw->init->name;
1868     ret = of_clk_hw_register(node, hw);
1869     if (ret) {
1870     - pr_err("Couldn't register clock %d - %s\n",
1871     - i, clk_hw_get_name(hw));
1872     + pr_err("Couldn't register clock %d - %s\n", i, name);
1873     goto err_clk_unreg;
1874     }
1875     }
1876     diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
1877     index fd6c347bec6a..dd7045bc48c1 100644
1878     --- a/drivers/clk/zte/clk-zx296718.c
1879     +++ b/drivers/clk/zte/clk-zx296718.c
1880     @@ -564,6 +564,7 @@ static int __init top_clocks_init(struct device_node *np)
1881     {
1882     void __iomem *reg_base;
1883     int i, ret;
1884     + const char *name;
1885    
1886     reg_base = of_iomap(np, 0);
1887     if (!reg_base) {
1888     @@ -573,11 +574,10 @@ static int __init top_clocks_init(struct device_node *np)
1889    
1890     for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
1891     zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
1892     + name = zx296718_pll_clk[i].hw.init->name;
1893     ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
1894     - if (ret) {
1895     - pr_warn("top clk %s init error!\n",
1896     - zx296718_pll_clk[i].hw.init->name);
1897     - }
1898     + if (ret)
1899     + pr_warn("top clk %s init error!\n", name);
1900     }
1901    
1902     for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
1903     @@ -585,11 +585,10 @@ static int __init top_clocks_init(struct device_node *np)
1904     top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
1905     &top_ffactor_clk[i].factor.hw;
1906    
1907     + name = top_ffactor_clk[i].factor.hw.init->name;
1908     ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
1909     - if (ret) {
1910     - pr_warn("top clk %s init error!\n",
1911     - top_ffactor_clk[i].factor.hw.init->name);
1912     - }
1913     + if (ret)
1914     + pr_warn("top clk %s init error!\n", name);
1915     }
1916    
1917     for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
1918     @@ -598,11 +597,10 @@ static int __init top_clocks_init(struct device_node *np)
1919     &top_mux_clk[i].mux.hw;
1920    
1921     top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
1922     + name = top_mux_clk[i].mux.hw.init->name;
1923     ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
1924     - if (ret) {
1925     - pr_warn("top clk %s init error!\n",
1926     - top_mux_clk[i].mux.hw.init->name);
1927     - }
1928     + if (ret)
1929     + pr_warn("top clk %s init error!\n", name);
1930     }
1931    
1932     for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
1933     @@ -611,11 +609,10 @@ static int __init top_clocks_init(struct device_node *np)
1934     &top_gate_clk[i].gate.hw;
1935    
1936     top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
1937     + name = top_gate_clk[i].gate.hw.init->name;
1938     ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
1939     - if (ret) {
1940     - pr_warn("top clk %s init error!\n",
1941     - top_gate_clk[i].gate.hw.init->name);
1942     - }
1943     + if (ret)
1944     + pr_warn("top clk %s init error!\n", name);
1945     }
1946    
1947     for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
1948     @@ -624,11 +621,10 @@ static int __init top_clocks_init(struct device_node *np)
1949     &top_div_clk[i].div.hw;
1950    
1951     top_div_clk[i].div.reg += (uintptr_t)reg_base;
1952     + name = top_div_clk[i].div.hw.init->name;
1953     ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
1954     - if (ret) {
1955     - pr_warn("top clk %s init error!\n",
1956     - top_div_clk[i].div.hw.init->name);
1957     - }
1958     + if (ret)
1959     + pr_warn("top clk %s init error!\n", name);
1960     }
1961    
1962     ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
1963     @@ -754,6 +750,7 @@ static int __init lsp0_clocks_init(struct device_node *np)
1964     {
1965     void __iomem *reg_base;
1966     int i, ret;
1967     + const char *name;
1968    
1969     reg_base = of_iomap(np, 0);
1970     if (!reg_base) {
1971     @@ -767,11 +764,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
1972     &lsp0_mux_clk[i].mux.hw;
1973    
1974     lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
1975     + name = lsp0_mux_clk[i].mux.hw.init->name;
1976     ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
1977     - if (ret) {
1978     - pr_warn("lsp0 clk %s init error!\n",
1979     - lsp0_mux_clk[i].mux.hw.init->name);
1980     - }
1981     + if (ret)
1982     + pr_warn("lsp0 clk %s init error!\n", name);
1983     }
1984    
1985     for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
1986     @@ -780,11 +776,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
1987     &lsp0_gate_clk[i].gate.hw;
1988    
1989     lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
1990     + name = lsp0_gate_clk[i].gate.hw.init->name;
1991     ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
1992     - if (ret) {
1993     - pr_warn("lsp0 clk %s init error!\n",
1994     - lsp0_gate_clk[i].gate.hw.init->name);
1995     - }
1996     + if (ret)
1997     + pr_warn("lsp0 clk %s init error!\n", name);
1998     }
1999    
2000     for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
2001     @@ -793,11 +788,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
2002     &lsp0_div_clk[i].div.hw;
2003    
2004     lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
2005     + name = lsp0_div_clk[i].div.hw.init->name;
2006     ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
2007     - if (ret) {
2008     - pr_warn("lsp0 clk %s init error!\n",
2009     - lsp0_div_clk[i].div.hw.init->name);
2010     - }
2011     + if (ret)
2012     + pr_warn("lsp0 clk %s init error!\n", name);
2013     }
2014    
2015     ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
2016     @@ -862,6 +856,7 @@ static int __init lsp1_clocks_init(struct device_node *np)
2017     {
2018     void __iomem *reg_base;
2019     int i, ret;
2020     + const char *name;
2021    
2022     reg_base = of_iomap(np, 0);
2023     if (!reg_base) {
2024     @@ -875,11 +870,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
2025     &lsp0_mux_clk[i].mux.hw;
2026    
2027     lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
2028     + name = lsp1_mux_clk[i].mux.hw.init->name;
2029     ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
2030     - if (ret) {
2031     - pr_warn("lsp1 clk %s init error!\n",
2032     - lsp1_mux_clk[i].mux.hw.init->name);
2033     - }
2034     + if (ret)
2035     + pr_warn("lsp1 clk %s init error!\n", name);
2036     }
2037    
2038     for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
2039     @@ -888,11 +882,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
2040     &lsp1_gate_clk[i].gate.hw;
2041    
2042     lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
2043     + name = lsp1_gate_clk[i].gate.hw.init->name;
2044     ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
2045     - if (ret) {
2046     - pr_warn("lsp1 clk %s init error!\n",
2047     - lsp1_gate_clk[i].gate.hw.init->name);
2048     - }
2049     + if (ret)
2050     + pr_warn("lsp1 clk %s init error!\n", name);
2051     }
2052    
2053     for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
2054     @@ -901,11 +894,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
2055     &lsp1_div_clk[i].div.hw;
2056    
2057     lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
2058     + name = lsp1_div_clk[i].div.hw.init->name;
2059     ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
2060     - if (ret) {
2061     - pr_warn("lsp1 clk %s init error!\n",
2062     - lsp1_div_clk[i].div.hw.init->name);
2063     - }
2064     + if (ret)
2065     + pr_warn("lsp1 clk %s init error!\n", name);
2066     }
2067    
2068     ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
2069     @@ -979,6 +971,7 @@ static int __init audio_clocks_init(struct device_node *np)
2070     {
2071     void __iomem *reg_base;
2072     int i, ret;
2073     + const char *name;
2074    
2075     reg_base = of_iomap(np, 0);
2076     if (!reg_base) {
2077     @@ -992,11 +985,10 @@ static int __init audio_clocks_init(struct device_node *np)
2078     &audio_mux_clk[i].mux.hw;
2079    
2080     audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
2081     + name = audio_mux_clk[i].mux.hw.init->name;
2082     ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
2083     - if (ret) {
2084     - pr_warn("audio clk %s init error!\n",
2085     - audio_mux_clk[i].mux.hw.init->name);
2086     - }
2087     + if (ret)
2088     + pr_warn("audio clk %s init error!\n", name);
2089     }
2090    
2091     for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
2092     @@ -1005,11 +997,10 @@ static int __init audio_clocks_init(struct device_node *np)
2093     &audio_adiv_clk[i].hw;
2094    
2095     audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
2096     + name = audio_adiv_clk[i].hw.init->name;
2097     ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
2098     - if (ret) {
2099     - pr_warn("audio clk %s init error!\n",
2100     - audio_adiv_clk[i].hw.init->name);
2101     - }
2102     + if (ret)
2103     + pr_warn("audio clk %s init error!\n", name);
2104     }
2105    
2106     for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
2107     @@ -1018,11 +1009,10 @@ static int __init audio_clocks_init(struct device_node *np)
2108     &audio_div_clk[i].div.hw;
2109    
2110     audio_div_clk[i].div.reg += (uintptr_t)reg_base;
2111     + name = audio_div_clk[i].div.hw.init->name;
2112     ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
2113     - if (ret) {
2114     - pr_warn("audio clk %s init error!\n",
2115     - audio_div_clk[i].div.hw.init->name);
2116     - }
2117     + if (ret)
2118     + pr_warn("audio clk %s init error!\n", name);
2119     }
2120    
2121     for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
2122     @@ -1031,11 +1021,10 @@ static int __init audio_clocks_init(struct device_node *np)
2123     &audio_gate_clk[i].gate.hw;
2124    
2125     audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
2126     + name = audio_gate_clk[i].gate.hw.init->name;
2127     ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
2128     - if (ret) {
2129     - pr_warn("audio clk %s init error!\n",
2130     - audio_gate_clk[i].gate.hw.init->name);
2131     - }
2132     + if (ret)
2133     + pr_warn("audio clk %s init error!\n", name);
2134     }
2135    
2136     ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
2137     diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
2138     index 02768af0dccd..8c789b8671fc 100644
2139     --- a/drivers/crypto/hisilicon/sec/sec_algs.c
2140     +++ b/drivers/crypto/hisilicon/sec/sec_algs.c
2141     @@ -215,17 +215,18 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
2142     dma_addr_t psec_sgl, struct sec_dev_info *info)
2143     {
2144     struct sec_hw_sgl *sgl_current, *sgl_next;
2145     + dma_addr_t sgl_next_dma;
2146    
2147     - if (!hw_sgl)
2148     - return;
2149     sgl_current = hw_sgl;
2150     - while (sgl_current->next) {
2151     + while (sgl_current) {
2152     sgl_next = sgl_current->next;
2153     - dma_pool_free(info->hw_sgl_pool, sgl_current,
2154     - sgl_current->next_sgl);
2155     + sgl_next_dma = sgl_current->next_sgl;
2156     +
2157     + dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
2158     +
2159     sgl_current = sgl_next;
2160     + psec_sgl = sgl_next_dma;
2161     }
2162     - dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
2163     }
2164    
2165     static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
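
The sec_free_hw_sgl() rewrite fixes mismatched (virtual, DMA) pairs: dma_pool_free() must be given the DMA handle that maps the element being freed, but each element only stores its successor's handle in next_sgl. The old loop therefore freed each element with the next one's handle, double-freed the head (once in the loop, once afterwards with psec_sgl) and leaked the tail. The new loop carries the current handle alongside the walk, seeded with psec_sgl, the head's handle — sketched here with the patch's reuse of psec_sgl renamed to cur_dma for clarity:

    dma_addr_t cur_dma = psec_sgl;    /* handle that maps the head element */

    while (sgl_current) {
        struct sec_hw_sgl *next = sgl_current->next;
        dma_addr_t next_dma = sgl_current->next_sgl;    /* read before freeing */

        dma_pool_free(info->hw_sgl_pool, sgl_current, cur_dma);
        sgl_current = next;
        cur_dma = next_dma;
    }
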
2166     diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
2167     index 051f6c2873c7..6713cfb1995c 100644
2168     --- a/drivers/dma-buf/sw_sync.c
2169     +++ b/drivers/dma-buf/sw_sync.c
2170     @@ -132,17 +132,14 @@ static void timeline_fence_release(struct dma_fence *fence)
2171     {
2172     struct sync_pt *pt = dma_fence_to_sync_pt(fence);
2173     struct sync_timeline *parent = dma_fence_parent(fence);
2174     + unsigned long flags;
2175    
2176     + spin_lock_irqsave(fence->lock, flags);
2177     if (!list_empty(&pt->link)) {
2178     - unsigned long flags;
2179     -
2180     - spin_lock_irqsave(fence->lock, flags);
2181     - if (!list_empty(&pt->link)) {
2182     - list_del(&pt->link);
2183     - rb_erase(&pt->node, &parent->pt_tree);
2184     - }
2185     - spin_unlock_irqrestore(fence->lock, flags);
2186     + list_del(&pt->link);
2187     + rb_erase(&pt->node, &parent->pt_tree);
2188     }
2189     + spin_unlock_irqrestore(fence->lock, flags);
2190    
2191     sync_timeline_put(parent);
2192     dma_fence_free(fence);
2193     @@ -265,7 +262,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
2194     p = &parent->rb_left;
2195     } else {
2196     if (dma_fence_get_rcu(&other->base)) {
2197     - dma_fence_put(&pt->base);
2198     + sync_timeline_put(obj);
2199     + kfree(pt);
2200     pt = other;
2201     goto unlock;
2202     }
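
Two races closed in sw_sync. First, timeline_fence_release() drops the unlocked pre-check: testing list_empty() outside fence->lock could race with a concurrent signal path walking the same list, so the lock is now taken unconditionally around the check-and-unlink. Second, when sync_pt_create() finds an existing point at the same timestamp it must undo exactly what it did: the half-initialized pt was never a live fence, so it is kfree()d directly and the timeline reference taken for it is returned with sync_timeline_put(), instead of going through dma_fence_put() and the release path above. The locking shape, restated:

    spin_lock_irqsave(fence->lock, flags);
    if (!list_empty(&pt->link)) {        /* checked only under the lock */
        list_del(&pt->link);
        rb_erase(&pt->node, &parent->pt_tree);
    }
    spin_unlock_irqrestore(fence->lock, flags);
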
2203     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
2204     index eb3569b46c1e..430c56f9544a 100644
2205     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
2206     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
2207     @@ -139,14 +139,14 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
2208     mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
2209     fb_tiled);
2210     domain = amdgpu_display_supported_domains(adev);
2211     -
2212     height = ALIGN(mode_cmd->height, 8);
2213     size = mode_cmd->pitches[0] * height;
2214     aligned_size = ALIGN(size, PAGE_SIZE);
2215     ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
2216     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
2217     - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
2218     - AMDGPU_GEM_CREATE_VRAM_CLEARED,
2219     + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
2220     + AMDGPU_GEM_CREATE_VRAM_CLEARED |
2221     + AMDGPU_GEM_CREATE_CPU_GTT_USWC,
2222     ttm_bo_type_kernel, NULL, &gobj);
2223     if (ret) {
2224     pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
2225     @@ -168,7 +168,6 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
2226     dev_err(adev->dev, "FB failed to set tiling flags\n");
2227     }
2228    
2229     -
2230     ret = amdgpu_bo_pin(abo, domain);
2231     if (ret) {
2232     amdgpu_bo_unreserve(abo);
2233     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
2234     index 939f8305511b..fb291366d5ad 100644
2235     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
2236     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
2237     @@ -747,7 +747,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
2238     struct amdgpu_device *adev = dev->dev_private;
2239     struct drm_gem_object *gobj;
2240     uint32_t handle;
2241     - u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2242     + u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
2243     + AMDGPU_GEM_CREATE_CPU_GTT_USWC;
2244     u32 domain;
2245     int r;
2246    
2247     diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
2248     index 3747c3f1f0cc..15c371fac469 100644
2249     --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
2250     +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
2251     @@ -1583,7 +1583,8 @@ static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
2252    
2253     static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
2254     {
2255     - adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
2256     + adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
2257     + adev->sdma.num_instances;
2258     adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
2259     adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
2260     }
2261     diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
2262     index 4d74453f3cfb..602397016b64 100644
2263     --- a/drivers/gpu/drm/amd/amdgpu/si.c
2264     +++ b/drivers/gpu/drm/amd/amdgpu/si.c
2265     @@ -1881,7 +1881,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
2266     if (orig != data)
2267     si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
2268    
2269     - if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
2270     + if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
2271     orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
2272     data &= ~PLL_RAMP_UP_TIME_0_MASK;
2273     if (orig != data)
2274     @@ -1930,14 +1930,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
2275    
2276     orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
2277     data &= ~LS2_EXIT_TIME_MASK;
2278     - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
2279     + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
2280     data |= LS2_EXIT_TIME(5);
2281     if (orig != data)
2282     si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
2283    
2284     orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
2285     data &= ~LS2_EXIT_TIME_MASK;
2286     - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
2287     + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
2288     data |= LS2_EXIT_TIME(5);
2289     if (orig != data)
2290     si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
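
The si.c changes fix a type confusion: adev->family carries AMDGPU_FAMILY_* values while CHIP_OLAND and CHIP_HAINAN belong to enum amd_asic_type, so the old comparisons mixed two unrelated enums and the per-chip ASPM tuning never keyed off the intended chips. All four sites now test adev->asic_type, e.g.:

    if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN)
        data |= LS2_EXIT_TIME(5);    /* chip-specific LS2 exit time */
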
2291     diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
2292     index 592fa499c9f8..9594c154664f 100644
2293     --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
2294     +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
2295     @@ -334,7 +334,7 @@ bool dm_pp_get_clock_levels_by_type(
2296     }
2297     } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
2298     if (smu_get_clock_by_type(&adev->smu,
2299     - dc_to_smu_clock_type(clk_type),
2300     + dc_to_pp_clock_type(clk_type),
2301     &pp_clks)) {
2302     get_default_clock_levels(clk_type, dc_clks);
2303     return true;
2304     @@ -419,7 +419,7 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
2305     return false;
2306     } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
2307     if (smu_get_clock_by_type_with_latency(&adev->smu,
2308     - dc_to_pp_clock_type(clk_type),
2309     + dc_to_smu_clock_type(clk_type),
2310     &pp_clks))
2311     return false;
2312     }
2313     diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
2314     index 50bfb5921de0..2ab0f97719b5 100644
2315     --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
2316     +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
2317     @@ -348,6 +348,8 @@ void dcn20_clk_mgr_construct(
2318    
2319     clk_mgr->base.dprefclk_khz = 700000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved
2320    
2321     + clk_mgr->pp_smu = pp_smu;
2322     +
2323     if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
2324     dcn2_funcs.update_clocks = dcn2_update_clocks_fpga;
2325     clk_mgr->dentist_vco_freq_khz = 3850000;
2326     diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
2327     index cbc480a33376..730f97ba8dbb 100644
2328     --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
2329     +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
2330     @@ -2187,6 +2187,14 @@ void dc_set_power_state(
2331     dc_resource_state_construct(dc, dc->current_state);
2332    
2333     dc->hwss.init_hw(dc);
2334     +
2335     +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
2336     + if (dc->hwss.init_sys_ctx != NULL &&
2337     + dc->vm_pa_config.valid) {
2338     + dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
2339     + }
2340     +#endif
2341     +
2342     break;
2343     default:
2344     ASSERT(dc->current_state->stream_count == 0);
2345     diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
2346     index 2c7aaed907b9..0bf85a7a2cd3 100644
2347     --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
2348     +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
2349     @@ -3033,6 +3033,8 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
2350     link_enc->funcs->fec_set_ready(link_enc, true);
2351     link->fec_state = dc_link_fec_ready;
2352     } else {
2353     + link->link_enc->funcs->fec_set_ready(link->link_enc, false);
2354     + link->fec_state = dc_link_fec_not_ready;
2355     dm_error("dpcd write failed to set fec_ready");
2356     }
2357     } else if (link->fec_state == dc_link_fec_ready && !ready) {
2358     diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
2359     index 2d019e1f6135..a9135764e580 100644
2360     --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
2361     +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
2362     @@ -160,6 +160,10 @@ bool edp_receiver_ready_T7(struct dc_link *link)
2363     break;
2364     udelay(25); //MAx T7 is 50ms
2365     } while (++tries < 300);
2366     +
2367     + if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
2368     + udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
2369     +
2370     return result;
2371     }
2372    
2373     diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
2374     index 2ceaab4fb5de..68db60e4caf3 100644
2375     --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
2376     +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
2377     @@ -265,12 +265,10 @@ bool resource_construct(
2378     DC_ERR("DC: failed to create audio!\n");
2379     return false;
2380     }
2381     -
2382     if (!aud->funcs->endpoint_valid(aud)) {
2383     aud->funcs->destroy(&aud);
2384     break;
2385     }
2386     -
2387     pool->audios[i] = aud;
2388     pool->audio_count++;
2389     }
2390     @@ -1659,24 +1657,25 @@ static struct audio *find_first_free_audio(
2391     const struct resource_pool *pool,
2392     enum engine_id id)
2393     {
2394     - int i;
2395     - for (i = 0; i < pool->audio_count; i++) {
2396     + int i, available_audio_count;
2397     +
2398     + available_audio_count = pool->audio_count;
2399     +
2400     + for (i = 0; i < available_audio_count; i++) {
2401     if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
2402     /*we have enough audio endpoint, find the matching inst*/
2403     if (id != i)
2404     continue;
2405     -
2406     return pool->audios[i];
2407     }
2408     }
2409    
2410     - /* use engine id to find free audio */
2411     - if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
2412     + /* use engine id to find free audio */
2413     + if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
2414     return pool->audios[id];
2415     }
2416     -
2417     /*not found the matching one, first come first serve*/
2418     - for (i = 0; i < pool->audio_count; i++) {
2419     + for (i = 0; i < available_audio_count; i++) {
2420     if (res_ctx->is_audio_acquired[i] == false) {
2421     return pool->audios[i];
2422     }
2423     diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
2424     index 6eabb6491a3d..ce6d73d21cca 100644
2425     --- a/drivers/gpu/drm/amd/display/dc/dc_types.h
2426     +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
2427     @@ -202,6 +202,7 @@ struct dc_panel_patch {
2428     unsigned int dppowerup_delay;
2429     unsigned int extra_t12_ms;
2430     unsigned int extra_delay_backlight_off;
2431     + unsigned int extra_t7_ms;
2432     };
2433    
2434     struct dc_edid_caps {
2435     diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
2436     index 4a10a5d22c90..5de9623bdf66 100644
2437     --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
2438     +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
2439     @@ -613,6 +613,8 @@ void dce_aud_az_configure(
2440    
2441     AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
2442     value);
2443     + DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n",
2444     + audio->inst, value, audio_info->display_name);
2445    
2446     /*
2447     *write the port ID:
2448     @@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
2449     .az_configure = dce_aud_az_configure,
2450     .destroy = dce_aud_destroy,
2451     };
2452     -
2453     void dce_aud_destroy(struct audio **audio)
2454     {
2455     struct dce_audio *aud = DCE_AUD(*audio);
2456     @@ -953,7 +954,6 @@ struct audio *dce_audio_create(
2457     audio->regs = reg;
2458     audio->shifts = shifts;
2459     audio->masks = masks;
2460     -
2461     return &audio->base;
2462     }
2463    
2464     diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
2465     index 7469333a2c8a..8166fdbacd73 100644
2466     --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
2467     +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
2468     @@ -357,9 +357,10 @@ bool cm_helper_translate_curve_to_hw_format(
2469     seg_distr[7] = 4;
2470     seg_distr[8] = 4;
2471     seg_distr[9] = 4;
2472     + seg_distr[10] = 1;
2473    
2474     region_start = -10;
2475     - region_end = 0;
2476     + region_end = 1;
2477     }
2478    
2479     for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
2480     diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
2481     index a546c2bc9129..e365f2dd7f9a 100644
2482     --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
2483     +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
2484     @@ -824,6 +824,9 @@ void optc1_program_manual_trigger(struct timing_generator *optc)
2485    
2486     REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
2487     MANUAL_FLOW_CONTROL, 1);
2488     +
2489     + REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
2490     + MANUAL_FLOW_CONTROL, 0);
2491     }
2492    
2493    
2494     diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
2495     index d810c8940129..8fdb53a44bfb 100644
2496     --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
2497     +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
2498     @@ -585,6 +585,10 @@ static void dcn20_init_hw(struct dc *dc)
2499     }
2500     }
2501    
2502     + /* Power gate DSCs */
2503     + for (i = 0; i < res_pool->res_cap->num_dsc; i++)
2504     + dcn20_dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
2505     +
2506     /* Blank pixel data with OPP DPG */
2507     for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
2508     struct timing_generator *tg = dc->res_pool->timing_generators[i];
2509     @@ -1106,6 +1110,9 @@ void dcn20_enable_plane(
2510     /* enable DCFCLK current DCHUB */
2511     pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2512    
2513     + /* initialize HUBP on power up */
2514     + pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
2515     +
2516     /* make sure OPP_PIPE_CLOCK_EN = 1 */
2517     pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2518     pipe_ctx->stream_res.opp,
2519     @@ -1315,6 +1322,18 @@ static void dcn20_apply_ctx_for_surface(
2520     if (!top_pipe_to_program)
2521     return;
2522    
2523     + /* Carry over GSL groups in case the context is changing. */
2524     + for (i = 0; i < dc->res_pool->pipe_count; i++) {
2525     + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2526     + struct pipe_ctx *old_pipe_ctx =
2527     + &dc->current_state->res_ctx.pipe_ctx[i];
2528     +
2529     + if (pipe_ctx->stream == stream &&
2530     + pipe_ctx->stream == old_pipe_ctx->stream)
2531     + pipe_ctx->stream_res.gsl_group =
2532     + old_pipe_ctx->stream_res.gsl_group;
2533     + }
2534     +
2535     tg = top_pipe_to_program->stream_res.tg;
2536    
2537     interdependent_update = top_pipe_to_program->plane_state &&
2538     diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
2539     index 3cc0f2a1f77c..5db29bf582d3 100644
2540     --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
2541     +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
2542     @@ -167,6 +167,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
2543     .ack = NULL
2544     };
2545    
2546     +static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
2547     + .set = NULL,
2548     + .ack = NULL
2549     +};
2550     +
2551     #undef BASE_INNER
2552     #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
2553    
2554     @@ -221,12 +226,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
2555     .funcs = &pflip_irq_info_funcs\
2556     }
2557    
2558     -#define vupdate_int_entry(reg_num)\
2559     +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
2560     + * of DCE's DC_IRQ_SOURCE_VUPDATEx.
2561     + */
2562     +#define vupdate_no_lock_int_entry(reg_num)\
2563     [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
2564     IRQ_REG_ENTRY(OTG, reg_num,\
2565     - OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
2566     - OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
2567     - .funcs = &vblank_irq_info_funcs\
2568     + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
2569     + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
2570     + .funcs = &vupdate_no_lock_irq_info_funcs\
2571     }
2572    
2573     #define vblank_int_entry(reg_num)\
2574     @@ -333,12 +341,12 @@ irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = {
2575     dc_underflow_int_entry(6),
2576     [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
2577     [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
2578     - vupdate_int_entry(0),
2579     - vupdate_int_entry(1),
2580     - vupdate_int_entry(2),
2581     - vupdate_int_entry(3),
2582     - vupdate_int_entry(4),
2583     - vupdate_int_entry(5),
2584     + vupdate_no_lock_int_entry(0),
2585     + vupdate_no_lock_int_entry(1),
2586     + vupdate_no_lock_int_entry(2),
2587     + vupdate_no_lock_int_entry(3),
2588     + vupdate_no_lock_int_entry(4),
2589     + vupdate_no_lock_int_entry(5),
2590     vblank_int_entry(0),
2591     vblank_int_entry(1),
2592     vblank_int_entry(2),
2593     diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
2594     index 7c20171a3b6d..a53666ff6cf8 100644
2595     --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
2596     +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
2597     @@ -435,6 +435,12 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
2598     /* Either we've calculated the number of frames to insert,
2599     * or we need to insert min duration frames
2600     */
2601     + if (last_render_time_in_us / frames_to_insert <
2602     + in_out_vrr->min_duration_in_us){
2603     + frames_to_insert -= (frames_to_insert > 1) ?
2604     + 1 : 0;
2605     + }
2606     +
2607     if (frames_to_insert > 0)
2608     inserted_frame_duration_in_us = last_render_time_in_us /
2609     frames_to_insert;
2610     @@ -887,8 +893,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
2611     struct core_freesync *core_freesync = NULL;
2612     unsigned long long nominal_field_rate_in_uhz = 0;
2613     unsigned int refresh_range = 0;
2614     - unsigned int min_refresh_in_uhz = 0;
2615     - unsigned int max_refresh_in_uhz = 0;
2616     + unsigned long long min_refresh_in_uhz = 0;
2617     + unsigned long long max_refresh_in_uhz = 0;
2618    
2619     if (mod_freesync == NULL)
2620     return;
2621     @@ -915,7 +921,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
2622     min_refresh_in_uhz = nominal_field_rate_in_uhz;
2623    
2624     if (!vrr_settings_require_update(core_freesync,
2625     - in_config, min_refresh_in_uhz, max_refresh_in_uhz,
2626     + in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz,
2627     in_out_vrr))
2628     return;
2629    
2630     @@ -931,15 +937,15 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
2631     return;
2632    
2633     } else {
2634     - in_out_vrr->min_refresh_in_uhz = min_refresh_in_uhz;
2635     + in_out_vrr->min_refresh_in_uhz = (unsigned int)min_refresh_in_uhz;
2636     in_out_vrr->max_duration_in_us =
2637     calc_duration_in_us_from_refresh_in_uhz(
2638     - min_refresh_in_uhz);
2639     + (unsigned int)min_refresh_in_uhz);
2640    
2641     - in_out_vrr->max_refresh_in_uhz = max_refresh_in_uhz;
2642     + in_out_vrr->max_refresh_in_uhz = (unsigned int)max_refresh_in_uhz;
2643     in_out_vrr->min_duration_in_us =
2644     calc_duration_in_us_from_refresh_in_uhz(
2645     - max_refresh_in_uhz);
2646     + (unsigned int)max_refresh_in_uhz);
2647    
2648     refresh_range = in_out_vrr->max_refresh_in_uhz -
2649     in_out_vrr->min_refresh_in_uhz;
2650     @@ -950,17 +956,18 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
2651     in_out_vrr->fixed.ramping_active = in_config->ramping;
2652    
2653     in_out_vrr->btr.btr_enabled = in_config->btr;
2654     +
2655     if (in_out_vrr->max_refresh_in_uhz <
2656     2 * in_out_vrr->min_refresh_in_uhz)
2657     in_out_vrr->btr.btr_enabled = false;
2658     +
2659     in_out_vrr->btr.btr_active = false;
2660     in_out_vrr->btr.inserted_duration_in_us = 0;
2661     in_out_vrr->btr.frames_to_insert = 0;
2662     in_out_vrr->btr.frame_counter = 0;
2663     in_out_vrr->btr.mid_point_in_us =
2664     - in_out_vrr->min_duration_in_us +
2665     - (in_out_vrr->max_duration_in_us -
2666     - in_out_vrr->min_duration_in_us) / 2;
2667     + (in_out_vrr->min_duration_in_us +
2668     + in_out_vrr->max_duration_in_us) / 2;
2669    
2670     if (in_out_vrr->state == VRR_STATE_UNSUPPORTED) {
2671     in_out_vrr->adjust.v_total_min = stream->timing.v_total;
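
Two independent corrections in the freesync hunks. The BTR midpoint rewrite rests on an exact integer identity — for max >= min, min + (max - min) / 2 == (min + max) / 2, because min + max = 2*min + (max - min) — so only readability changes there; the u32 microsecond durations are far too small for the summed form to overflow. The refresh-rate locals, by contrast, are widened to unsigned long long because they are assigned from nominal_field_rate_in_uhz and scaled in micro-hertz, where intermediate values can exceed 32 bits; explicit casts narrow them back only at the consumer boundaries. The added clamp backs frames_to_insert off by one whenever the resulting inserted-frame duration would undershoot the minimum, restated:

    if (last_render_time_in_us / frames_to_insert < in_out_vrr->min_duration_in_us)
        frames_to_insert -= (frames_to_insert > 1) ? 1 : 0;    /* never drops below 1 */
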
2672     diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
2673     index b81c7e715dc9..9aaf2deff6e9 100644
2674     --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
2675     +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
2676     @@ -1627,6 +1627,10 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
2677     static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
2678     {
2679     int ret = 0;
2680     + struct amdgpu_device *adev = smu->adev;
2681     +
2682     + if (adev->asic_type != CHIP_NAVI10)
2683     + return -EINVAL;
2684    
2685     switch (level) {
2686     case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2687     diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
2688     index 3f7f4880be09..37bd541166a5 100644
2689     --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
2690     +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
2691     @@ -1035,16 +1035,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
2692     if (ret)
2693     return ret;
2694    
2695     + /* Check whether panel supports fast training */
2696     + ret = analogix_dp_fast_link_train_detection(dp);
2697     + if (ret)
2698     + dp->psr_enable = false;
2699     +
2700     if (dp->psr_enable) {
2701     ret = analogix_dp_enable_sink_psr(dp);
2702     if (ret)
2703     return ret;
2704     }
2705    
2706     - /* Check whether panel supports fast training */
2707     - ret = analogix_dp_fast_link_train_detection(dp);
2708     - if (ret)
2709     - dp->psr_enable = false;
2710    
2711     return ret;
2712     }
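
The reorder in analogix_dp_commit() matters because dp->psr_enable is consulted immediately afterwards: fast link training detection can clear the flag, so running it after the PSR block meant PSR could be enabled on a sink that cannot fast-train. Detection now runs first, so the flag is corrected before it is used — sketched with the intermediate error handling trimmed:

    ret = analogix_dp_fast_link_train_detection(dp);
    if (ret)
        dp->psr_enable = false;    /* no fast training: leave PSR off */

    if (dp->psr_enable)
        ret = analogix_dp_enable_sink_psr(dp);
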
2713     diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
2714     index dd7aa466b280..36acc256e67e 100644
2715     --- a/drivers/gpu/drm/bridge/sii902x.c
2716     +++ b/drivers/gpu/drm/bridge/sii902x.c
2717     @@ -750,6 +750,7 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x,
2718     sii902x->audio.i2s_fifo_sequence[i] |= audio_fifo_id[i] |
2719     i2s_lane_id[lanes[i]] | SII902X_TPI_I2S_FIFO_ENABLE;
2720    
2721     + sii902x->audio.mclk = devm_clk_get(dev, "mclk");
2722     if (IS_ERR(sii902x->audio.mclk)) {
2723     dev_err(dev, "%s: No clock (audio mclk) found: %ld\n",
2724     __func__, PTR_ERR(sii902x->audio.mclk));
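
The sii902x hunk restores a dropped assignment: audio.mclk was passed to IS_ERR() and PTR_ERR() without ever having been fetched, so the error check ran on whatever the allocator left in the field. The one added line is the whole fix:

    sii902x->audio.mclk = devm_clk_get(dev, "mclk");
    if (IS_ERR(sii902x->audio.mclk))
        dev_err(dev, "%s: No clock (audio mclk) found: %ld\n",
                __func__, PTR_ERR(sii902x->audio.mclk));
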
2725     diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
2726     index 13ade28a36a8..b3a7d5f1250c 100644
2727     --- a/drivers/gpu/drm/bridge/tc358767.c
2728     +++ b/drivers/gpu/drm/bridge/tc358767.c
2729     @@ -313,7 +313,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
2730     struct drm_dp_aux_msg *msg)
2731     {
2732     struct tc_data *tc = aux_to_tc(aux);
2733     - size_t size = min_t(size_t, 8, msg->size);
2734     + size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
2735     u8 request = msg->request & ~DP_AUX_I2C_MOT;
2736     u8 *buf = msg->buffer;
2737     u32 tmp = 0;
2738     diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
2739     index baf63fb6850a..a810568c76df 100644
2740     --- a/drivers/gpu/drm/mcde/mcde_drv.c
2741     +++ b/drivers/gpu/drm/mcde/mcde_drv.c
2742     @@ -319,7 +319,7 @@ static int mcde_probe(struct platform_device *pdev)
2743     struct device *dev = &pdev->dev;
2744     struct drm_device *drm;
2745     struct mcde *mcde;
2746     - struct component_match *match;
2747     + struct component_match *match = NULL;
2748     struct resource *res;
2749     u32 pid;
2750     u32 val;
2751     @@ -485,6 +485,10 @@ static int mcde_probe(struct platform_device *pdev)
2752     }
2753     put_device(p);
2754     }
2755     + if (!match) {
2756     + dev_err(dev, "no matching components\n");
2757     + return -ENODEV;
2758     + }
2759     if (IS_ERR(match)) {
2760     dev_err(dev, "could not create component match\n");
2761     ret = PTR_ERR(match);
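
The added NULL test matters because component_match_add() is only called from inside the probe loop; if no child device qualifies, match keeps the NULL it was initialized to, and IS_ERR(NULL) is false, so the old code would hand a NULL match on to the component framework. The three resulting states, in outline (compare_of stands in for whatever comparison callback the driver registers):

    struct component_match *match = NULL;

    for_each_available_child_of_node(dev->of_node, child)
            component_match_add(dev, &match, compare_of, child);

    if (!match)                 /* loop never added a component */
            return -ENODEV;
    if (IS_ERR(match))          /* helper failed internally     */
            return PTR_ERR(match);
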
2762     diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
2763     index 283ff690350e..50303ec194bb 100644
2764     --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
2765     +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
2766     @@ -320,7 +320,9 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
2767     asyh->wndw.olut &= ~BIT(wndw->id);
2768     }
2769    
2770     - if (!ilut && wndw->func->ilut_identity) {
2771     + if (!ilut && wndw->func->ilut_identity &&
2772     + asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
2773     + asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
2774     static struct drm_property_blob dummy = {};
2775     ilut = &dummy;
2776     }
2777     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
2778     index 7143ea4611aa..33a9fb5ac558 100644
2779     --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
2780     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
2781     @@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
2782     info->min = min(info->base,
2783     info->base + info->step * info->vidmask);
2784     info->max = nvbios_rd32(bios, volt + 0x0e);
2785     + if (!info->max)
2786     + info->max = max(info->base, info->base + info->step * info->vidmask);
2787     break;
2788     case 0x50:
2789     info->min = nvbios_rd32(bios, volt + 0x0a);
2790     diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
2791     index 28c0620dfe0f..b5b14aa059ea 100644
2792     --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
2793     +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
2794     @@ -399,7 +399,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
2795    
2796     /* Look up the DSI host. It needs to probe before we do. */
2797     endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
2798     + if (!endpoint)
2799     + return -ENODEV;
2800     +
2801     dsi_host_node = of_graph_get_remote_port_parent(endpoint);
2802     + if (!dsi_host_node)
2803     + goto error;
2804     +
2805     host = of_find_mipi_dsi_host_by_node(dsi_host_node);
2806     of_node_put(dsi_host_node);
2807     if (!host) {
2808     @@ -408,6 +414,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
2809     }
2810    
2811     info.node = of_graph_get_remote_port(endpoint);
2812     + if (!info.node)
2813     + goto error;
2814     +
2815     of_node_put(endpoint);
2816    
2817     ts->dsi = mipi_dsi_device_register_full(host, &info);
2818     @@ -428,6 +437,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
2819     return ret;
2820    
2821     return 0;
2822     +
2823     +error:
2824     + of_node_put(endpoint);
2825     + return -ENODEV;
2826     }
2827    
2828     static int rpi_touchscreen_remove(struct i2c_client *i2c)
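
All three added checks in the probe path follow the same OF refcounting rule: of_graph_get_next_endpoint() and its siblings return a node with an elevated reference count, so every early exit taken after the first successful lookup must drop that reference, hence the shared error label. Condensed to the first two steps (sketch only):

    endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
    if (!endpoint)
            return -ENODEV;                 /* nothing acquired yet  */

    dsi_host_node = of_graph_get_remote_port_parent(endpoint);
    if (!dsi_host_node) {
            of_node_put(endpoint);          /* undo the first lookup */
            return -ENODEV;
    }
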
2829     diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
2830     index 5a93c4edf1e4..ee6900eb3906 100644
2831     --- a/drivers/gpu/drm/panel/panel-simple.c
2832     +++ b/drivers/gpu/drm/panel/panel-simple.c
2833     @@ -724,9 +724,9 @@ static const struct panel_desc auo_g133han01 = {
2834     static const struct display_timing auo_g185han01_timings = {
2835     .pixelclock = { 120000000, 144000000, 175000000 },
2836     .hactive = { 1920, 1920, 1920 },
2837     - .hfront_porch = { 18, 60, 74 },
2838     - .hback_porch = { 12, 44, 54 },
2839     - .hsync_len = { 10, 24, 32 },
2840     + .hfront_porch = { 36, 120, 148 },
2841     + .hback_porch = { 24, 88, 108 },
2842     + .hsync_len = { 20, 48, 64 },
2843     .vactive = { 1080, 1080, 1080 },
2844     .vfront_porch = { 6, 10, 40 },
2845     .vback_porch = { 2, 5, 20 },
2846     diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2847     index c60d1a44d22a..b684cd719612 100644
2848     --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2849     +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2850     @@ -752,7 +752,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
2851    
2852     radeon_encoder->output_csc = val;
2853    
2854     - if (connector->encoder->crtc) {
2855     + if (connector->encoder && connector->encoder->crtc) {
2856     struct drm_crtc *crtc = connector->encoder->crtc;
2857     struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
2858    
2859     diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
2860     index a6cbe11f79c6..15d7bebe1729 100644
2861     --- a/drivers/gpu/drm/radeon/radeon_drv.c
2862     +++ b/drivers/gpu/drm/radeon/radeon_drv.c
2863     @@ -349,11 +349,19 @@ radeon_pci_remove(struct pci_dev *pdev)
2864     static void
2865     radeon_pci_shutdown(struct pci_dev *pdev)
2866     {
2867     + struct drm_device *ddev = pci_get_drvdata(pdev);
2868     +
2869     /* if we are running in a VM, make sure the device
2870     * is torn down properly on reboot/shutdown
2871     */
2872     if (radeon_device_is_virtual())
2873     radeon_pci_remove(pdev);
2874     +
2875     + /* Some adapters need to be suspended before a
2876     + * shutdown occurs in order to prevent an error
2877     + * during kexec.
2878     + */
2879     + radeon_suspend_kms(ddev, true, true, false);
2880     }
2881    
2882     static int radeon_pmops_suspend(struct device *dev)
2883     diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
2884     index 2fe6c4a8d915..3ab4fbf8eb0d 100644
2885     --- a/drivers/gpu/drm/stm/ltdc.c
2886     +++ b/drivers/gpu/drm/stm/ltdc.c
2887     @@ -26,6 +26,7 @@
2888     #include <drm/drm_fb_cma_helper.h>
2889     #include <drm/drm_fourcc.h>
2890     #include <drm/drm_gem_cma_helper.h>
2891     +#include <drm/drm_gem_framebuffer_helper.h>
2892     #include <drm/drm_of.h>
2893     #include <drm/drm_plane_helper.h>
2894     #include <drm/drm_probe_helper.h>
2895     @@ -922,6 +923,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
2896     };
2897    
2898     static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
2899     + .prepare_fb = drm_gem_fb_prepare_fb,
2900     .atomic_check = ltdc_plane_atomic_check,
2901     .atomic_update = ltdc_plane_atomic_update,
2902     .atomic_disable = ltdc_plane_atomic_disable,
2903     diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
2904     index 87819c82bcce..f2f0739d1035 100644
2905     --- a/drivers/gpu/drm/tinydrm/Kconfig
2906     +++ b/drivers/gpu/drm/tinydrm/Kconfig
2907     @@ -14,8 +14,8 @@ config TINYDRM_MIPI_DBI
2908     config TINYDRM_HX8357D
2909     tristate "DRM support for HX8357D display panels"
2910     depends on DRM_TINYDRM && SPI
2911     - depends on BACKLIGHT_CLASS_DEVICE
2912     select TINYDRM_MIPI_DBI
2913     + select BACKLIGHT_CLASS_DEVICE
2914     help
2915     DRM driver for the following HX8357D panels:
2916     * YX350HV15-T 3.5" 340x350 TFT (Adafruit 3.5")
2917     @@ -35,8 +35,8 @@ config TINYDRM_ILI9225
2918     config TINYDRM_ILI9341
2919     tristate "DRM support for ILI9341 display panels"
2920     depends on DRM_TINYDRM && SPI
2921     - depends on BACKLIGHT_CLASS_DEVICE
2922     select TINYDRM_MIPI_DBI
2923     + select BACKLIGHT_CLASS_DEVICE
2924     help
2925     DRM driver for the following Ilitek ILI9341 panels:
2926     * YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
2927     @@ -46,8 +46,8 @@ config TINYDRM_ILI9341
2928     config TINYDRM_MI0283QT
2929     tristate "DRM support for MI0283QT"
2930     depends on DRM_TINYDRM && SPI
2931     - depends on BACKLIGHT_CLASS_DEVICE
2932     select TINYDRM_MIPI_DBI
2933     + select BACKLIGHT_CLASS_DEVICE
2934     help
2935     DRM driver for the Multi-Inno MI0283QT display panel
2936     If M is selected the module will be called mi0283qt.
2937     @@ -78,8 +78,8 @@ config TINYDRM_ST7586
2938     config TINYDRM_ST7735R
2939     tristate "DRM support for Sitronix ST7735R display panels"
2940     depends on DRM_TINYDRM && SPI
2941     - depends on BACKLIGHT_CLASS_DEVICE
2942     select TINYDRM_MIPI_DBI
2943     + select BACKLIGHT_CLASS_DEVICE
2944     help
2945     DRM driver for Sitronix ST7735R with one of the following LCDs:
2946     * JD-T18003-T01 1.8" 128x160 TFT
2947     diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
2948     index e66ff25c008e..e9fb4ebb789f 100644
2949     --- a/drivers/gpu/drm/vkms/vkms_crc.c
2950     +++ b/drivers/gpu/drm/vkms/vkms_crc.c
2951     @@ -166,16 +166,24 @@ void vkms_crc_work_handle(struct work_struct *work)
2952     struct drm_plane *plane;
2953     u32 crc32 = 0;
2954     u64 frame_start, frame_end;
2955     + bool crc_pending;
2956     unsigned long flags;
2957    
2958     spin_lock_irqsave(&out->state_lock, flags);
2959     frame_start = crtc_state->frame_start;
2960     frame_end = crtc_state->frame_end;
2961     + crc_pending = crtc_state->crc_pending;
2962     + crtc_state->frame_start = 0;
2963     + crtc_state->frame_end = 0;
2964     + crtc_state->crc_pending = false;
2965     spin_unlock_irqrestore(&out->state_lock, flags);
2966    
2967     - /* _vblank_handle() hasn't updated frame_start yet */
2968     - if (!frame_start || frame_start == frame_end)
2969     - goto out;
2970     + /*
2971     + * We raced with the vblank hrtimer and previous work already computed
2972     + * the crc, nothing to do.
2973     + */
2974     + if (!crc_pending)
2975     + return;
2976    
2977     drm_for_each_plane(plane, &vdev->drm) {
2978     struct vkms_plane_state *vplane_state;
2979     @@ -196,20 +204,11 @@ void vkms_crc_work_handle(struct work_struct *work)
2980     if (primary_crc)
2981     crc32 = _vkms_get_crc(primary_crc, cursor_crc);
2982    
2983     - frame_end = drm_crtc_accurate_vblank_count(crtc);
2984     -
2985     - /* queue_work can fail to schedule crc_work; add crc for
2986     - * missing frames
2987     + /*
2988     + * The worker can fall behind the vblank hrtimer, make sure we catch up.
2989     */
2990     while (frame_start <= frame_end)
2991     drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
2992     -
2993     -out:
2994     - /* to avoid using the same value for frame number again */
2995     - spin_lock_irqsave(&out->state_lock, flags);
2996     - crtc_state->frame_end = frame_end;
2997     - crtc_state->frame_start = 0;
2998     - spin_unlock_irqrestore(&out->state_lock, flags);
2999     }
3000    
3001     static const char * const pipe_crc_sources[] = {"auto"};
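
The reworked vkms handler is a snapshot-and-consume pattern: the worker copies out everything it needs and clears crc_pending inside a single locked section, so the vblank hrtimer can start accumulating a fresh frame range immediately while the CRC computation itself runs unlocked. Distilled from the hunk above:

    spin_lock_irqsave(&out->state_lock, flags);
    frame_start = crtc_state->frame_start;      /* take private copies, */
    frame_end   = crtc_state->frame_end;
    crc_pending = crtc_state->crc_pending;
    crtc_state->frame_start = 0;                /* then hand the slot   */
    crtc_state->frame_end   = 0;                /* back to the producer */
    crtc_state->crc_pending = false;
    spin_unlock_irqrestore(&out->state_lock, flags);

    if (!crc_pending)   /* an earlier work item already consumed it */
            return;
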
3002     diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
3003     index 4d11292bc6f3..f392fa13015b 100644
3004     --- a/drivers/gpu/drm/vkms/vkms_crtc.c
3005     +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
3006     @@ -30,13 +30,18 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
3007     * has read the data
3008     */
3009     spin_lock(&output->state_lock);
3010     - if (!state->frame_start)
3011     + if (!state->crc_pending)
3012     state->frame_start = frame;
3013     + else
3014     + DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
3015     + state->frame_start, frame);
3016     + state->frame_end = frame;
3017     + state->crc_pending = true;
3018     spin_unlock(&output->state_lock);
3019    
3020     ret = queue_work(output->crc_workq, &state->crc_work);
3021     if (!ret)
3022     - DRM_WARN("failed to queue vkms_crc_work_handle");
3023     + DRM_DEBUG_DRIVER("vkms_crc_work_handle already queued\n");
3024     }
3025    
3026     spin_unlock(&output->lock);
3027     diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
3028     index 738dd6206d85..92296bd8f623 100644
3029     --- a/drivers/gpu/drm/vkms/vkms_drv.c
3030     +++ b/drivers/gpu/drm/vkms/vkms_drv.c
3031     @@ -92,7 +92,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
3032     dev->mode_config.max_height = YRES_MAX;
3033     dev->mode_config.preferred_depth = 24;
3034    
3035     - return vkms_output_init(vkmsdev);
3036     + return vkms_output_init(vkmsdev, 0);
3037     }
3038    
3039     static int __init vkms_init(void)
3040     diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
3041     index b92c30c66a6f..2fee10a00051 100644
3042     --- a/drivers/gpu/drm/vkms/vkms_drv.h
3043     +++ b/drivers/gpu/drm/vkms/vkms_drv.h
3044     @@ -48,6 +48,8 @@ struct vkms_plane_state {
3045     struct vkms_crtc_state {
3046     struct drm_crtc_state base;
3047     struct work_struct crc_work;
3048     +
3049     + bool crc_pending;
3050     u64 frame_start;
3051     u64 frame_end;
3052     };
3053     @@ -105,10 +107,10 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
3054     int *max_error, ktime_t *vblank_time,
3055     bool in_vblank_irq);
3056    
3057     -int vkms_output_init(struct vkms_device *vkmsdev);
3058     +int vkms_output_init(struct vkms_device *vkmsdev, int index);
3059    
3060     struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
3061     - enum drm_plane_type type);
3062     + enum drm_plane_type type, int index);
3063    
3064     /* Gem stuff */
3065     struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
3066     diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
3067     index 56fb5c2a2315..fb1941a6522c 100644
3068     --- a/drivers/gpu/drm/vkms/vkms_output.c
3069     +++ b/drivers/gpu/drm/vkms/vkms_output.c
3070     @@ -35,7 +35,7 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
3071     .get_modes = vkms_conn_get_modes,
3072     };
3073    
3074     -int vkms_output_init(struct vkms_device *vkmsdev)
3075     +int vkms_output_init(struct vkms_device *vkmsdev, int index)
3076     {
3077     struct vkms_output *output = &vkmsdev->output;
3078     struct drm_device *dev = &vkmsdev->drm;
3079     @@ -45,12 +45,12 @@ int vkms_output_init(struct vkms_device *vkmsdev)
3080     struct drm_plane *primary, *cursor = NULL;
3081     int ret;
3082    
3083     - primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
3084     + primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
3085     if (IS_ERR(primary))
3086     return PTR_ERR(primary);
3087    
3088     if (enable_cursor) {
3089     - cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
3090     + cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
3091     if (IS_ERR(cursor)) {
3092     ret = PTR_ERR(cursor);
3093     goto err_cursor;
3094     diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
3095     index 0fceb6258422..18c630cfc485 100644
3096     --- a/drivers/gpu/drm/vkms/vkms_plane.c
3097     +++ b/drivers/gpu/drm/vkms/vkms_plane.c
3098     @@ -176,7 +176,7 @@ static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
3099     };
3100    
3101     struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
3102     - enum drm_plane_type type)
3103     + enum drm_plane_type type, int index)
3104     {
3105     struct drm_device *dev = &vkmsdev->drm;
3106     const struct drm_plane_helper_funcs *funcs;
3107     @@ -198,7 +198,7 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
3108     funcs = &vkms_primary_helper_funcs;
3109     }
3110    
3111     - ret = drm_universal_plane_init(dev, plane, 0,
3112     + ret = drm_universal_plane_init(dev, plane, 1 << index,
3113     &vkms_plane_funcs,
3114     formats, nformats,
3115     NULL, type, NULL);
3116     diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
3117     index 81df62f48c4c..6ac8becc2372 100644
3118     --- a/drivers/hid/hid-apple.c
3119     +++ b/drivers/hid/hid-apple.c
3120     @@ -54,7 +54,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
3121     struct apple_sc {
3122     unsigned long quirks;
3123     unsigned int fn_on;
3124     - DECLARE_BITMAP(pressed_fn, KEY_CNT);
3125     DECLARE_BITMAP(pressed_numlock, KEY_CNT);
3126     };
3127    
3128     @@ -181,6 +180,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
3129     {
3130     struct apple_sc *asc = hid_get_drvdata(hid);
3131     const struct apple_key_translation *trans, *table;
3132     + bool do_translate;
3133     + u16 code = 0;
3134    
3135     if (usage->code == KEY_FN) {
3136     asc->fn_on = !!value;
3137     @@ -189,8 +190,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
3138     }
3139    
3140     if (fnmode) {
3141     - int do_translate;
3142     -
3143     if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
3144     hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
3145     table = macbookair_fn_keys;
3146     @@ -202,25 +201,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
3147     trans = apple_find_translation (table, usage->code);
3148    
3149     if (trans) {
3150     - if (test_bit(usage->code, asc->pressed_fn))
3151     - do_translate = 1;
3152     - else if (trans->flags & APPLE_FLAG_FKEY)
3153     - do_translate = (fnmode == 2 && asc->fn_on) ||
3154     - (fnmode == 1 && !asc->fn_on);
3155     - else
3156     - do_translate = asc->fn_on;
3157     -
3158     - if (do_translate) {
3159     - if (value)
3160     - set_bit(usage->code, asc->pressed_fn);
3161     - else
3162     - clear_bit(usage->code, asc->pressed_fn);
3163     -
3164     - input_event(input, usage->type, trans->to,
3165     - value);
3166     -
3167     - return 1;
3168     + if (test_bit(trans->from, input->key))
3169     + code = trans->from;
3170     + else if (test_bit(trans->to, input->key))
3171     + code = trans->to;
3172     +
3173     + if (!code) {
3174     + if (trans->flags & APPLE_FLAG_FKEY) {
3175     + switch (fnmode) {
3176     + case 1:
3177     + do_translate = !asc->fn_on;
3178     + break;
3179     + case 2:
3180     + do_translate = asc->fn_on;
3181     + break;
3182     + default:
3183     + /* should never happen */
3184     + do_translate = false;
3185     + }
3186     + } else {
3187     + do_translate = asc->fn_on;
3188     + }
3189     +
3190     + code = do_translate ? trans->to : trans->from;
3191     }
3192     +
3193     + input_event(input, usage->type, code, value);
3194     + return 1;
3195     }
3196    
3197     if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
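
The rewritten hid-apple translation drops the per-device pressed_fn bitmap and instead asks the input core which of the two candidate keycodes is currently down (input->key), so a release is always reported with the same code as its press even if the Fn state flipped in between; only a fresh press consults fnmode. The decision order, compressed (do_translate is derived from fnmode and asc->fn_on exactly as in the hunk):

    u16 code = 0;

    if (test_bit(trans->from, input->key))      /* release mirrors press */
            code = trans->from;
    else if (test_bit(trans->to, input->key))
            code = trans->to;

    if (!code)                                  /* fresh press           */
            code = do_translate ? trans->to : trans->from;

    input_event(input, usage->type, code, value);
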
3198     diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
3199     index 53bddb50aeba..602219a8710d 100644
3200     --- a/drivers/hid/wacom_sys.c
3201     +++ b/drivers/hid/wacom_sys.c
3202     @@ -88,7 +88,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
3203     }
3204    
3205     static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
3206     - struct hid_report *report, u8 *raw_data, int size)
3207     + struct hid_report *report, u8 *raw_data, int report_size)
3208     {
3209     struct wacom *wacom = hid_get_drvdata(hdev);
3210     struct wacom_wac *wacom_wac = &wacom->wacom_wac;
3211     @@ -149,7 +149,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
3212     if (flush)
3213     wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
3214     else if (insert)
3215     - wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
3216     + wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
3217     + raw_data, report_size);
3218    
3219     return insert && !flush;
3220     }
3221     @@ -2176,7 +2177,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
3222     {
3223     struct wacom_wac *wacom_wac = &wacom->wacom_wac;
3224     struct wacom_features *features = &wacom_wac->features;
3225     - char name[WACOM_NAME_MAX];
3226     + char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */
3227    
3228     /* Generic devices name unspecified */
3229     if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
3230     diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
3231     index 1713235d28cb..2b4640397375 100644
3232     --- a/drivers/hid/wacom_wac.c
3233     +++ b/drivers/hid/wacom_wac.c
3234     @@ -251,7 +251,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
3235    
3236     static int wacom_dtus_irq(struct wacom_wac *wacom)
3237     {
3238     - char *data = wacom->data;
3239     + unsigned char *data = wacom->data;
3240     struct input_dev *input = wacom->pen_input;
3241     unsigned short prox, pressure = 0;
3242    
3243     @@ -572,7 +572,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
3244     strip2 = ((data[3] & 0x1f) << 8) | data[4];
3245     }
3246    
3247     - prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) |
3248     + prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) |
3249     (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;
3250    
3251     wacom_report_numbered_buttons(input, nbuttons, buttons);
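
The ~0 to ~0U change is not cosmetic: ~0 is a signed int holding -1, and left-shifting a negative value is undefined behavior in C, whereas ~0U << n is a well-defined unsigned shift. The idiom ~(~0U << n) builds an all-ones mask over the low n bits. A self-contained illustration (plain userspace C, unrelated to the driver):

    #include <stdio.h>

    int main(void)
    {
            unsigned int nbuttons = 10;
            unsigned int buttons = 0xfffffu;

            /* keep only the low nbuttons bits */
            unsigned int mask = ~(~0U << nbuttons);

            printf("mask=%#x kept=%#x\n", mask, buttons & mask);
            return 0;
    }
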
3252     diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
3253     index 66af44bfa67d..f6546de66fbc 100644
3254     --- a/drivers/i2c/busses/i2c-cht-wc.c
3255     +++ b/drivers/i2c/busses/i2c-cht-wc.c
3256     @@ -178,6 +178,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = {
3257     .smbus_xfer = cht_wc_i2c_adap_smbus_xfer,
3258     };
3259    
3260     +/*
3261     + * We are an i2c-adapter which itself is part of an i2c-client. This means that
3262     + * transfers done through us take adapter->bus_lock twice, once for our parent
3263     + * i2c-adapter and once to take our own bus_lock. Lockdep does not like this
3264     + * nested locking. To make lockdep happy in the case of busses with muxes, the
3265     + * i2c-core's i2c_adapter_lock_bus function calls:
3266     + * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
3267     + *
3268     + * But i2c_adapter_depth only works when the direct parent of the adapter is
3269     + * another adapter, as it is only meant for muxes. In our case there is an
3270     + * i2c-client and MFD instantiated platform_device in the parent->child chain
3271     + * between the 2 devices.
3272     + *
3273     + * So we override the default i2c_lock_operations and pass a hardcoded
3274     + * depth of 1 to rt_mutex_lock_nested, to make lockdep happy.
3275     + *
3276     + * Note that if there were to be a mux attached to our adapter, this would
3277     + * break things again since the i2c-mux code expects the root-adapter to have
3278     + * a locking depth of 0. But we always have only 1 client directly attached
3279     + * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC.
3280     + */
3281     +static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter,
3282     + unsigned int flags)
3283     +{
3284     + rt_mutex_lock_nested(&adapter->bus_lock, 1);
3285     +}
3286     +
3287     +static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter,
3288     + unsigned int flags)
3289     +{
3290     + return rt_mutex_trylock(&adapter->bus_lock);
3291     +}
3292     +
3293     +static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter,
3294     + unsigned int flags)
3295     +{
3296     + rt_mutex_unlock(&adapter->bus_lock);
3297     +}
3298     +
3299     +static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = {
3300     + .lock_bus = cht_wc_i2c_adap_lock_bus,
3301     + .trylock_bus = cht_wc_i2c_adap_trylock_bus,
3302     + .unlock_bus = cht_wc_i2c_adap_unlock_bus,
3303     +};
3304     +
3305     /**** irqchip for the client connected to the extchgr i2c adapter ****/
3306     static void cht_wc_i2c_irq_lock(struct irq_data *data)
3307     {
3308     @@ -286,6 +331,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
3309     adap->adapter.owner = THIS_MODULE;
3310     adap->adapter.class = I2C_CLASS_HWMON;
3311     adap->adapter.algo = &cht_wc_i2c_adap_algo;
3312     + adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
3313     strlcpy(adap->adapter.name, "PMIC I2C Adapter",
3314     sizeof(adap->adapter.name));
3315     adap->adapter.dev.parent = &pdev->dev;
3316     diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
3317     index 9fcb13beeb8f..7a3291d91a5e 100644
3318     --- a/drivers/i2c/busses/i2c-tegra.c
3319     +++ b/drivers/i2c/busses/i2c-tegra.c
3320     @@ -713,12 +713,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
3321     u32 tsu_thd;
3322     u8 tlow, thigh;
3323    
3324     - err = pm_runtime_get_sync(i2c_dev->dev);
3325     - if (err < 0) {
3326     - dev_err(i2c_dev->dev, "runtime resume failed %d\n", err);
3327     - return err;
3328     - }
3329     -
3330     reset_control_assert(i2c_dev->rst);
3331     udelay(2);
3332     reset_control_deassert(i2c_dev->rst);
3333     @@ -772,7 +766,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
3334     if (err) {
3335     dev_err(i2c_dev->dev,
3336     "failed changing clock rate: %d\n", err);
3337     - goto err;
3338     + return err;
3339     }
3340     }
3341    
3342     @@ -787,23 +781,21 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
3343    
3344     err = tegra_i2c_flush_fifos(i2c_dev);
3345     if (err)
3346     - goto err;
3347     + return err;
3348    
3349     if (i2c_dev->is_multimaster_mode && i2c_dev->hw->has_slcg_override_reg)
3350     i2c_writel(i2c_dev, I2C_MST_CORE_CLKEN_OVR, I2C_CLKEN_OVERRIDE);
3351    
3352     err = tegra_i2c_wait_for_config_load(i2c_dev);
3353     if (err)
3354     - goto err;
3355     + return err;
3356    
3357     if (i2c_dev->irq_disabled) {
3358     i2c_dev->irq_disabled = false;
3359     enable_irq(i2c_dev->irq);
3360     }
3361    
3362     -err:
3363     - pm_runtime_put(i2c_dev->dev);
3364     - return err;
3365     + return 0;
3366     }
3367    
3368     static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
3369     @@ -1616,12 +1608,14 @@ static int tegra_i2c_probe(struct platform_device *pdev)
3370     }
3371    
3372     pm_runtime_enable(&pdev->dev);
3373     - if (!pm_runtime_enabled(&pdev->dev)) {
3374     + if (!pm_runtime_enabled(&pdev->dev))
3375     ret = tegra_i2c_runtime_resume(&pdev->dev);
3376     - if (ret < 0) {
3377     - dev_err(&pdev->dev, "runtime resume failed\n");
3378     - goto unprepare_div_clk;
3379     - }
3380     + else
3381     + ret = pm_runtime_get_sync(i2c_dev->dev);
3382     +
3383     + if (ret < 0) {
3384     + dev_err(&pdev->dev, "runtime resume failed\n");
3385     + goto unprepare_div_clk;
3386     }
3387    
3388     if (i2c_dev->is_multimaster_mode) {
3389     @@ -1666,6 +1660,8 @@ static int tegra_i2c_probe(struct platform_device *pdev)
3390     if (ret)
3391     goto release_dma;
3392    
3393     + pm_runtime_put(&pdev->dev);
3394     +
3395     return 0;
3396    
3397     release_dma:
3398     @@ -1726,17 +1722,25 @@ static int tegra_i2c_resume(struct device *dev)
3399     struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
3400     int err;
3401    
3402     + err = tegra_i2c_runtime_resume(dev);
3403     + if (err)
3404     + return err;
3405     +
3406     err = tegra_i2c_init(i2c_dev, false);
3407     if (err)
3408     return err;
3409    
3410     + err = tegra_i2c_runtime_suspend(dev);
3411     + if (err)
3412     + return err;
3413     +
3414     i2c_mark_adapter_resumed(&i2c_dev->adapter);
3415    
3416     return 0;
3417     }
3418    
3419     static const struct dev_pm_ops tegra_i2c_pm = {
3420     - SET_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume)
3421     + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume)
3422     SET_RUNTIME_PM_OPS(tegra_i2c_runtime_suspend, tegra_i2c_runtime_resume,
3423     NULL)
3424     };
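
The net effect of the tegra-i2c rework is to hoist runtime PM out of tegra_i2c_init(): probe now holds one pm_runtime_get_sync()/pm_runtime_put() pair across all one-time setup, and system resume explicitly brackets the re-init so the controller's clocks are running while its registers are programmed. The resume path, reduced to its essentials (a sketch; the real function also marks the adapter resumed):

    err = tegra_i2c_runtime_resume(dev);        /* clocks on first        */
    if (err)
            return err;

    err = tegra_i2c_init(i2c_dev, false);       /* safe to touch hardware */
    if (err)
            return err;

    err = tegra_i2c_runtime_suspend(dev);       /* balance: leave it idle */
    if (err)
            return err;
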
3425     diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
3426     index 00d5219094e5..48bba4913952 100644
3427     --- a/drivers/mailbox/mtk-cmdq-mailbox.c
3428     +++ b/drivers/mailbox/mtk-cmdq-mailbox.c
3429     @@ -22,6 +22,7 @@
3430     #define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
3431    
3432     #define CMDQ_CURR_IRQ_STATUS 0x10
3433     +#define CMDQ_SYNC_TOKEN_UPDATE 0x68
3434     #define CMDQ_THR_SLOT_CYCLES 0x30
3435     #define CMDQ_THR_BASE 0x100
3436     #define CMDQ_THR_SIZE 0x80
3437     @@ -104,8 +105,12 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
3438    
3439     static void cmdq_init(struct cmdq *cmdq)
3440     {
3441     + int i;
3442     +
3443     WARN_ON(clk_enable(cmdq->clock) < 0);
3444     writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
3445     + for (i = 0; i <= CMDQ_MAX_EVENT; i++)
3446     + writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
3447     clk_disable(cmdq->clock);
3448     }
3449    
3450     diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
3451     index 705e17a5479c..d3676fd3cf94 100644
3452     --- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
3453     +++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
3454     @@ -47,7 +47,6 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
3455    
3456     static int qcom_apcs_ipc_probe(struct platform_device *pdev)
3457     {
3458     - struct device_node *np = pdev->dev.of_node;
3459     struct qcom_apcs_ipc *apcs;
3460     struct regmap *regmap;
3461     struct resource *res;
3462     @@ -55,6 +54,11 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
3463     void __iomem *base;
3464     unsigned long i;
3465     int ret;
3466     + const struct of_device_id apcs_clk_match_table[] = {
3467     + { .compatible = "qcom,msm8916-apcs-kpss-global", },
3468     + { .compatible = "qcom,qcs404-apcs-apps-global", },
3469     + {}
3470     + };
3471    
3472     apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
3473     if (!apcs)
3474     @@ -89,7 +93,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
3475     return ret;
3476     }
3477    
3478     - if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
3479     + if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
3480     apcs->clk = platform_device_register_data(&pdev->dev,
3481     "qcom-apcs-msm8916-clk",
3482     -1, NULL, 0);
3483     diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
3484     index 1f933dd197cd..b0aa595e4375 100644
3485     --- a/drivers/md/dm-raid.c
3486     +++ b/drivers/md/dm-raid.c
3487     @@ -3738,18 +3738,18 @@ static int raid_iterate_devices(struct dm_target *ti,
3488     static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
3489     {
3490     struct raid_set *rs = ti->private;
3491     - unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);
3492     + unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
3493    
3494     - blk_limits_io_min(limits, chunk_size);
3495     - blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
3496     + blk_limits_io_min(limits, chunk_size_bytes);
3497     + blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
3498    
3499     /*
3500     * RAID1 and RAID10 personalities require bio splitting,
3501     * RAID0/4/5/6 don't and process large discard bios properly.
3502     */
3503     if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
3504     - limits->discard_granularity = chunk_size;
3505     - limits->max_discard_sectors = chunk_size;
3506     + limits->discard_granularity = chunk_size_bytes;
3507     + limits->max_discard_sectors = rs->md.chunk_sectors;
3508     }
3509     }
3510    
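The dm-raid rename carries the actual fix: blk_limits_io_min(), blk_limits_io_opt() and discard_granularity are all byte-valued, but max_discard_sectors counts 512-byte sectors, and the old code stored the byte value there. Putting the unit in the name makes the one sector-valued assignment stand out:

    unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);

    limits->discard_granularity = chunk_size_bytes;      /* bytes   */
    limits->max_discard_sectors = rs->md.chunk_sectors;  /* sectors */
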
3511     diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
3512     index 31478fef6032..d3bcc4197f5d 100644
3513     --- a/drivers/md/dm-zoned-target.c
3514     +++ b/drivers/md/dm-zoned-target.c
3515     @@ -134,8 +134,6 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
3516    
3517     refcount_inc(&bioctx->ref);
3518     generic_make_request(clone);
3519     - if (clone->bi_status == BLK_STS_IOERR)
3520     - return -EIO;
3521    
3522     if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
3523     zone->wp_block += nr_blocks;
3524     diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
3525     index ade6e1ce5a98..e3a04929aaa3 100644
3526     --- a/drivers/mfd/intel-lpss-pci.c
3527     +++ b/drivers/mfd/intel-lpss-pci.c
3528     @@ -35,6 +35,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
3529     info->mem = &pdev->resource[0];
3530     info->irq = pdev->irq;
3531    
3532     + pdev->d3cold_delay = 0;
3533     +
3534     /* Probably it is enough to set this for iDMA capable devices only */
3535     pci_set_master(pdev);
3536     pci_try_set_mwi(pdev);
3537     diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
3538     index ca3d17e43ed8..ac88caca5ad4 100644
3539     --- a/drivers/net/dsa/rtl8366.c
3540     +++ b/drivers/net/dsa/rtl8366.c
3541     @@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
3542     const struct switchdev_obj_port_vlan *vlan)
3543     {
3544     struct realtek_smi *smi = ds->priv;
3545     + u16 vid;
3546     int ret;
3547    
3548     - if (!smi->ops->is_vlan_valid(smi, port))
3549     - return -EINVAL;
3550     + for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
3551     + if (!smi->ops->is_vlan_valid(smi, vid))
3552     + return -EINVAL;
3553    
3554     dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
3555     vlan->vid_begin, vlan->vid_end);
3556     @@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
3557     u16 vid;
3558     int ret;
3559    
3560     - if (!smi->ops->is_vlan_valid(smi, port))
3561     - return;
3562     + for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
3563     + if (!smi->ops->is_vlan_valid(smi, vid))
3564     + return;
3565    
3566     dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
3567     port,
3568     diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
3569     index df976b259e43..296286f4fb39 100644
3570     --- a/drivers/net/dsa/sja1105/sja1105_main.c
3571     +++ b/drivers/net/dsa/sja1105/sja1105_main.c
3572     @@ -1875,7 +1875,9 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
3573     return sja1105_static_config_reload(priv);
3574     }
3575    
3576     -/* Caller must hold priv->tagger_data.meta_lock */
3577     +/* Must be called only with priv->tagger_data.state bit
3578     + * SJA1105_HWTS_RX_EN cleared
3579     + */
3580     static int sja1105_change_rxtstamping(struct sja1105_private *priv,
3581     bool on)
3582     {
3583     @@ -1932,16 +1934,17 @@ static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
3584     break;
3585     }
3586    
3587     - if (rx_on != priv->tagger_data.hwts_rx_en) {
3588     - spin_lock(&priv->tagger_data.meta_lock);
3589     + if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
3590     + clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
3591     +
3592     rc = sja1105_change_rxtstamping(priv, rx_on);
3593     - spin_unlock(&priv->tagger_data.meta_lock);
3594     if (rc < 0) {
3595     dev_err(ds->dev,
3596     "Failed to change RX timestamping: %d\n", rc);
3597     - return -EFAULT;
3598     + return rc;
3599     }
3600     - priv->tagger_data.hwts_rx_en = rx_on;
3601     + if (rx_on)
3602     + set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
3603     }
3604    
3605     if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
3606     @@ -1960,7 +1963,7 @@ static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
3607     config.tx_type = HWTSTAMP_TX_ON;
3608     else
3609     config.tx_type = HWTSTAMP_TX_OFF;
3610     - if (priv->tagger_data.hwts_rx_en)
3611     + if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
3612     config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
3613     else
3614     config.rx_filter = HWTSTAMP_FILTER_NONE;
3615     @@ -1983,12 +1986,12 @@ static void sja1105_rxtstamp_work(struct work_struct *work)
3616    
3617     mutex_lock(&priv->ptp_lock);
3618    
3619     - now = priv->tstamp_cc.read(&priv->tstamp_cc);
3620     -
3621     while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
3622     struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
3623     u64 ts;
3624    
3625     + now = priv->tstamp_cc.read(&priv->tstamp_cc);
3626     +
3627     *shwt = (struct skb_shared_hwtstamps) {0};
3628    
3629     ts = SJA1105_SKB_CB(skb)->meta_tstamp;
3630     @@ -2009,7 +2012,7 @@ static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
3631     struct sja1105_private *priv = ds->priv;
3632     struct sja1105_tagger_data *data = &priv->tagger_data;
3633    
3634     - if (!data->hwts_rx_en)
3635     + if (!test_bit(SJA1105_HWTS_RX_EN, &data->state))
3636     return false;
3637    
3638     /* We need to read the full PTP clock to reconstruct the Rx
3639     @@ -2165,6 +2168,7 @@ static int sja1105_probe(struct spi_device *spi)
3640     tagger_data = &priv->tagger_data;
3641     skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
3642     INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
3643     + spin_lock_init(&tagger_data->meta_lock);
3644    
3645     /* Connections between dsa_port and sja1105_port */
3646     for (i = 0; i < SJA1105_NUM_PORTS; i++) {
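
Replacing the hwts_rx_en bool (guarded by meta_lock) with an atomic state bit gives the hot RX path a cheap lock-free test while still ordering reconfiguration: the bit is cleared before the switch is reprogrammed, so concurrent receive paths stop consuming timestamps, and it is set again only if the change succeeded. The handoff, in outline:

    clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);

    rc = sja1105_change_rxtstamping(priv, rx_on);
    if (rc < 0)
            return rc;      /* bit stays clear on failure */

    if (rx_on)
            set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
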
3647     diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
3648     index 84dc603138cf..58dd37ecde17 100644
3649     --- a/drivers/net/dsa/sja1105/sja1105_spi.c
3650     +++ b/drivers/net/dsa/sja1105/sja1105_spi.c
3651     @@ -409,7 +409,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
3652     rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
3653     if (rc < 0) {
3654     dev_err(dev, "Invalid config, cannot upload\n");
3655     - return -EINVAL;
3656     + rc = -EINVAL;
3657     + goto out;
3658     }
3659     /* Prevent PHY jabbering during switch reset by inhibiting
3660     * Tx on all ports and waiting for current packet to drain.
3661     @@ -418,7 +419,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
3662     rc = sja1105_inhibit_tx(priv, port_bitmap, true);
3663     if (rc < 0) {
3664     dev_err(dev, "Failed to inhibit Tx on ports\n");
3665     - return -ENXIO;
3666     + rc = -ENXIO;
3667     + goto out;
3668     }
3669     /* Wait for an eventual egress packet to finish transmission
3670     * (reach IFG). It is guaranteed that a second one will not
3671     diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
3672     index 5b602243d573..a4dead4ab0ed 100644
3673     --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
3674     +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
3675     @@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
3676     static int alloc_uld_rxqs(struct adapter *adap,
3677     struct sge_uld_rxq_info *rxq_info, bool lro)
3678     {
3679     - struct sge *s = &adap->sge;
3680     unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
3681     + int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
3682     struct sge_ofld_rxq *q = rxq_info->uldrxq;
3683     unsigned short *ids = rxq_info->rspq_id;
3684     - unsigned int bmap_idx = 0;
3685     + struct sge *s = &adap->sge;
3686     unsigned int per_chan;
3687     - int i, err, msi_idx, que_idx = 0;
3688    
3689     per_chan = rxq_info->nrxq / adap->params.nports;
3690    
3691     @@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
3692    
3693     if (msi_idx >= 0) {
3694     bmap_idx = get_msix_idx_from_bmap(adap);
3695     + if (bmap_idx < 0) {
3696     + err = -ENOSPC;
3697     + goto freeout;
3698     + }
3699     msi_idx = adap->msix_info_ulds[bmap_idx].idx;
3700     }
3701     err = t4_sge_alloc_rxq(adap, &q->rspq, false,
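
The quiet but essential part of the cxgb4 hunk is the type change: bmap_idx used to be unsigned int, so a negative failure return from get_msix_idx_from_bmap() could never satisfy bmap_idx < 0, and the new -ENOSPC path would have been dead code. A minimal demonstration of the bug class (standalone C, nothing driver-specific):

    #include <stdio.h>

    static int alloc_idx(void)
    {
            return -1;      /* allocator out of free slots */
    }

    int main(void)
    {
            unsigned int u = alloc_idx();   /* -1 converts to UINT_MAX */
            int s = alloc_idx();

            if (u < 0)
                    puts("unreachable: u is unsigned");
            if (s < 0)
                    puts("failure detected");  /* only this fires */
            return 0;
    }
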
3702     diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
3703     index 457444894d80..b4b8ba00ee01 100644
3704     --- a/drivers/net/ethernet/qlogic/qla3xxx.c
3705     +++ b/drivers/net/ethernet/qlogic/qla3xxx.c
3706     @@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
3707     netdev_err(qdev->ndev,
3708     "PCI mapping failed with error: %d\n",
3709     err);
3710     + dev_kfree_skb_irq(skb);
3711     ql_free_large_buffers(qdev);
3712     return -ENOMEM;
3713     }
3714     diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
3715     index 1502fe8b0456..b9ac45d9dee8 100644
3716     --- a/drivers/net/ethernet/socionext/netsec.c
3717     +++ b/drivers/net/ethernet/socionext/netsec.c
3718     @@ -282,7 +282,6 @@ struct netsec_desc_ring {
3719     void *vaddr;
3720     u16 head, tail;
3721     u16 xdp_xmit; /* netsec_xdp_xmit packets */
3722     - bool is_xdp;
3723     struct page_pool *page_pool;
3724     struct xdp_rxq_info xdp_rxq;
3725     spinlock_t lock; /* XDP tx queue locking */
3726     @@ -634,8 +633,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
3727     unsigned int bytes;
3728     int cnt = 0;
3729    
3730     - if (dring->is_xdp)
3731     - spin_lock(&dring->lock);
3732     + spin_lock(&dring->lock);
3733    
3734     bytes = 0;
3735     entry = dring->vaddr + DESC_SZ * tail;
3736     @@ -682,8 +680,8 @@ next:
3737     entry = dring->vaddr + DESC_SZ * tail;
3738     cnt++;
3739     }
3740     - if (dring->is_xdp)
3741     - spin_unlock(&dring->lock);
3742     +
3743     + spin_unlock(&dring->lock);
3744    
3745     if (!cnt)
3746     return false;
3747     @@ -799,9 +797,6 @@ static void netsec_set_tx_de(struct netsec_priv *priv,
3748     de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
3749     de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
3750     de->attr = attr;
3751     - /* under spin_lock if using XDP */
3752     - if (!dring->is_xdp)
3753     - dma_wmb();
3754    
3755     dring->desc[idx] = *desc;
3756     if (desc->buf_type == TYPE_NETSEC_SKB)
3757     @@ -1123,12 +1118,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
3758     u16 tso_seg_len = 0;
3759     int filled;
3760    
3761     - if (dring->is_xdp)
3762     - spin_lock_bh(&dring->lock);
3763     + spin_lock_bh(&dring->lock);
3764     filled = netsec_desc_used(dring);
3765     if (netsec_check_stop_tx(priv, filled)) {
3766     - if (dring->is_xdp)
3767     - spin_unlock_bh(&dring->lock);
3768     + spin_unlock_bh(&dring->lock);
3769     net_warn_ratelimited("%s %s Tx queue full\n",
3770     dev_name(priv->dev), ndev->name);
3771     return NETDEV_TX_BUSY;
3772     @@ -1161,8 +1154,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
3773     tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
3774     skb_headlen(skb), DMA_TO_DEVICE);
3775     if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
3776     - if (dring->is_xdp)
3777     - spin_unlock_bh(&dring->lock);
3778     + spin_unlock_bh(&dring->lock);
3779     netif_err(priv, drv, priv->ndev,
3780     "%s: DMA mapping failed\n", __func__);
3781     ndev->stats.tx_dropped++;
3782     @@ -1177,8 +1169,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
3783     netdev_sent_queue(priv->ndev, skb->len);
3784    
3785     netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
3786     - if (dring->is_xdp)
3787     - spin_unlock_bh(&dring->lock);
3788     + spin_unlock_bh(&dring->lock);
3789     netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
3790    
3791     return NETDEV_TX_OK;
3792     @@ -1262,7 +1253,6 @@ err:
3793     static void netsec_setup_tx_dring(struct netsec_priv *priv)
3794     {
3795     struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
3796     - struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
3797     int i;
3798    
3799     for (i = 0; i < DESC_NUM; i++) {
3800     @@ -1275,12 +1265,6 @@ static void netsec_setup_tx_dring(struct netsec_priv *priv)
3801     */
3802     de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
3803     }
3804     -
3805     - if (xdp_prog)
3806     - dring->is_xdp = true;
3807     - else
3808     - dring->is_xdp = false;
3809     -
3810     }
3811    
3812     static int netsec_setup_rx_dring(struct netsec_priv *priv)
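
Removing is_xdp also removes a fragile conditional-locking scheme: the flag was sampled when the TX ring was set up, so every XDP attach or detach had to guarantee that no in-flight path could observe the flag flip between its lock and unlock sites. Taking the short-held ring lock unconditionally is cheaper than carrying that proof obligation. The hazard the old shape invites, in outline:

    if (dring->is_xdp)                  /* flag read once here ...      */
            spin_lock(&dring->lock);

    /* ... if is_xdp could change while we work ... */

    if (dring->is_xdp)                  /* ... and again here: the lock */
            spin_unlock(&dring->lock);  /* can leak or be dropped twice */
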
3813     diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
3814     index ce78714f536f..a505b2ab88b8 100644
3815     --- a/drivers/net/usb/hso.c
3816     +++ b/drivers/net/usb/hso.c
3817     @@ -2620,14 +2620,18 @@ static struct hso_device *hso_create_bulk_serial_device(
3818     */
3819     if (serial->tiocmget) {
3820     tiocmget = serial->tiocmget;
3821     + tiocmget->endp = hso_get_ep(interface,
3822     + USB_ENDPOINT_XFER_INT,
3823     + USB_DIR_IN);
3824     + if (!tiocmget->endp) {
3825     + dev_err(&interface->dev, "Failed to find INT IN ep\n");
3826     + goto exit;
3827     + }
3828     +
3829     tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
3830     if (tiocmget->urb) {
3831     mutex_init(&tiocmget->mutex);
3832     init_waitqueue_head(&tiocmget->waitq);
3833     - tiocmget->endp = hso_get_ep(
3834     - interface,
3835     - USB_ENDPOINT_XFER_INT,
3836     - USB_DIR_IN);
3837     } else
3838     hso_free_tiomget(serial);
3839     }
3840     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
3841     index b6dc5d714b5e..3d77cd402ba9 100644
3842     --- a/drivers/net/usb/qmi_wwan.c
3843     +++ b/drivers/net/usb/qmi_wwan.c
3844     @@ -1350,6 +1350,7 @@ static const struct usb_device_id products[] = {
3845     {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
3846     {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
3847     {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
3848     + {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
3849     {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
3850     {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
3851     {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
3852     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
3853     index 5f5722bf6762..7370e06a0e4b 100644
3854     --- a/drivers/net/xen-netfront.c
3855     +++ b/drivers/net/xen-netfront.c
3856     @@ -887,9 +887,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
3857     return 0;
3858     }
3859    
3860     -static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
3861     - struct sk_buff *skb,
3862     - struct sk_buff_head *list)
3863     +static int xennet_fill_frags(struct netfront_queue *queue,
3864     + struct sk_buff *skb,
3865     + struct sk_buff_head *list)
3866     {
3867     RING_IDX cons = queue->rx.rsp_cons;
3868     struct sk_buff *nskb;
3869     @@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
3870     if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
3871     queue->rx.rsp_cons = ++cons + skb_queue_len(list);
3872     kfree_skb(nskb);
3873     - return ~0U;
3874     + return -ENOENT;
3875     }
3876    
3877     skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3878     @@ -919,7 +919,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
3879     kfree_skb(nskb);
3880     }
3881    
3882     - return cons;
3883     + queue->rx.rsp_cons = cons;
3884     +
3885     + return 0;
3886     }
3887    
3888     static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
3889     @@ -1045,8 +1047,7 @@ err:
3890     skb->data_len = rx->status;
3891     skb->len += rx->status;
3892    
3893     - i = xennet_fill_frags(queue, skb, &tmpq);
3894     - if (unlikely(i == ~0U))
3895     + if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
3896     goto err;
3897    
3898     if (rx->flags & XEN_NETRXF_csum_blank)
3899     @@ -1056,7 +1057,7 @@ err:
3900    
3901     __skb_queue_tail(&rxq, skb);
3902    
3903     - queue->rx.rsp_cons = ++i;
3904     + i = ++queue->rx.rsp_cons;
3905     work_done++;
3906     }
3907    
3908     diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
3909     index 2ab92409210a..297bf928d652 100644
3910     --- a/drivers/pci/Kconfig
3911     +++ b/drivers/pci/Kconfig
3912     @@ -181,7 +181,7 @@ config PCI_LABEL
3913    
3914     config PCI_HYPERV
3915     tristate "Hyper-V PCI Frontend"
3916     - depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
3917     + depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
3918     help
3919     The PCI device frontend driver allows the kernel to import arbitrary
3920     PCI devices from a PCI backend to support PCI driver domains.
3921     diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
3922     index cee5f2f590e2..14a6ba4067fb 100644
3923     --- a/drivers/pci/controller/dwc/pci-exynos.c
3924     +++ b/drivers/pci/controller/dwc/pci-exynos.c
3925     @@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
3926    
3927     ep->phy = devm_of_phy_get(dev, np, NULL);
3928     if (IS_ERR(ep->phy)) {
3929     - if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
3930     + if (PTR_ERR(ep->phy) != -ENODEV)
3931     return PTR_ERR(ep->phy);
3932    
3933     ep->phy = NULL;
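
This is the canonical consumer pattern for optional resources, and the same shape recurs in the imx6, histb and rockchip hunks below for devm_regulator_get_optional(): -ENODEV means the supply is genuinely absent and the driver may carry on without it, while every other error, -EPROBE_DEFER included, must be propagated instead of silently discarded. In outline:

    reg = devm_regulator_get_optional(dev, "vpcie");
    if (IS_ERR(reg)) {
            if (PTR_ERR(reg) != -ENODEV)
                    return PTR_ERR(reg);    /* real failure or deferral */
            reg = NULL;                     /* absent: truly optional   */
    }
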
3934     diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
3935     index 9b5cb5b70389..aabf22eaa6b9 100644
3936     --- a/drivers/pci/controller/dwc/pci-imx6.c
3937     +++ b/drivers/pci/controller/dwc/pci-imx6.c
3938     @@ -1173,8 +1173,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
3939    
3940     imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
3941     if (IS_ERR(imx6_pcie->vpcie)) {
3942     - if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
3943     - return -EPROBE_DEFER;
3944     + if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
3945     + return PTR_ERR(imx6_pcie->vpcie);
3946     imx6_pcie->vpcie = NULL;
3947     }
3948    
3949     diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
3950     index be61d96cc95e..ca9aa4501e7e 100644
3951     --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
3952     +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
3953     @@ -44,6 +44,7 @@ static const struct pci_epc_features ls_pcie_epc_features = {
3954     .linkup_notifier = false,
3955     .msi_capable = true,
3956     .msix_capable = false,
3957     + .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
3958     };
3959    
3960     static const struct pci_epc_features*
3961     diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
3962     index 954bc2b74bbc..811b5c6d62ea 100644
3963     --- a/drivers/pci/controller/dwc/pcie-histb.c
3964     +++ b/drivers/pci/controller/dwc/pcie-histb.c
3965     @@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev)
3966    
3967     hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
3968     if (IS_ERR(hipcie->vpcie)) {
3969     - if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
3970     - return -EPROBE_DEFER;
3971     + if (PTR_ERR(hipcie->vpcie) != -ENODEV)
3972     + return PTR_ERR(hipcie->vpcie);
3973     hipcie->vpcie = NULL;
3974     }
3975    
3976     diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
3977     index 9a917b2456f6..673a1725ef38 100644
3978     --- a/drivers/pci/controller/pci-tegra.c
3979     +++ b/drivers/pci/controller/pci-tegra.c
3980     @@ -2237,14 +2237,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
3981     err = of_pci_get_devfn(port);
3982     if (err < 0) {
3983     dev_err(dev, "failed to parse address: %d\n", err);
3984     - return err;
3985     + goto err_node_put;
3986     }
3987    
3988     index = PCI_SLOT(err);
3989    
3990     if (index < 1 || index > soc->num_ports) {
3991     dev_err(dev, "invalid port number: %d\n", index);
3992     - return -EINVAL;
3993     + err = -EINVAL;
3994     + goto err_node_put;
3995     }
3996    
3997     index--;
3998     @@ -2253,12 +2254,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
3999     if (err < 0) {
4000     dev_err(dev, "failed to parse # of lanes: %d\n",
4001     err);
4002     - return err;
4003     + goto err_node_put;
4004     }
4005    
4006     if (value > 16) {
4007     dev_err(dev, "invalid # of lanes: %u\n", value);
4008     - return -EINVAL;
4009     + err = -EINVAL;
4010     + goto err_node_put;
4011     }
4012    
4013     lanes |= value << (index << 3);
4014     @@ -2272,13 +2274,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
4015     lane += value;
4016    
4017     rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
4018     - if (!rp)
4019     - return -ENOMEM;
4020     + if (!rp) {
4021     + err = -ENOMEM;
4022     + goto err_node_put;
4023     + }
4024    
4025     err = of_address_to_resource(port, 0, &rp->regs);
4026     if (err < 0) {
4027     dev_err(dev, "failed to parse address: %d\n", err);
4028     - return err;
4029     + goto err_node_put;
4030     }
4031    
4032     INIT_LIST_HEAD(&rp->list);
4033     @@ -2330,6 +2334,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
4034     return err;
4035    
4036     return 0;
4037     +
4038     +err_node_put:
4039     + of_node_put(port);
4040     + return err;
4041     }
4042    
4043     /*
4044     diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
4045     index 672e633601c7..a45a6447b01d 100644
4046     --- a/drivers/pci/controller/pcie-mobiveil.c
4047     +++ b/drivers/pci/controller/pcie-mobiveil.c
4048     @@ -88,6 +88,7 @@
4049     #define AMAP_CTRL_TYPE_MASK 3
4050    
4051     #define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
4052     +#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
4053     #define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
4054     #define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
4055     #define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
4056     @@ -462,7 +463,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
4057     }
4058    
4059     static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
4060     - u64 pci_addr, u32 type, u64 size)
4061     + u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
4062     {
4063     u32 value;
4064     u64 size64 = ~(size - 1);
4065     @@ -482,7 +483,10 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
4066     csr_writel(pcie, upper_32_bits(size64),
4067     PAB_EXT_PEX_AMAP_SIZEN(win_num));
4068    
4069     - csr_writel(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
4070     + csr_writel(pcie, lower_32_bits(cpu_addr),
4071     + PAB_PEX_AMAP_AXI_WIN(win_num));
4072     + csr_writel(pcie, upper_32_bits(cpu_addr),
4073     + PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
4074    
4075     csr_writel(pcie, lower_32_bits(pci_addr),
4076     PAB_PEX_AMAP_PEX_WIN_L(win_num));
4077     @@ -624,7 +628,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
4078     CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
4079    
4080     /* memory inbound translation window */
4081     - program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
4082     + program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
4083    
4084     /* Get the I/O and memory ranges from DT */
4085     resource_list_for_each_entry(win, &pcie->resources) {
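The mobiveil fix stops truncating the 64-bit CPU address of the inbound window: the low half still goes to the original AXI window register, while the new PAB_EXT_PEX_AMAP_AXI_WIN register receives the high half. A sketch of the lower/upper split, with plain variables standing in for the two 32-bit registers:

        #include <stdint.h>
        #include <stdio.h>

        static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
        static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

        /* Model of the classic and extended 32-bit window registers. */
        static uint32_t amap_axi_win, ext_amap_axi_win;

        static void program_ib_window(uint64_t cpu_addr)
        {
                amap_axi_win     = lower_32_bits(cpu_addr); /* low half          */
                ext_amap_axi_win = upper_32_bits(cpu_addr); /* high half, no loss */
        }

        int main(void)
        {
                program_ib_window(0x123456789ULL);
                printf("low=%#x high=%#x\n",
                       (unsigned)amap_axi_win, (unsigned)ext_amap_axi_win);
                return 0;
        }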
4086     diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
4087     index 8d20f1793a61..ef8e677ce9d1 100644
4088     --- a/drivers/pci/controller/pcie-rockchip-host.c
4089     +++ b/drivers/pci/controller/pcie-rockchip-host.c
4090     @@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
4091    
4092     rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
4093     if (IS_ERR(rockchip->vpcie12v)) {
4094     - if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
4095     - return -EPROBE_DEFER;
4096     + if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
4097     + return PTR_ERR(rockchip->vpcie12v);
4098     dev_info(dev, "no vpcie12v regulator found\n");
4099     }
4100    
4101     rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
4102     if (IS_ERR(rockchip->vpcie3v3)) {
4103     - if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
4104     - return -EPROBE_DEFER;
4105     + if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
4106     + return PTR_ERR(rockchip->vpcie3v3);
4107     dev_info(dev, "no vpcie3v3 regulator found\n");
4108     }
4109    
4110     rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
4111     if (IS_ERR(rockchip->vpcie1v8)) {
4112     - if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
4113     - return -EPROBE_DEFER;
4114     + if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
4115     + return PTR_ERR(rockchip->vpcie1v8);
4116     dev_info(dev, "no vpcie1v8 regulator found\n");
4117     }
4118    
4119     rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
4120     if (IS_ERR(rockchip->vpcie0v9)) {
4121     - if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
4122     - return -EPROBE_DEFER;
4123     + if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
4124     + return PTR_ERR(rockchip->vpcie0v9);
4125     dev_info(dev, "no vpcie0v9 regulator found\n");
4126     }
4127    
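The rockchip change fixes the optional-regulator idiom: the old code returned only on -EPROBE_DEFER and silently ignored every other failure, whereas only -ENODEV actually means "supply not wired up". A sketch of the corrected decision, with get_optional_supply() as an errno-style stand-in for devm_regulator_get_optional()/PTR_ERR():

        #include <errno.h>
        #include <stdio.h>

        /* Returns 0 on success or a negative errno, like PTR_ERR() on the
         * pointer from devm_regulator_get_optional(). The second argument
         * just simulates the outcome for this sketch. */
        static int get_optional_supply(const char *name, int simulated)
        {
                (void)name;
                return simulated;
        }

        static int parse_supplies(void)
        {
                int err = get_optional_supply("vpcie3v3", -ENODEV);

                if (err) {
                        if (err != -ENODEV)
                                return err;  /* real error, incl. -EPROBE_DEFER */
                        printf("no vpcie3v3 regulator found\n"); /* absent: go on */
                }
                return 0;
        }

        int main(void)
        {
                return parse_supplies() ? 1 : 0;
        }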
4128     diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
4129     index bcd5d357ca23..c3899ee1db99 100644
4130     --- a/drivers/pci/hotplug/rpaphp_core.c
4131     +++ b/drivers/pci/hotplug/rpaphp_core.c
4132     @@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
4133     struct of_drc_info drc;
4134     const __be32 *value;
4135     char cell_drc_name[MAX_DRC_NAME_LEN];
4136     - int j, fndit;
4137     + int j;
4138    
4139     info = of_find_property(dn->parent, "ibm,drc-info", NULL);
4140     if (info == NULL)
4141     @@ -245,17 +245,13 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
4142    
4143     /* Should now know end of current entry */
4144    
4145     - if (my_index > drc.last_drc_index)
4146     - continue;
4147     -
4148     - fndit = 1;
4149     - break;
4150     + /* Found it */
4151     + if (my_index <= drc.last_drc_index) {
4152     + sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
4153     + my_index);
4154     + break;
4155     + }
4156     }
4157     - /* Found it */
4158     -
4159     - if (fndit)
4160     - sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
4161     - my_index);
4162    
4163     if (((drc_name == NULL) ||
4164     (drc_name && !strcmp(drc_name, cell_drc_name))) &&
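The rpaphp hunk removes the fndit flag, which could be read uninitialized when no entry matched, by formatting the name directly at the match point and breaking out. The same restructuring in a self-contained sketch (the table contents and the slot-%d format are made up for illustration):

        #include <stdio.h>

        /* Do the work at the match point instead of setting a "found" flag
         * and acting on it after the loop; the old flag was consulted even
         * when no entry matched. */
        int main(void)
        {
                const int table[] = { 10, 20, 30 };
                const int wanted = 20;
                char name[32] = "";

                for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                        if (wanted <= table[i]) {
                                snprintf(name, sizeof(name), "slot-%d", wanted);
                                break;
                        }
                }
                printf("%s\n", name[0] ? name : "(not found)");
                return 0;
        }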
4165     diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
4166     index 06083b86d4f4..5fd90105510d 100644
4167     --- a/drivers/pci/pci-bridge-emul.c
4168     +++ b/drivers/pci/pci-bridge-emul.c
4169     @@ -38,7 +38,7 @@ struct pci_bridge_reg_behavior {
4170     u32 rsvd;
4171     };
4172    
4173     -const static struct pci_bridge_reg_behavior pci_regs_behavior[] = {
4174     +static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
4175     [PCI_VENDOR_ID / 4] = { .ro = ~0 },
4176     [PCI_COMMAND / 4] = {
4177     .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
4178     @@ -173,7 +173,7 @@ const static struct pci_bridge_reg_behavior pci_regs_behavior[] = {
4179     },
4180     };
4181    
4182     -const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
4183     +static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
4184     [PCI_CAP_LIST_ID / 4] = {
4185     /*
4186     * Capability ID, Next Capability Pointer and
4187     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
4188     index 1b27b5af3d55..1f17da3dfeac 100644
4189     --- a/drivers/pci/pci.c
4190     +++ b/drivers/pci/pci.c
4191     @@ -890,8 +890,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
4192    
4193     pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
4194     dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
4195     - if (dev->current_state != state && printk_ratelimit())
4196     - pci_info(dev, "Refused to change power state, currently in D%d\n",
4197     + if (dev->current_state != state)
4198     + pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
4199     dev->current_state);
4200    
4201     /*
4202     diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
4203     index 6c640837073e..5bfa56f3847e 100644
4204     --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
4205     +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
4206     @@ -192,8 +192,8 @@ static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
4207    
4208     static const unsigned int uart_tx_c_pins[] = { GPIOY_13 };
4209     static const unsigned int uart_rx_c_pins[] = { GPIOY_14 };
4210     -static const unsigned int uart_cts_c_pins[] = { GPIOX_11 };
4211     -static const unsigned int uart_rts_c_pins[] = { GPIOX_12 };
4212     +static const unsigned int uart_cts_c_pins[] = { GPIOY_11 };
4213     +static const unsigned int uart_rts_c_pins[] = { GPIOY_12 };
4214    
4215     static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
4216     static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
4217     @@ -439,10 +439,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
4218     GROUP(pwm_f_x, 3, 18),
4219    
4220     /* Bank Y */
4221     - GROUP(uart_cts_c, 1, 19),
4222     - GROUP(uart_rts_c, 1, 18),
4223     - GROUP(uart_tx_c, 1, 17),
4224     - GROUP(uart_rx_c, 1, 16),
4225     + GROUP(uart_cts_c, 1, 17),
4226     + GROUP(uart_rts_c, 1, 16),
4227     + GROUP(uart_tx_c, 1, 19),
4228     + GROUP(uart_rx_c, 1, 18),
4229     GROUP(pwm_a_y, 1, 21),
4230     GROUP(pwm_f_y, 1, 20),
4231     GROUP(i2s_out_ch23_y, 1, 5),
4232     diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
4233     index 9b9c61e3f065..977792654e01 100644
4234     --- a/drivers/pinctrl/pinctrl-amd.c
4235     +++ b/drivers/pinctrl/pinctrl-amd.c
4236     @@ -565,15 +565,25 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
4237     !(regval & BIT(INTERRUPT_MASK_OFF)))
4238     continue;
4239     irq = irq_find_mapping(gc->irq.domain, irqnr + i);
4240     - generic_handle_irq(irq);
4241     + if (irq != 0)
4242     + generic_handle_irq(irq);
4243    
4244     /* Clear interrupt.
4245     * We must read the pin register again, in case the
4246     * value was changed while executing
4247     * generic_handle_irq() above.
4248     + * If we didn't find a mapping for the interrupt,
4249     + * disable it in order to avoid a system hang caused
4250     + * by an interrupt storm.
4251     */
4252     raw_spin_lock_irqsave(&gpio_dev->lock, flags);
4253     regval = readl(regs + i);
4254     + if (irq == 0) {
4255     + regval &= ~BIT(INTERRUPT_ENABLE_OFF);
4256     + dev_dbg(&gpio_dev->pdev->dev,
4257     + "Disabling spurious GPIO IRQ %d\n",
4258     + irqnr + i);
4259     + }
4260     writel(regval, regs + i);
4261     raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
4262     ret = IRQ_HANDLED;
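The pinctrl-amd hunk handles GPIO interrupts that have no Linux IRQ mapping: instead of calling generic_handle_irq() with a bogus number, it clears the pin's interrupt-enable bit so an unclaimed source cannot storm. A sketch of that masking step; the register model and bit position here are illustrative, not the real register layout:

        #include <stdint.h>
        #include <stdio.h>

        #define INTERRUPT_ENABLE_OFF 11  /* illustrative bit position */

        static uint32_t pin_reg = (1u << INTERRUPT_ENABLE_OFF);

        /* Model of irq_find_mapping(): 0 means "no Linux IRQ mapped here". */
        static unsigned int find_mapping(unsigned int hwirq)
        {
                (void)hwirq;
                return 0;
        }

        static void handle_pin(unsigned int hwirq)
        {
                unsigned int irq = find_mapping(hwirq);
                uint32_t regval;

                if (irq != 0) {
                        /* generic_handle_irq(irq) would run the handler here */
                }

                /* Re-read the register and, when no mapping exists, clear the
                 * enable bit so the unclaimed source cannot fire forever. */
                regval = pin_reg;
                if (irq == 0) {
                        regval &= ~(1u << INTERRUPT_ENABLE_OFF);
                        printf("Disabling spurious GPIO IRQ %u\n", hwirq);
                }
                pin_reg = regval;  /* write back (writel in the driver) */
        }

        int main(void)
        {
                handle_pin(3);
                return 0;
        }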
4263     diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
4264     index d3332da35637..31b6e511670f 100644
4265     --- a/drivers/pinctrl/pinctrl-stmfx.c
4266     +++ b/drivers/pinctrl/pinctrl-stmfx.c
4267     @@ -296,29 +296,29 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
4268     switch (param) {
4269     case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
4270     case PIN_CONFIG_BIAS_DISABLE:
4271     + case PIN_CONFIG_DRIVE_PUSH_PULL:
4272     + ret = stmfx_pinconf_set_type(pctl, pin, 0);
4273     + if (ret)
4274     + return ret;
4275     + break;
4276     case PIN_CONFIG_BIAS_PULL_DOWN:
4277     + ret = stmfx_pinconf_set_type(pctl, pin, 1);
4278     + if (ret)
4279     + return ret;
4280     ret = stmfx_pinconf_set_pupd(pctl, pin, 0);
4281     if (ret)
4282     return ret;
4283     break;
4284     case PIN_CONFIG_BIAS_PULL_UP:
4285     - ret = stmfx_pinconf_set_pupd(pctl, pin, 1);
4286     + ret = stmfx_pinconf_set_type(pctl, pin, 1);
4287     if (ret)
4288     return ret;
4289     - break;
4290     - case PIN_CONFIG_DRIVE_OPEN_DRAIN:
4291     - if (!dir)
4292     - ret = stmfx_pinconf_set_type(pctl, pin, 1);
4293     - else
4294     - ret = stmfx_pinconf_set_type(pctl, pin, 0);
4295     + ret = stmfx_pinconf_set_pupd(pctl, pin, 1);
4296     if (ret)
4297     return ret;
4298     break;
4299     - case PIN_CONFIG_DRIVE_PUSH_PULL:
4300     - if (!dir)
4301     - ret = stmfx_pinconf_set_type(pctl, pin, 0);
4302     - else
4303     - ret = stmfx_pinconf_set_type(pctl, pin, 1);
4304     + case PIN_CONFIG_DRIVE_OPEN_DRAIN:
4305     + ret = stmfx_pinconf_set_type(pctl, pin, 1);
4306     if (ret)
4307     return ret;
4308     break;
4309     diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
4310     index 186ef98e7b2b..f1b523beec5b 100644
4311     --- a/drivers/pinctrl/tegra/pinctrl-tegra.c
4312     +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
4313     @@ -32,7 +32,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
4314    
4315     static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
4316     {
4317     - writel(val, pmx->regs[bank] + reg);
4318     + writel_relaxed(val, pmx->regs[bank] + reg);
4319     + /* make sure the pinmux register write has completed */
4320     + pmx_readl(pmx, bank, reg);
4321     }
4322    
4323     static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
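The tegra pinctrl change pairs writel_relaxed() with an immediate read-back, turning a posted MMIO write into one that is guaranteed to have reached the device before the function returns. A tiny sketch of the write-then-read-back flush against a volatile location standing in for the pinmux register:

        #include <stdint.h>

        /* Model of one 32-bit pinmux register behind an MMIO pointer. */
        static volatile uint32_t fake_mmio;

        static void pmx_writel_flushed(uint32_t val, volatile uint32_t *reg)
        {
                *reg = val;   /* writel_relaxed(): posted, may sit in a buffer */
                (void)*reg;   /* read-back forces the write to complete */
        }

        int main(void)
        {
                pmx_writel_flushed(0x5a, &fake_mmio);
                return (int)fake_mmio == 0x5a ? 0 : 1;
        }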
4324     diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
4325     index 51fe60440d12..75cf861ba492 100644
4326     --- a/drivers/power/supply/power_supply_hwmon.c
4327     +++ b/drivers/power/supply/power_supply_hwmon.c
4328     @@ -284,6 +284,7 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy)
4329     struct device *dev = &psy->dev;
4330     struct device *hwmon;
4331     int ret, i;
4332     + const char *name;
4333    
4334     if (!devres_open_group(dev, power_supply_add_hwmon_sysfs,
4335     GFP_KERNEL))
4336     @@ -334,7 +335,19 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy)
4337     }
4338     }
4339    
4340     - hwmon = devm_hwmon_device_register_with_info(dev, psy->desc->name,
4341     + name = psy->desc->name;
4342     + if (strchr(name, '-')) {
4343     + char *new_name;
4344     +
4345     + new_name = devm_kstrdup(dev, name, GFP_KERNEL);
4346     + if (!new_name) {
4347     + ret = -ENOMEM;
4348     + goto error;
4349     + }
4350     + strreplace(new_name, '-', '_');
4351     + name = new_name;
4352     + }
4353     + hwmon = devm_hwmon_device_register_with_info(dev, name,
4354     psyhw,
4355     &power_supply_hwmon_chip_info,
4356     NULL);
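The power-supply hunk works around hwmon's rule that device names must not contain '-': the name is duplicated and every dash replaced with an underscore before registration. A user-space equivalent of the devm_kstrdup()+strreplace() pair (sanitize_hwmon_name() is an illustrative helper, not driver API):

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        /* hwmon device names must not contain '-'; replace with '_'. */
        static char *sanitize_hwmon_name(const char *name)
        {
                char *copy = strdup(name);  /* devm_kstrdup() in the driver */
                if (!copy)
                        return NULL;
                for (char *p = copy; *p; p++)
                        if (*p == '-')
                                *p = '_';
                return copy;
        }

        int main(void)
        {
                char *name = sanitize_hwmon_name("bq27000-battery");
                if (!name)
                        return 1;
                puts(name);                 /* prints bq27000_battery */
                free(name);
                return 0;
        }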
4357     diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
4358     index c61f00b72e15..a577218d1ab7 100644
4359     --- a/drivers/ptp/ptp_qoriq.c
4360     +++ b/drivers/ptp/ptp_qoriq.c
4361     @@ -507,6 +507,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
4362     ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
4363     }
4364    
4365     + spin_lock_init(&ptp_qoriq->lock);
4366     +
4367     ktime_get_real_ts64(&now);
4368     ptp_qoriq_settime(&ptp_qoriq->caps, &now);
4369    
4370     @@ -514,7 +516,6 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
4371     (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
4372     (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
4373    
4374     - spin_lock_init(&ptp_qoriq->lock);
4375     spin_lock_irqsave(&ptp_qoriq->lock, flags);
4376    
4377     regs = &ptp_qoriq->regs;
4378     diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
4379     index e72f65b61176..add43c337489 100644
4380     --- a/drivers/rtc/Kconfig
4381     +++ b/drivers/rtc/Kconfig
4382     @@ -500,6 +500,7 @@ config RTC_DRV_M41T80_WDT
4383     watchdog timer in the ST M41T60 and M41T80 RTC chips series.
4384     config RTC_DRV_BD70528
4385     tristate "ROHM BD70528 PMIC RTC"
4386     + depends on MFD_ROHM_BD70528 && (BD70528_WATCHDOG || !BD70528_WATCHDOG)
4387     help
4388     If you say Y here you will get support for the RTC
4389     on ROHM BD70528 Power Management IC.
4390     diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
4391     index a075e77617dc..3450d615974d 100644
4392     --- a/drivers/rtc/rtc-pcf85363.c
4393     +++ b/drivers/rtc/rtc-pcf85363.c
4394     @@ -166,7 +166,12 @@ static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
4395     buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
4396    
4397     ret = regmap_bulk_write(pcf85363->regmap, CTRL_STOP_EN,
4398     - tmp, sizeof(tmp));
4399     + tmp, 2);
4400     + if (ret)
4401     + return ret;
4402     +
4403     + ret = regmap_bulk_write(pcf85363->regmap, DT_100THS,
4404     + buf, sizeof(tmp) - 2);
4405     if (ret)
4406     return ret;
4407    
4408     diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
4409     index 7ee673a25fd0..4f9a107a0427 100644
4410     --- a/drivers/rtc/rtc-snvs.c
4411     +++ b/drivers/rtc/rtc-snvs.c
4412     @@ -279,6 +279,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
4413     if (!data)
4414     return -ENOMEM;
4415    
4416     + data->rtc = devm_rtc_allocate_device(&pdev->dev);
4417     + if (IS_ERR(data->rtc))
4418     + return PTR_ERR(data->rtc);
4419     +
4420     data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
4421    
4422     if (IS_ERR(data->regmap)) {
4423     @@ -343,10 +347,9 @@ static int snvs_rtc_probe(struct platform_device *pdev)
4424     goto error_rtc_device_register;
4425     }
4426    
4427     - data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
4428     - &snvs_rtc_ops, THIS_MODULE);
4429     - if (IS_ERR(data->rtc)) {
4430     - ret = PTR_ERR(data->rtc);
4431     + data->rtc->ops = &snvs_rtc_ops;
4432     + ret = rtc_register_device(data->rtc);
4433     + if (ret) {
4434     dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
4435     goto error_rtc_device_register;
4436     }
4437     diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
4438     index 39b8cc4574b4..c6ed0b12e807 100644
4439     --- a/drivers/scsi/scsi_logging.c
4440     +++ b/drivers/scsi/scsi_logging.c
4441     @@ -15,57 +15,15 @@
4442     #include <scsi/scsi_eh.h>
4443     #include <scsi/scsi_dbg.h>
4444    
4445     -#define SCSI_LOG_SPOOLSIZE 4096
4446     -
4447     -#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
4448     -#warning SCSI logging bitmask too large
4449     -#endif
4450     -
4451     -struct scsi_log_buf {
4452     - char buffer[SCSI_LOG_SPOOLSIZE];
4453     - unsigned long map;
4454     -};
4455     -
4456     -static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
4457     -
4458     static char *scsi_log_reserve_buffer(size_t *len)
4459     {
4460     - struct scsi_log_buf *buf;
4461     - unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
4462     - unsigned long idx = 0;
4463     -
4464     - preempt_disable();
4465     - buf = this_cpu_ptr(&scsi_format_log);
4466     - idx = find_first_zero_bit(&buf->map, map_bits);
4467     - if (likely(idx < map_bits)) {
4468     - while (test_and_set_bit(idx, &buf->map)) {
4469     - idx = find_next_zero_bit(&buf->map, map_bits, idx);
4470     - if (idx >= map_bits)
4471     - break;
4472     - }
4473     - }
4474     - if (WARN_ON(idx >= map_bits)) {
4475     - preempt_enable();
4476     - return NULL;
4477     - }
4478     - *len = SCSI_LOG_BUFSIZE;
4479     - return buf->buffer + idx * SCSI_LOG_BUFSIZE;
4480     + *len = 128;
4481     + return kmalloc(*len, GFP_ATOMIC);
4482     }
4483    
4484     static void scsi_log_release_buffer(char *bufptr)
4485     {
4486     - struct scsi_log_buf *buf;
4487     - unsigned long idx;
4488     - int ret;
4489     -
4490     - buf = this_cpu_ptr(&scsi_format_log);
4491     - if (bufptr >= buf->buffer &&
4492     - bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
4493     - idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
4494     - ret = test_and_clear_bit(idx, &buf->map);
4495     - WARN_ON(!ret);
4496     - }
4497     - preempt_enable();
4498     + kfree(bufptr);
4499     }
4500    
4501     static inline const char *scmd_name(const struct scsi_cmnd *scmd)
4502     diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
4503     index 317873bc0555..ec25a71d0887 100644
4504     --- a/drivers/soundwire/intel.c
4505     +++ b/drivers/soundwire/intel.c
4506     @@ -289,6 +289,16 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
4507    
4508     if (pcm) {
4509     count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
4510     +
4511     + /*
4512     + * WORKAROUND: on all existing Intel controllers, pdi
4513     + * number 2 reports channel count as 1 even though it
4514     + * supports 8 channels. Hardcode the channel count for
4515     + * pdi number 2.
4516     + */
4517     + if (pdi_num == 2)
4518     + count = 7;
4519     +
4520     } else {
4521     count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
4522     count = ((count & SDW_SHIM_PDMSCAP_CPSS) >>
4523     diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
4524     index 703948c9fbe1..02206162eaa9 100644
4525     --- a/drivers/vfio/pci/vfio_pci.c
4526     +++ b/drivers/vfio/pci/vfio_pci.c
4527     @@ -438,11 +438,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
4528     pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4529    
4530     /*
4531     - * Try to reset the device. The success of this is dependent on
4532     - * being able to lock the device, which is not always possible.
4533     + * Try to get the locks ourselves to prevent a deadlock. The
4534     + * success of this is dependent on being able to lock the device,
4535     + * which is not always possible.
4536     + * We cannot use the "try" reset interface here, since it
4537     + * would overwrite the previously restored configuration information.
4538     */
4539     - if (vdev->reset_works && !pci_try_reset_function(pdev))
4540     - vdev->needs_reset = false;
4541     + if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
4542     + if (device_trylock(&pdev->dev)) {
4543     + if (!__pci_reset_function_locked(pdev))
4544     + vdev->needs_reset = false;
4545     + device_unlock(&pdev->dev);
4546     + }
4547     + pci_cfg_access_unlock(pdev);
4548     + }
4549    
4550     pci_restore_state(pdev);
4551     out:
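The vfio-pci hunk takes both the config-access lock and the device lock with trylock semantics before resetting, because blocking here could deadlock and the "try" reset helper would clobber the state just restored. A sketch of the same lock-if-available nesting, with pthread mutexes as stand-ins for the two kernel locks:

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* cfg access */
        static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER; /* device_lock */

        static int reset_locked(void) { return 0; } /* __pci_reset_function_locked() */

        /* Take both locks only if immediately available; skip the reset
         * entirely rather than risk a deadlock by blocking. */
        static void try_reset(int *needs_reset)
        {
                if (pthread_mutex_trylock(&cfg_lock) == 0) {
                        if (pthread_mutex_trylock(&dev_lock) == 0) {
                                if (reset_locked() == 0)
                                        *needs_reset = 0;
                                pthread_mutex_unlock(&dev_lock);
                        }
                        pthread_mutex_unlock(&cfg_lock);
                }
        }

        int main(void)
        {
                int needs_reset = 1;
                try_reset(&needs_reset);
                printf("needs_reset=%d\n", needs_reset);
                return 0;
        }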
4552     diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
4553     index b674948e3bb8..3f28e1b5d422 100644
4554     --- a/drivers/video/fbdev/ssd1307fb.c
4555     +++ b/drivers/video/fbdev/ssd1307fb.c
4556     @@ -432,7 +432,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
4557     if (ret < 0)
4558     return ret;
4559    
4560     - ret = ssd1307fb_write_cmd(par->client, 0x0);
4561     + ret = ssd1307fb_write_cmd(par->client, par->page_offset);
4562     if (ret < 0)
4563     return ret;
4564    
4565     diff --git a/fs/9p/cache.c b/fs/9p/cache.c
4566     index 995e332eee5c..eb2151fb6049 100644
4567     --- a/fs/9p/cache.c
4568     +++ b/fs/9p/cache.c
4569     @@ -51,6 +51,8 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
4570     if (!v9ses->cachetag) {
4571     if (v9fs_random_cachetag(v9ses) < 0) {
4572     v9ses->fscache = NULL;
4573     + kfree(v9ses->cachetag);
4574     + v9ses->cachetag = NULL;
4575     return;
4576     }
4577     }
4578     diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
4579     index 8e83741b02e0..d4d4fdfac1a6 100644
4580     --- a/fs/ext4/block_validity.c
4581     +++ b/fs/ext4/block_validity.c
4582     @@ -38,6 +38,7 @@ int __init ext4_init_system_zone(void)
4583    
4584     void ext4_exit_system_zone(void)
4585     {
4586     + rcu_barrier();
4587     kmem_cache_destroy(ext4_system_zone_cachep);
4588     }
4589    
4590     @@ -49,17 +50,26 @@ static inline int can_merge(struct ext4_system_zone *entry1,
4591     return 0;
4592     }
4593    
4594     +static void release_system_zone(struct ext4_system_blocks *system_blks)
4595     +{
4596     + struct ext4_system_zone *entry, *n;
4597     +
4598     + rbtree_postorder_for_each_entry_safe(entry, n,
4599     + &system_blks->root, node)
4600     + kmem_cache_free(ext4_system_zone_cachep, entry);
4601     +}
4602     +
4603     /*
4604     * Mark a range of blocks as belonging to the "system zone" --- that
4605     * is, filesystem metadata blocks which should never be used by
4606     * inodes.
4607     */
4608     -static int add_system_zone(struct ext4_sb_info *sbi,
4609     +static int add_system_zone(struct ext4_system_blocks *system_blks,
4610     ext4_fsblk_t start_blk,
4611     unsigned int count)
4612     {
4613     struct ext4_system_zone *new_entry = NULL, *entry;
4614     - struct rb_node **n = &sbi->system_blks.rb_node, *node;
4615     + struct rb_node **n = &system_blks->root.rb_node, *node;
4616     struct rb_node *parent = NULL, *new_node = NULL;
4617    
4618     while (*n) {
4619     @@ -91,7 +101,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
4620     new_node = &new_entry->node;
4621    
4622     rb_link_node(new_node, parent, n);
4623     - rb_insert_color(new_node, &sbi->system_blks);
4624     + rb_insert_color(new_node, &system_blks->root);
4625     }
4626    
4627     /* Can we merge to the left? */
4628     @@ -101,7 +111,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
4629     if (can_merge(entry, new_entry)) {
4630     new_entry->start_blk = entry->start_blk;
4631     new_entry->count += entry->count;
4632     - rb_erase(node, &sbi->system_blks);
4633     + rb_erase(node, &system_blks->root);
4634     kmem_cache_free(ext4_system_zone_cachep, entry);
4635     }
4636     }
4637     @@ -112,7 +122,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
4638     entry = rb_entry(node, struct ext4_system_zone, node);
4639     if (can_merge(new_entry, entry)) {
4640     new_entry->count += entry->count;
4641     - rb_erase(node, &sbi->system_blks);
4642     + rb_erase(node, &system_blks->root);
4643     kmem_cache_free(ext4_system_zone_cachep, entry);
4644     }
4645     }
4646     @@ -126,7 +136,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
4647     int first = 1;
4648    
4649     printk(KERN_INFO "System zones: ");
4650     - node = rb_first(&sbi->system_blks);
4651     + node = rb_first(&sbi->system_blks->root);
4652     while (node) {
4653     entry = rb_entry(node, struct ext4_system_zone, node);
4654     printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
4655     @@ -137,7 +147,47 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
4656     printk(KERN_CONT "\n");
4657     }
4658    
4659     -static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
4660     +/*
4661     + * Returns 1 if the passed-in block region (start_blk,
4662     + * start_blk+count) is valid; 0 if some part of the block region
4663     + * overlaps with filesystem metadata blocks.
4664     + */
4665     +static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
4666     + struct ext4_system_blocks *system_blks,
4667     + ext4_fsblk_t start_blk,
4668     + unsigned int count)
4669     +{
4670     + struct ext4_system_zone *entry;
4671     + struct rb_node *n;
4672     +
4673     + if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
4674     + (start_blk + count < start_blk) ||
4675     + (start_blk + count > ext4_blocks_count(sbi->s_es))) {
4676     + sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
4677     + return 0;
4678     + }
4679     +
4680     + if (system_blks == NULL)
4681     + return 1;
4682     +
4683     + n = system_blks->root.rb_node;
4684     + while (n) {
4685     + entry = rb_entry(n, struct ext4_system_zone, node);
4686     + if (start_blk + count - 1 < entry->start_blk)
4687     + n = n->rb_left;
4688     + else if (start_blk >= (entry->start_blk + entry->count))
4689     + n = n->rb_right;
4690     + else {
4691     + sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
4692     + return 0;
4693     + }
4694     + }
4695     + return 1;
4696     +}
4697     +
4698     +static int ext4_protect_reserved_inode(struct super_block *sb,
4699     + struct ext4_system_blocks *system_blks,
4700     + u32 ino)
4701     {
4702     struct inode *inode;
4703     struct ext4_sb_info *sbi = EXT4_SB(sb);
4704     @@ -163,14 +213,15 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
4705     if (n == 0) {
4706     i++;
4707     } else {
4708     - if (!ext4_data_block_valid(sbi, map.m_pblk, n)) {
4709     + if (!ext4_data_block_valid_rcu(sbi, system_blks,
4710     + map.m_pblk, n)) {
4711     ext4_error(sb, "blocks %llu-%llu from inode %u "
4712     "overlap system zone", map.m_pblk,
4713     map.m_pblk + map.m_len - 1, ino);
4714     err = -EFSCORRUPTED;
4715     break;
4716     }
4717     - err = add_system_zone(sbi, map.m_pblk, n);
4718     + err = add_system_zone(system_blks, map.m_pblk, n);
4719     if (err < 0)
4720     break;
4721     i += n;
4722     @@ -180,94 +231,130 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
4723     return err;
4724     }
4725    
4726     +static void ext4_destroy_system_zone(struct rcu_head *rcu)
4727     +{
4728     + struct ext4_system_blocks *system_blks;
4729     +
4730     + system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
4731     + release_system_zone(system_blks);
4732     + kfree(system_blks);
4733     +}
4734     +
4735     +/*
4736     + * Build system zone rbtree which is used for block validity checking.
4737     + *
4738     + * The update of system_blks pointer in this function is protected by
4739     + * sb->s_umount semaphore. However we have to be careful as we can be
4740     + * racing with ext4_data_block_valid() calls reading system_blks rbtree
4741     + * protected only by RCU. That's why we first build the rbtree and then
4742     + * swap it in place.
4743     + */
4744     int ext4_setup_system_zone(struct super_block *sb)
4745     {
4746     ext4_group_t ngroups = ext4_get_groups_count(sb);
4747     struct ext4_sb_info *sbi = EXT4_SB(sb);
4748     + struct ext4_system_blocks *system_blks;
4749     struct ext4_group_desc *gdp;
4750     ext4_group_t i;
4751     int flex_size = ext4_flex_bg_size(sbi);
4752     int ret;
4753    
4754     if (!test_opt(sb, BLOCK_VALIDITY)) {
4755     - if (sbi->system_blks.rb_node)
4756     + if (sbi->system_blks)
4757     ext4_release_system_zone(sb);
4758     return 0;
4759     }
4760     - if (sbi->system_blks.rb_node)
4761     + if (sbi->system_blks)
4762     return 0;
4763    
4764     + system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
4765     + if (!system_blks)
4766     + return -ENOMEM;
4767     +
4768     for (i=0; i < ngroups; i++) {
4769     cond_resched();
4770     if (ext4_bg_has_super(sb, i) &&
4771     ((i < 5) || ((i % flex_size) == 0)))
4772     - add_system_zone(sbi, ext4_group_first_block_no(sb, i),
4773     + add_system_zone(system_blks,
4774     + ext4_group_first_block_no(sb, i),
4775     ext4_bg_num_gdb(sb, i) + 1);
4776     gdp = ext4_get_group_desc(sb, i, NULL);
4777     - ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
4778     + ret = add_system_zone(system_blks,
4779     + ext4_block_bitmap(sb, gdp), 1);
4780     if (ret)
4781     - return ret;
4782     - ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1);
4783     + goto err;
4784     + ret = add_system_zone(system_blks,
4785     + ext4_inode_bitmap(sb, gdp), 1);
4786     if (ret)
4787     - return ret;
4788     - ret = add_system_zone(sbi, ext4_inode_table(sb, gdp),
4789     + goto err;
4790     + ret = add_system_zone(system_blks,
4791     + ext4_inode_table(sb, gdp),
4792     sbi->s_itb_per_group);
4793     if (ret)
4794     - return ret;
4795     + goto err;
4796     }
4797     if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
4798     - ret = ext4_protect_reserved_inode(sb,
4799     + ret = ext4_protect_reserved_inode(sb, system_blks,
4800     le32_to_cpu(sbi->s_es->s_journal_inum));
4801     if (ret)
4802     - return ret;
4803     + goto err;
4804     }
4805    
4806     + /*
4807     + * The system-blks rbtree is now complete; publish it exactly once
4808     + * so that ext4_data_block_valid() can never observe a half-built
4809     + * tree.
4810     + */
4811     + rcu_assign_pointer(sbi->system_blks, system_blks);
4812     +
4813     if (test_opt(sb, DEBUG))
4814     debug_print_tree(sbi);
4815     return 0;
4816     +err:
4817     + release_system_zone(system_blks);
4818     + kfree(system_blks);
4819     + return ret;
4820     }
4821    
4822     -/* Called when the filesystem is unmounted */
4823     +/*
4824     + * Called when the filesystem is unmounted or when remounting it with
4825     + * noblock_validity specified.
4826     + *
4827     + * The update of system_blks pointer in this function is protected by
4828     + * sb->s_umount semaphore. However we have to be careful as we can be
4829     + * racing with ext4_data_block_valid() calls reading system_blks rbtree
4830     + * protected only by RCU. So we first clear the system_blks pointer and
4831     + * then free the rbtree only after RCU grace period expires.
4832     + */
4833     void ext4_release_system_zone(struct super_block *sb)
4834     {
4835     - struct ext4_system_zone *entry, *n;
4836     + struct ext4_system_blocks *system_blks;
4837    
4838     - rbtree_postorder_for_each_entry_safe(entry, n,
4839     - &EXT4_SB(sb)->system_blks, node)
4840     - kmem_cache_free(ext4_system_zone_cachep, entry);
4841     + system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
4842     + lockdep_is_held(&sb->s_umount));
4843     + rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);
4844    
4845     - EXT4_SB(sb)->system_blks = RB_ROOT;
4846     + if (system_blks)
4847     + call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
4848     }
4849    
4850     -/*
4851     - * Returns 1 if the passed-in block region (start_blk,
4852     - * start_blk+count) is valid; 0 if some part of the block region
4853     - * overlaps with filesystem metadata blocks.
4854     - */
4855     int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
4856     unsigned int count)
4857     {
4858     - struct ext4_system_zone *entry;
4859     - struct rb_node *n = sbi->system_blks.rb_node;
4860     + struct ext4_system_blocks *system_blks;
4861     + int ret;
4862    
4863     - if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
4864     - (start_blk + count < start_blk) ||
4865     - (start_blk + count > ext4_blocks_count(sbi->s_es))) {
4866     - sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
4867     - return 0;
4868     - }
4869     - while (n) {
4870     - entry = rb_entry(n, struct ext4_system_zone, node);
4871     - if (start_blk + count - 1 < entry->start_blk)
4872     - n = n->rb_left;
4873     - else if (start_blk >= (entry->start_blk + entry->count))
4874     - n = n->rb_right;
4875     - else {
4876     - sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
4877     - return 0;
4878     - }
4879     - }
4880     - return 1;
4881     + /*
4882     + * Lock the system zone to prevent it from being released concurrently
4883     + * when a remount inverts the current "[no]block_validity"
4884     + * mount option.
4885     + */
4886     + rcu_read_lock();
4887     + system_blks = rcu_dereference(sbi->system_blks);
4888     + ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
4889     + count);
4890     + rcu_read_unlock();
4891     + return ret;
4892     }
4893    
4894     int ext4_check_blockref(const char *function, unsigned int line,
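The ext4 series converts the system-zone rbtree to an RCU-managed pointer: the writer builds a complete tree, publishes it with rcu_assign_pointer(), and readers dereference it under rcu_read_lock(), with freeing deferred via call_rcu(). The sketch below models only the build-then-publish ordering, using C11 release/acquire atomics in place of the RCU primitives; grace-period deferral is elided and struct blocks is a toy stand-in:

        #include <stdatomic.h>
        #include <stdio.h>
        #include <stdlib.h>

        /* Toy stand-in for the system-zone tree. */
        struct blocks { int nzones; };

        /* The __rcu pointer, modeled as a C11 atomic pointer. */
        static _Atomic(struct blocks *) system_blks;

        static int setup_zone(void)
        {
                struct blocks *nb = calloc(1, sizeof(*nb));
                if (!nb)
                        return -1;
                nb->nzones = 42;  /* fully build before publishing */
                /* rcu_assign_pointer(): release store so readers that see
                 * the pointer also see its initialized contents. */
                atomic_store_explicit(&system_blks, nb, memory_order_release);
                return 0;
        }

        static int blocks_valid(void)
        {
                /* rcu_dereference() under rcu_read_lock(): acquire load. */
                struct blocks *b =
                        atomic_load_explicit(&system_blks, memory_order_acquire);
                return b ? b->nzones : 0;  /* NULL means "no checking" */
        }

        int main(void)
        {
                if (setup_zone())
                        return 1;
                printf("%d\n", blocks_valid());
                return 0;
        }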
4895     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
4896     index bf660aa7a9e0..c025efcbcf27 100644
4897     --- a/fs/ext4/ext4.h
4898     +++ b/fs/ext4/ext4.h
4899     @@ -184,6 +184,14 @@ struct ext4_map_blocks {
4900     unsigned int m_flags;
4901     };
4902    
4903     +/*
4904     + * Block validity checking, system zone rbtree.
4905     + */
4906     +struct ext4_system_blocks {
4907     + struct rb_root root;
4908     + struct rcu_head rcu;
4909     +};
4910     +
4911     /*
4912     * Flags for ext4_io_end->flags
4913     */
4914     @@ -1421,7 +1429,7 @@ struct ext4_sb_info {
4915     int s_jquota_fmt; /* Format of quota to use */
4916     #endif
4917     unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
4918     - struct rb_root system_blks;
4919     + struct ext4_system_blocks __rcu *system_blks;
4920    
4921     #ifdef EXTENTS_STATS
4922     /* ext4 extents stats */
4923     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
4924     index 78a1b873e48a..aa3178f1b145 100644
4925     --- a/fs/f2fs/super.c
4926     +++ b/fs/f2fs/super.c
4927     @@ -873,7 +873,21 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
4928    
4929     static int f2fs_drop_inode(struct inode *inode)
4930     {
4931     + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4932     int ret;
4933     +
4934     + /*
4935     + * during filesystem shutdown, if checkpoint is disabled,
4936     + * drop useless meta/node dirty pages.
4937     + */
4938     + if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
4939     + if (inode->i_ino == F2FS_NODE_INO(sbi) ||
4940     + inode->i_ino == F2FS_META_INO(sbi)) {
4941     + trace_f2fs_drop_inode(inode, 1);
4942     + return 1;
4943     + }
4944     + }
4945     +
4946     /*
4947     * This is to avoid a deadlock condition like below.
4948     * writeback_single_inode(inode)
4949     diff --git a/fs/fat/dir.c b/fs/fat/dir.c
4950     index 1bda2ab6745b..814ad2c2ba80 100644
4951     --- a/fs/fat/dir.c
4952     +++ b/fs/fat/dir.c
4953     @@ -1100,8 +1100,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
4954     err = -ENOMEM;
4955     goto error;
4956     }
4957     + /* Avoid race with userspace read via bdev */
4958     + lock_buffer(bhs[n]);
4959     memset(bhs[n]->b_data, 0, sb->s_blocksize);
4960     set_buffer_uptodate(bhs[n]);
4961     + unlock_buffer(bhs[n]);
4962     mark_buffer_dirty_inode(bhs[n], dir);
4963    
4964     n++;
4965     @@ -1158,6 +1161,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
4966     fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
4967    
4968     de = (struct msdos_dir_entry *)bhs[0]->b_data;
4969     + /* Avoid race with userspace read via bdev */
4970     + lock_buffer(bhs[0]);
4971     /* filling the new directory slots ("." and ".." entries) */
4972     memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
4973     memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
4974     @@ -1180,6 +1185,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
4975     de[0].size = de[1].size = 0;
4976     memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
4977     set_buffer_uptodate(bhs[0]);
4978     + unlock_buffer(bhs[0]);
4979     mark_buffer_dirty_inode(bhs[0], dir);
4980    
4981     err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
4982     @@ -1237,11 +1243,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
4983    
4984     /* fill the directory entry */
4985     copy = min(size, sb->s_blocksize);
4986     + /* Avoid race with userspace read via bdev */
4987     + lock_buffer(bhs[n]);
4988     memcpy(bhs[n]->b_data, slots, copy);
4989     - slots += copy;
4990     - size -= copy;
4991     set_buffer_uptodate(bhs[n]);
4992     + unlock_buffer(bhs[n]);
4993     mark_buffer_dirty_inode(bhs[n], dir);
4994     + slots += copy;
4995     + size -= copy;
4996     if (!size)
4997     break;
4998     n++;
4999     diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
5000     index 265983635f2b..3647c65a0f48 100644
5001     --- a/fs/fat/fatent.c
5002     +++ b/fs/fat/fatent.c
5003     @@ -388,8 +388,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
5004     err = -ENOMEM;
5005     goto error;
5006     }
5007     + /* Avoid race with userspace read via bdev */
5008     + lock_buffer(c_bh);
5009     memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
5010     set_buffer_uptodate(c_bh);
5011     + unlock_buffer(c_bh);
5012     mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
5013     if (sb->s_flags & SB_SYNCHRONOUS)
5014     err = sync_dirty_buffer(c_bh);
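The two FAT hunks above bracket every buffer fill with lock_buffer()/unlock_buffer() so a concurrent read through the block device never sees a half-written block. A minimal user-space sketch of the same bracketing, with a pthread mutex standing in for the buffer lock (struct buffer_head_model and its fields are illustrative, not kernel types):

        #include <pthread.h>
        #include <string.h>

        struct buffer_head_model {
                pthread_mutex_t lock;  /* lock_buffer()/unlock_buffer() stand-in */
                char data[512];
                int uptodate;
        };

        /* Fill a block atomically w.r.t. concurrent readers, mirroring the
         * lock_buffer()/memset/set_buffer_uptodate()/unlock_buffer()
         * sequence the patch adds. */
        static void zero_block(struct buffer_head_model *bh)
        {
                pthread_mutex_lock(&bh->lock);
                memset(bh->data, 0, sizeof(bh->data));
                bh->uptodate = 1;
                pthread_mutex_unlock(&bh->lock);
        }

        int main(void)
        {
                struct buffer_head_model bh = { .lock = PTHREAD_MUTEX_INITIALIZER };
                zero_block(&bh);
                return bh.uptodate ? 0 : 1;
        }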
5015     diff --git a/fs/fs_context.c b/fs/fs_context.c
5016     index 103643c68e3f..87c2c9687d90 100644
5017     --- a/fs/fs_context.c
5018     +++ b/fs/fs_context.c
5019     @@ -279,10 +279,8 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
5020     fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
5021     break;
5022     case FS_CONTEXT_FOR_RECONFIGURE:
5023     - /* We don't pin any namespaces as the superblock's
5024     - * subscriptions cannot be changed at this point.
5025     - */
5026     atomic_inc(&reference->d_sb->s_active);
5027     + fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
5028     fc->root = dget(reference);
5029     break;
5030     }
5031     diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
5032     index e78657742bd8..3883633e82eb 100644
5033     --- a/fs/ocfs2/dlm/dlmunlock.c
5034     +++ b/fs/ocfs2/dlm/dlmunlock.c
5035     @@ -90,7 +90,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
5036     enum dlm_status status;
5037     int actions = 0;
5038     int in_use;
5039     - u8 owner;
5040     + u8 owner;
5041     + int recovery_wait = 0;
5042    
5043     mlog(0, "master_node = %d, valblk = %d\n", master_node,
5044     flags & LKM_VALBLK);
5045     @@ -193,9 +194,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
5046     }
5047     if (flags & LKM_CANCEL)
5048     lock->cancel_pending = 0;
5049     - else
5050     - lock->unlock_pending = 0;
5051     -
5052     + else {
5053     + if (!lock->unlock_pending)
5054     + recovery_wait = 1;
5055     + else
5056     + lock->unlock_pending = 0;
5057     + }
5058     }
5059    
5060     /* get an extra ref on lock. if we are just switching
5061     @@ -229,6 +233,17 @@ leave:
5062     spin_unlock(&res->spinlock);
5063     wake_up(&res->wq);
5064    
5065     + if (recovery_wait) {
5066     + spin_lock(&res->spinlock);
5067     + /* An unlock request succeeds immediately once the owner dies,
5068     + * and the lock is already removed from the grant list. Wait for
5069     + * RECOVERING to finish, or we miss the chance to purge the lock,
5070     + * since removal is much faster than the RECOVERING process.
5071     + */
5072     + __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
5073     + spin_unlock(&res->spinlock);
5074     + }
5075     +
5076     /* let the caller's final dlm_lock_put handle the actual kfree */
5077     if (actions & DLM_UNLOCK_FREE_LOCK) {
5078     /* this should always be coupled with list removal */
5079     diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
5080     index 2bb3468fc93a..8caff834f002 100644
5081     --- a/fs/pstore/ram.c
5082     +++ b/fs/pstore/ram.c
5083     @@ -144,6 +144,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
5084     if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
5085     (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
5086     &header_length) == 3) {
5087     + time->tv_nsec *= 1000;
5088     if (data_type == 'C')
5089     *compressed = true;
5090     else
5091     @@ -151,6 +152,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
5092     } else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
5093     (time64_t *)&time->tv_sec, &time->tv_nsec,
5094     &header_length) == 2) {
5095     + time->tv_nsec *= 1000;
5096     *compressed = false;
5097     } else {
5098     time->tv_sec = 0;
5099     diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
5100     index 79435cfc20eb..897e799dbcb9 100644
5101     --- a/include/linux/dsa/sja1105.h
5102     +++ b/include/linux/dsa/sja1105.h
5103     @@ -31,6 +31,8 @@
5104     #define SJA1105_META_SMAC 0x222222222222ull
5105     #define SJA1105_META_DMAC 0x0180C200000Eull
5106    
5107     +#define SJA1105_HWTS_RX_EN 0
5108     +
5109     /* Global tagger data: each struct sja1105_port has a reference to
5110     * the structure defined in struct sja1105_private.
5111     */
5112     @@ -42,7 +44,7 @@ struct sja1105_tagger_data {
5113     * from taggers running on multiple ports on SMP systems
5114     */
5115     spinlock_t meta_lock;
5116     - bool hwts_rx_en;
5117     + unsigned long state;
5118     };
5119    
5120     struct sja1105_skb_cb {
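The sja1105 header swaps a plain bool for a bit in an unsigned long so the tagger can use atomic set_bit()/test_bit() accesses across contexts. A C11-atomics sketch of the same flag word (HWTS_RX_EN mirrors SJA1105_HWTS_RX_EN; the helper names are illustrative):

        #include <stdatomic.h>
        #include <stdio.h>

        #define HWTS_RX_EN 0  /* bit index in the state word */

        /* A bool flipped from another context is racy; a bit in an atomic
         * word (set_bit/clear_bit/test_bit in the kernel) is not. */
        static atomic_ulong state;

        static void hwts_enable(void)  { atomic_fetch_or(&state, 1UL << HWTS_RX_EN); }
        static void hwts_disable(void) { atomic_fetch_and(&state, ~(1UL << HWTS_RX_EN)); }
        static int  hwts_enabled(void) { return (atomic_load(&state) >> HWTS_RX_EN) & 1; }

        int main(void)
        {
                hwts_enable();
                printf("%d\n", hwts_enabled());  /* 1 */
                hwts_disable();
                printf("%d\n", hwts_enabled());  /* 0 */
                return 0;
        }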
5121     diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
5122     index ccb73422c2fa..e6f54ef6698b 100644
5123     --- a/include/linux/mailbox/mtk-cmdq-mailbox.h
5124     +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
5125     @@ -20,6 +20,9 @@
5126     #define CMDQ_WFE_WAIT BIT(15)
5127     #define CMDQ_WFE_WAIT_VALUE 0x1
5128    
5129     +/** cmdq event maximum */
5130     +#define CMDQ_MAX_EVENT 0x3ff
5131     +
5132     /*
5133     * CMDQ_CODE_MASK:
5134     * set write mask
5135     diff --git a/include/linux/mm.h b/include/linux/mm.h
5136     index 0334ca97c584..fe4552e1c40b 100644
5137     --- a/include/linux/mm.h
5138     +++ b/include/linux/mm.h
5139     @@ -1405,7 +1405,11 @@ extern void pagefault_out_of_memory(void);
5140    
5141     extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
5142    
5143     +#ifdef CONFIG_MMU
5144     extern bool can_do_mlock(void);
5145     +#else
5146     +static inline bool can_do_mlock(void) { return false; }
5147     +#endif
5148     extern int user_shm_lock(size_t, struct user_struct *);
5149     extern void user_shm_unlock(size_t, struct user_struct *);
5150    
5151     diff --git a/include/linux/pci.h b/include/linux/pci.h
5152     index 82e4cd1b7ac3..ac8a6c4e1792 100644
5153     --- a/include/linux/pci.h
5154     +++ b/include/linux/pci.h
5155     @@ -2435,4 +2435,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
5156     #define pci_notice_ratelimited(pdev, fmt, arg...) \
5157     dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
5158    
5159     +#define pci_info_ratelimited(pdev, fmt, arg...) \
5160     + dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
5161     +
5162     #endif /* LINUX_PCI_H */
5163     diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
5164     index 54ade13a9b15..4e8899972db4 100644
5165     --- a/include/linux/soc/mediatek/mtk-cmdq.h
5166     +++ b/include/linux/soc/mediatek/mtk-cmdq.h
5167     @@ -13,9 +13,6 @@
5168    
5169     #define CMDQ_NO_TIMEOUT 0xffffffffu
5170    
5171     -/** cmdq event maximum */
5172     -#define CMDQ_MAX_EVENT 0x3ff
5173     -
5174     struct cmdq_pkt;
5175    
5176     struct cmdq_client {
5177     diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
5178     index e03bd9d41fa8..7b196d234626 100644
5179     --- a/include/scsi/scsi_dbg.h
5180     +++ b/include/scsi/scsi_dbg.h
5181     @@ -6,8 +6,6 @@ struct scsi_cmnd;
5182     struct scsi_device;
5183     struct scsi_sense_hdr;
5184    
5185     -#define SCSI_LOG_BUFSIZE 128
5186     -
5187     extern void scsi_print_command(struct scsi_cmnd *);
5188     extern size_t __scsi_format_command(char *, size_t,
5189     const unsigned char *, size_t);
5190     diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
5191     index a13a62db3565..edc5c887a44c 100644
5192     --- a/include/trace/events/rxrpc.h
5193     +++ b/include/trace/events/rxrpc.h
5194     @@ -1068,7 +1068,7 @@ TRACE_EVENT(rxrpc_recvmsg,
5195     ),
5196    
5197     TP_fast_assign(
5198     - __entry->call = call->debug_id;
5199     + __entry->call = call ? call->debug_id : 0;
5200     __entry->why = why;
5201     __entry->seq = seq;
5202     __entry->offset = offset;
5203     diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
5204     index d5870723b8ad..15d70a90b50d 100644
5205     --- a/kernel/kexec_core.c
5206     +++ b/kernel/kexec_core.c
5207     @@ -300,6 +300,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
5208     {
5209     struct page *pages;
5210    
5211     + if (fatal_signal_pending(current))
5212     + return NULL;
5213     pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
5214     if (pages) {
5215     unsigned int count, i;
5216     diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
5217     index c4ce08f43bd6..ab4a4606d19b 100644
5218     --- a/kernel/livepatch/core.c
5219     +++ b/kernel/livepatch/core.c
5220     @@ -1175,6 +1175,7 @@ err:
5221     pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
5222     patch->mod->name, obj->mod->name, obj->mod->name);
5223     mod->klp_alive = false;
5224     + obj->mod = NULL;
5225     klp_cleanup_module_patches_limited(mod, patch);
5226     mutex_unlock(&klp_mutex);
5227    
5228     diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
5229     index 5960e2980a8a..4d39540011e2 100644
5230     --- a/lib/Kconfig.debug
5231     +++ b/lib/Kconfig.debug
5232     @@ -596,7 +596,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
5233     int "Maximum kmemleak early log entries"
5234     depends on DEBUG_KMEMLEAK
5235     range 200 40000
5236     - default 400
5237     + default 16000
5238     help
5239     Kmemleak must track all the memory allocations to avoid
5240     reporting false positives. Since memory may be allocated or
5241     diff --git a/net/core/sock.c b/net/core/sock.c
5242     index 545fac19a711..3aa93af51d48 100644
5243     --- a/net/core/sock.c
5244     +++ b/net/core/sock.c
5245     @@ -1700,8 +1700,6 @@ static void __sk_destruct(struct rcu_head *head)
5246     sk_filter_uncharge(sk, filter);
5247     RCU_INIT_POINTER(sk->sk_filter, NULL);
5248     }
5249     - if (rcu_access_pointer(sk->sk_reuseport_cb))
5250     - reuseport_detach_sock(sk);
5251    
5252     sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
5253    
5254     @@ -1728,7 +1726,14 @@ static void __sk_destruct(struct rcu_head *head)
5255    
5256     void sk_destruct(struct sock *sk)
5257     {
5258     - if (sock_flag(sk, SOCK_RCU_FREE))
5259     + bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
5260     +
5261     + if (rcu_access_pointer(sk->sk_reuseport_cb)) {
5262     + reuseport_detach_sock(sk);
5263     + use_call_rcu = true;
5264     + }
5265     +
5266     + if (use_call_rcu)
5267     call_rcu(&sk->sk_rcu, __sk_destruct);
5268     else
5269     __sk_destruct(&sk->sk_rcu);
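The net/core/sock.c fix moves reuseport detachment out of __sk_destruct() and into sk_destruct(), forcing an RCU-deferred free whenever a reuseport callback was attached, since lockless readers may still hold a reference to the socket. A sketch of that decision; struct sock_model and the destruct helpers are stand-ins, not kernel types:

        #include <stdbool.h>
        #include <stdio.h>

        struct sock_model {
                bool rcu_free_flag;  /* SOCK_RCU_FREE */
                void *reuseport_cb;  /* non-NULL: RCU readers may see this sock */
        };

        static void destruct_now(struct sock_model *sk)
        {
                (void)sk;
                puts("freed immediately");
        }

        static void destruct_deferred(struct sock_model *sk)
        {
                (void)sk;
                puts("freed after RCU grace period");
        }

        static void sk_destruct_model(struct sock_model *sk)
        {
                bool use_call_rcu = sk->rcu_free_flag;

                if (sk->reuseport_cb) {
                        sk->reuseport_cb = NULL;  /* reuseport_detach_sock() */
                        use_call_rcu = true;      /* readers may still look */
                }

                if (use_call_rcu)
                        destruct_deferred(sk);    /* call_rcu(..., __sk_destruct) */
                else
                        destruct_now(sk);
        }

        int main(void)
        {
                struct sock_model sk = { .rcu_free_flag = false,
                                         .reuseport_cb  = &sk };
                sk_destruct_model(&sk);
                return 0;
        }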
5270     diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
5271     index 47ee88163a9d..27fe80d07460 100644
5272     --- a/net/dsa/tag_sja1105.c
5273     +++ b/net/dsa/tag_sja1105.c
5274     @@ -155,7 +155,11 @@ static struct sk_buff
5275     /* Step 1: A timestampable frame was received.
5276     * Buffer it until we get its meta frame.
5277     */
5278     - if (is_link_local && sp->data->hwts_rx_en) {
5279     + if (is_link_local) {
5280     + if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
5281     + /* Do normal processing. */
5282     + return skb;
5283     +
5284     spin_lock(&sp->data->meta_lock);
5285     /* Was this a link-local frame instead of the meta
5286     * that we were expecting?
5287     @@ -186,6 +190,12 @@ static struct sk_buff
5288     } else if (is_meta) {
5289     struct sk_buff *stampable_skb;
5290    
5291     + /* Drop the meta frame if we're not in the right state
5292     + * to process it.
5293     + */
5294     + if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
5295     + return NULL;
5296     +
5297     spin_lock(&sp->data->meta_lock);
5298    
5299     stampable_skb = sp->data->stampable_skb;
5300     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
5301     index a53a543fe055..52690bb3e40f 100644
5302     --- a/net/ipv4/ip_gre.c
5303     +++ b/net/ipv4/ip_gre.c
5304     @@ -1446,6 +1446,7 @@ static void erspan_setup(struct net_device *dev)
5305     struct ip_tunnel *t = netdev_priv(dev);
5306    
5307     ether_setup(dev);
5308     + dev->max_mtu = 0;
5309     dev->netdev_ops = &erspan_netdev_ops;
5310     dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5311     dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5312     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
5313     index 7dcce724c78b..14654876127e 100644
5314     --- a/net/ipv4/route.c
5315     +++ b/net/ipv4/route.c
5316     @@ -916,16 +916,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
5317     if (peer->rate_tokens == 0 ||
5318     time_after(jiffies,
5319     (peer->rate_last +
5320     - (ip_rt_redirect_load << peer->rate_tokens)))) {
5321     + (ip_rt_redirect_load << peer->n_redirects)))) {
5322     __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
5323    
5324     icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
5325     peer->rate_last = jiffies;
5326     - ++peer->rate_tokens;
5327     ++peer->n_redirects;
5328     #ifdef CONFIG_IP_ROUTE_VERBOSE
5329     if (log_martians &&
5330     - peer->rate_tokens == ip_rt_redirect_number)
5331     + peer->n_redirects == ip_rt_redirect_number)
5332     net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
5333     &ip_hdr(skb)->saddr, inet_iif(skb),
5334     &ip_hdr(skb)->daddr, &gw);
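The route.c fix bases the redirect backoff on n_redirects, a counter nothing else resets, instead of rate_tokens, which the token-bucket code refills; with the old field the exponential delay could collapse back to zero. A sketch of the intended schedule (REDIRECT_LOAD is an illustrative base interval, not the sysctl's value):

        #include <stdio.h>

        #define REDIRECT_LOAD 4  /* illustrative base interval, in "jiffies" */

        /* Exponential backoff: the n-th redirect is allowed only after
         * base << n has elapsed, driven by a counter that is never reset
         * while the peer keeps ignoring redirects. */
        int main(void)
        {
                for (unsigned n = 0; n < 6; n++)
                        printf("redirect %u allowed after %u jiffies\n",
                               n, REDIRECT_LOAD << n);
                return 0;
        }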
5335     diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
5336     index 3e8b38c73d8c..483323332d74 100644
5337     --- a/net/ipv4/tcp_timer.c
5338     +++ b/net/ipv4/tcp_timer.c
5339     @@ -198,8 +198,13 @@ static bool retransmits_timed_out(struct sock *sk,
5340     return false;
5341    
5342     start_ts = tcp_sk(sk)->retrans_stamp;
5343     - if (likely(timeout == 0))
5344     - timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);
5345     + if (likely(timeout == 0)) {
5346     + unsigned int rto_base = TCP_RTO_MIN;
5347     +
5348     + if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
5349     + rto_base = tcp_timeout_init(sk);
5350     + timeout = tcp_model_timeout(sk, boundary, rto_base);
5351     + }
5352    
5353     return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
5354     }
5355     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5356     index 16486c8b708b..5e5d0575a43c 100644
5357     --- a/net/ipv4/udp.c
5358     +++ b/net/ipv4/udp.c
5359     @@ -821,6 +821,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
5360     int is_udplite = IS_UDPLITE(sk);
5361     int offset = skb_transport_offset(skb);
5362     int len = skb->len - offset;
5363     + int datalen = len - sizeof(*uh);
5364     __wsum csum = 0;
5365    
5366     /*
5367     @@ -854,10 +855,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
5368     return -EIO;
5369     }
5370    
5371     - skb_shinfo(skb)->gso_size = cork->gso_size;
5372     - skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
5373     - skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
5374     - cork->gso_size);
5375     + if (datalen > cork->gso_size) {
5376     + skb_shinfo(skb)->gso_size = cork->gso_size;
5377     + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
5378     + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
5379     + cork->gso_size);
5380     + }
5381     goto csum_partial;
5382     }
5383    
5384     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
5385     index 6a576ff92c39..34ccef18b40e 100644
5386     --- a/net/ipv6/addrconf.c
5387     +++ b/net/ipv6/addrconf.c
5388     @@ -5964,13 +5964,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5389     switch (event) {
5390     case RTM_NEWADDR:
5391     /*
5392     - * If the address was optimistic
5393     - * we inserted the route at the start of
5394     - * our DAD process, so we don't need
5395     - * to do it again
5396     + * If the address was optimistic we inserted the route at the
5397     + * start of our DAD process, so we don't need to do it again.
5398     + * If the device was taken down in the middle of the DAD
5399     + * cycle there is a race where we could get here without a
5400     + * host route, so nothing to insert. That will be fixed when
5401     + * the device is brought up.
5402     */
5403     - if (!rcu_access_pointer(ifp->rt->fib6_node))
5404     + if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
5405     ip6_ins_rt(net, ifp->rt);
5406     + } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
5407     + pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
5408     + &ifp->addr, ifp->idev->dev->name);
5409     + }
5410     +
5411     if (ifp->idev->cnf.forwarding)
5412     addrconf_join_anycast(ifp);
5413     if (!ipv6_addr_any(&ifp->peer_addr))
5414     diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
5415     index fa014d5f1732..a593aaf25748 100644
5416     --- a/net/ipv6/ip6_input.c
5417     +++ b/net/ipv6/ip6_input.c
5418     @@ -221,6 +221,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
5419     if (ipv6_addr_is_multicast(&hdr->saddr))
5420     goto err;
5421    
5422     + /* While RFC4291 is not explicit about v4mapped addresses
5423     + * in IPv6 headers, it seems clear that the Linux dual-stack
5424     + * model cannot deal properly with them.
5425     + * Security models could be fooled by ::ffff:127.0.0.1 for example.
5426     + *
5427     + * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
5428     + */
5429     + if (ipv6_addr_v4mapped(&hdr->saddr))
5430     + goto err;
5431     +
5432     skb->transport_header = skb->network_header + sizeof(*hdr);
5433     IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
5434    
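The ip6_input hunk drops IPv6 packets whose source is an IPv4-mapped address such as ::ffff:127.0.0.1, which can fool address-based security checks on a dual-stack host. A sketch of the ipv6_addr_v4mapped() test against the standard ::ffff:0:0/96 prefix:

        #include <stdio.h>
        #include <string.h>
        #include <arpa/inet.h>

        /* True for ::ffff:a.b.c.d sources, which the patch now rejects
         * at receive time. */
        static int addr_v4mapped(const struct in6_addr *a)
        {
                static const unsigned char prefix[12] =
                        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };
                return memcmp(a->s6_addr, prefix, sizeof(prefix)) == 0;
        }

        int main(void)
        {
                struct in6_addr a;

                inet_pton(AF_INET6, "::ffff:127.0.0.1", &a);
                printf("%d\n", addr_v4mapped(&a));  /* 1: would be dropped */
                return 0;
        }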
5435     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
5436     index 5995fdc99d3f..0454a8a3b39c 100644
5437     --- a/net/ipv6/udp.c
5438     +++ b/net/ipv6/udp.c
5439     @@ -1109,6 +1109,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
5440     __wsum csum = 0;
5441     int offset = skb_transport_offset(skb);
5442     int len = skb->len - offset;
5443     + int datalen = len - sizeof(*uh);
5444    
5445     /*
5446     * Create a UDP header
5447     @@ -1141,8 +1142,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
5448     return -EIO;
5449     }
5450    
5451     - skb_shinfo(skb)->gso_size = cork->gso_size;
5452     - skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
5453     + if (datalen > cork->gso_size) {
5454     + skb_shinfo(skb)->gso_size = cork->gso_size;
5455     + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
5456     + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
5457     + cork->gso_size);
5458     + }
5459     goto csum_partial;
5460     }
5461    
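Both the IPv4 and IPv6 UDP hunks above apply the same guard: GSO metadata is attached only when the payload actually exceeds gso_size, so a short send goes out as an ordinary datagram instead of a bogus single-segment GSO packet. A sketch of the segment-count decision:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        /* Only mark a datagram for segmentation when the payload spans
         * more than one gso_size chunk; otherwise leave it plain. */
        static unsigned int udp_gso_segs(unsigned int datalen,
                                         unsigned int gso_size)
        {
                if (datalen > gso_size)
                        return DIV_ROUND_UP(datalen, gso_size);
                return 0;  /* no GSO */
        }

        int main(void)
        {
                printf("%u\n", udp_gso_segs(3000, 1400)); /* 3 segments */
                printf("%u\n", udp_gso_segs(500, 1400));  /* 0: below gso_size */
                return 0;
        }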
5462     diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
5463     index 8dfea26536c9..ccdd790e163a 100644
5464     --- a/net/nfc/llcp_sock.c
5465     +++ b/net/nfc/llcp_sock.c
5466     @@ -107,9 +107,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
5467     llcp_sock->service_name = kmemdup(llcp_addr.service_name,
5468     llcp_sock->service_name_len,
5469     GFP_KERNEL);
5470     -
5471     + if (!llcp_sock->service_name) {
5472     + ret = -ENOMEM;
5473     + goto put_dev;
5474     + }
5475     llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
5476     if (llcp_sock->ssap == LLCP_SAP_MAX) {
5477     + kfree(llcp_sock->service_name);
5478     + llcp_sock->service_name = NULL;
5479     ret = -EADDRINUSE;
5480     goto put_dev;
5481     }
5482     diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
5483     index ea64c90b14e8..17e6ca62f1be 100644
5484     --- a/net/nfc/netlink.c
5485     +++ b/net/nfc/netlink.c
5486     @@ -970,7 +970,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
5487     int rc;
5488     u32 idx;
5489    
5490     - if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
5491     + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
5492     + !info->attrs[NFC_ATTR_TARGET_INDEX])
5493     return -EINVAL;
5494    
5495     idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
5496     @@ -1018,7 +1019,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
5497     struct sk_buff *msg = NULL;
5498     u32 idx;
5499    
5500     - if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
5501     + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
5502     + !info->attrs[NFC_ATTR_FIRMWARE_NAME])
5503     return -EINVAL;
5504    
5505     idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
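Both nfc/netlink hunks, like the sch_dsmark hunk further down, apply the
same rule: an attribute the policy does not make mandatory must be checked
for presence before it is read, since nla_get_u32(NULL) dereferences a NULL
pointer. Summarized from the first hunk above:

    /* if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
     *     !info->attrs[NFC_ATTR_TARGET_INDEX])
     *         return -EINVAL;
     * idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);  now safe */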
5506     diff --git a/net/rds/ib.c b/net/rds/ib.c
5507     index 45acab2de0cf..9de2ae22d583 100644
5508     --- a/net/rds/ib.c
5509     +++ b/net/rds/ib.c
5510     @@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device)
5511     refcount_set(&rds_ibdev->refcount, 1);
5512     INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
5513    
5514     + INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
5515     + INIT_LIST_HEAD(&rds_ibdev->conn_list);
5516     +
5517     rds_ibdev->max_wrs = device->attrs.max_qp_wr;
5518     rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
5519    
5520     @@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device)
5521     device->name,
5522     rds_ibdev->use_fastreg ? "FRMR" : "FMR");
5523    
5524     - INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
5525     - INIT_LIST_HEAD(&rds_ibdev->conn_list);
5526     -
5527     down_write(&rds_ib_devices_lock);
5528     list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
5529     up_write(&rds_ib_devices_lock);
5530     diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
5531     index 06c7a2da21bc..39b427dc7512 100644
5532     --- a/net/sched/sch_cbq.c
5533     +++ b/net/sched/sch_cbq.c
5534     @@ -1127,6 +1127,33 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
5535     [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
5536     };
5537    
5538     +static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
5539     + struct nlattr *opt,
5540     + struct netlink_ext_ack *extack)
5541     +{
5542     + int err;
5543     +
5544     + if (!opt) {
5545     + NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
5546     + return -EINVAL;
5547     + }
5548     +
5549     + err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
5550     + cbq_policy, extack);
5551     + if (err < 0)
5552     + return err;
5553     +
5554     + if (tb[TCA_CBQ_WRROPT]) {
5555     + const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
5556     +
5557     + if (wrr->priority > TC_CBQ_MAXPRIO) {
5558     + NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
5559     + err = -EINVAL;
5560     + }
5561     + }
5562     + return err;
5563     +}
5564     +
5565     static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
5566     struct netlink_ext_ack *extack)
5567     {
5568     @@ -1139,13 +1166,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
5569     hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
5570     q->delay_timer.function = cbq_undelay;
5571    
5572     - if (!opt) {
5573     - NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
5574     - return -EINVAL;
5575     - }
5576     -
5577     - err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
5578     - extack);
5579     + err = cbq_opt_parse(tb, opt, extack);
5580     if (err < 0)
5581     return err;
5582    
5583     @@ -1464,13 +1485,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
5584     struct cbq_class *parent;
5585     struct qdisc_rate_table *rtab = NULL;
5586    
5587     - if (!opt) {
5588     - NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
5589     - return -EINVAL;
5590     - }
5591     -
5592     - err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
5593     - extack);
5594     + err = cbq_opt_parse(tb, opt, extack);
5595     if (err < 0)
5596     return err;
5597    
5598     diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
5599     index 4a403d35438f..284ab2dcf47f 100644
5600     --- a/net/sched/sch_cbs.c
5601     +++ b/net/sched/sch_cbs.c
5602     @@ -306,7 +306,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
5603     if (err < 0)
5604     goto skip;
5605    
5606     - if (ecmd.base.speed != SPEED_UNKNOWN)
5607     + if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
5608     speed = ecmd.base.speed;
5609    
5610     skip:
5611     diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
5612     index bad1cbe59a56..05605b30bef3 100644
5613     --- a/net/sched/sch_dsmark.c
5614     +++ b/net/sched/sch_dsmark.c
5615     @@ -361,6 +361,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
5616     goto errout;
5617    
5618     err = -EINVAL;
5619     + if (!tb[TCA_DSMARK_INDICES])
5620     + goto errout;
5621     indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
5622    
5623     if (hweight32(indices) != 1)
5624     diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
5625     index 8d8bc2ec5cd6..76bebe516194 100644
5626     --- a/net/sched/sch_taprio.c
5627     +++ b/net/sched/sch_taprio.c
5628     @@ -961,12 +961,11 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
5629     if (err < 0)
5630     goto skip;
5631    
5632     - if (ecmd.base.speed != SPEED_UNKNOWN)
5633     + if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
5634     speed = ecmd.base.speed;
5635    
5636     skip:
5637     - picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
5638     - speed * 1000 * 1000);
5639     + picos_per_byte = (USEC_PER_SEC * 8) / speed;
5640    
5641     atomic64_set(&q->picos_per_byte, picos_per_byte);
5642     netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
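The sch_cbs and sch_taprio hunks share one guard: a driver reporting a link
speed of 0 would previously reach a division by zero. The simplified taprio
conversion is equivalent to the old div64_s64() expression: with speed in
Mbit/s, one byte takes 8 / (speed * 10^6) seconds, i.e. 8 * 10^6 / speed
picoseconds, and 8 * 10^6 == USEC_PER_SEC * 8. A small runnable check of the
arithmetic:

    #include <stdio.h>
    #define USEC_PER_SEC 1000000LL

    int main(void)
    {
            long long speeds[] = { 10, 100, 1000, 10000 }; /* Mbit/s */
            int i;

            for (i = 0; i < 4; i++)
                    printf("%5lld Mbit/s -> %6lld ps/byte\n", speeds[i],
                           (USEC_PER_SEC * 8) / speeds[i]);
            return 0; /* 1000 Mbit/s -> 8000 ps/byte, i.e. 8 ns/byte */
    }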
5643     diff --git a/net/tipc/link.c b/net/tipc/link.c
5644     index c2c5c53cad22..b0063d05599e 100644
5645     --- a/net/tipc/link.c
5646     +++ b/net/tipc/link.c
5647     @@ -160,6 +160,7 @@ struct tipc_link {
5648     struct {
5649     u16 len;
5650     u16 limit;
5651     + struct sk_buff *target_bskb;
5652     } backlog[5];
5653     u16 snd_nxt;
5654     u16 window;
5655     @@ -866,6 +867,7 @@ static void link_prepare_wakeup(struct tipc_link *l)
5656     void tipc_link_reset(struct tipc_link *l)
5657     {
5658     struct sk_buff_head list;
5659     + u32 imp;
5660    
5661     __skb_queue_head_init(&list);
5662    
5663     @@ -887,11 +889,10 @@ void tipc_link_reset(struct tipc_link *l)
5664     __skb_queue_purge(&l->deferdq);
5665     __skb_queue_purge(&l->backlogq);
5666     __skb_queue_purge(&l->failover_deferdq);
5667     - l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
5668     - l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
5669     - l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
5670     - l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
5671     - l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
5672     + for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
5673     + l->backlog[imp].len = 0;
5674     + l->backlog[imp].target_bskb = NULL;
5675     + }
5676     kfree_skb(l->reasm_buf);
5677     kfree_skb(l->failover_reasm_skb);
5678     l->reasm_buf = NULL;
5679     @@ -931,7 +932,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
5680     u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
5681     struct sk_buff_head *transmq = &l->transmq;
5682     struct sk_buff_head *backlogq = &l->backlogq;
5683     - struct sk_buff *skb, *_skb, *bskb;
5684     + struct sk_buff *skb, *_skb, **tskb;
5685     int pkt_cnt = skb_queue_len(list);
5686     int rc = 0;
5687    
5688     @@ -980,19 +981,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
5689     seqno++;
5690     continue;
5691     }
5692     - if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
5693     + tskb = &l->backlog[imp].target_bskb;
5694     + if (tipc_msg_bundle(*tskb, hdr, mtu)) {
5695     kfree_skb(__skb_dequeue(list));
5696     l->stats.sent_bundled++;
5697     continue;
5698     }
5699     - if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
5700     + if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
5701     kfree_skb(__skb_dequeue(list));
5702     - __skb_queue_tail(backlogq, bskb);
5703     - l->backlog[msg_importance(buf_msg(bskb))].len++;
5704     + __skb_queue_tail(backlogq, *tskb);
5705     + l->backlog[imp].len++;
5706     l->stats.sent_bundled++;
5707     l->stats.sent_bundles++;
5708     continue;
5709     }
5710     + l->backlog[imp].target_bskb = NULL;
5711     l->backlog[imp].len += skb_queue_len(list);
5712     skb_queue_splice_tail_init(list, backlogq);
5713     }
5714     @@ -1008,6 +1011,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
5715     u16 seqno = l->snd_nxt;
5716     u16 ack = l->rcv_nxt - 1;
5717     u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
5718     + u32 imp;
5719    
5720     while (skb_queue_len(&l->transmq) < l->window) {
5721     skb = skb_peek(&l->backlogq);
5722     @@ -1018,7 +1022,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
5723     break;
5724     __skb_dequeue(&l->backlogq);
5725     hdr = buf_msg(skb);
5726     - l->backlog[msg_importance(hdr)].len--;
5727     + imp = msg_importance(hdr);
5728     + l->backlog[imp].len--;
5729     + if (unlikely(skb == l->backlog[imp].target_bskb))
5730     + l->backlog[imp].target_bskb = NULL;
5731     __skb_queue_tail(&l->transmq, skb);
5732     /* next retransmit attempt */
5733     if (link_is_bc_sndlink(l))
5734     diff --git a/net/tipc/msg.c b/net/tipc/msg.c
5735     index f48e5857210f..b956ce4a40ef 100644
5736     --- a/net/tipc/msg.c
5737     +++ b/net/tipc/msg.c
5738     @@ -484,10 +484,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
5739     bmsg = buf_msg(_skb);
5740     tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
5741     INT_H_SIZE, dnode);
5742     - if (msg_isdata(msg))
5743     - msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
5744     - else
5745     - msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
5746     + msg_set_importance(bmsg, msg_importance(msg));
5747     msg_set_seqno(bmsg, msg_seqno(msg));
5748     msg_set_ack(bmsg, msg_ack(msg));
5749     msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
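Taken together, the two TIPC hunks above change the bundling scheme: each
importance level now tracks its own bundle target instead of blindly
appending to the tail of the backlog queue, and a bundle inherits the
importance of its first message rather than being promoted to
CRITICAL/SYSTEM importance. The invariant, as a comment sketch:

    /* l->backlog[imp].target_bskb -- the only skb at level "imp" that
     * new messages may still be bundled into.
     *
     *  - set when tipc_msg_make_bundle() creates a bundle for "imp"
     *  - cleared when unbundled skbs are spliced onto the backlog queue,
     *    when the skb is dequeued in tipc_link_advance_backlog(), and
     *    in tipc_link_reset(), so it can never point at a freed skb. */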
5750     diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
5751     index ab47bf3ab66e..2ab43b2bba31 100644
5752     --- a/net/vmw_vsock/af_vsock.c
5753     +++ b/net/vmw_vsock/af_vsock.c
5754     @@ -638,7 +638,7 @@ struct sock *__vsock_create(struct net *net,
5755     }
5756     EXPORT_SYMBOL_GPL(__vsock_create);
5757    
5758     -static void __vsock_release(struct sock *sk)
5759     +static void __vsock_release(struct sock *sk, int level)
5760     {
5761     if (sk) {
5762     struct sk_buff *skb;
5763     @@ -648,9 +648,17 @@ static void __vsock_release(struct sock *sk)
5764     vsk = vsock_sk(sk);
5765     pending = NULL; /* Compiler warning. */
5766    
5767     + /* The release call is supposed to use lock_sock_nested()
5768     + * rather than lock_sock(), if a sock lock should be acquired.
5769     + */
5770     transport->release(vsk);
5771    
5772     - lock_sock(sk);
5773     + /* When "level" is SINGLE_DEPTH_NESTING, use the nested
5774     + * version to avoid the warning "possible recursive locking
5775     + * detected". When "level" is 0, lock_sock_nested(sk, level)
5776     + * is the same as lock_sock(sk).
5777     + */
5778     + lock_sock_nested(sk, level);
5779     sock_orphan(sk);
5780     sk->sk_shutdown = SHUTDOWN_MASK;
5781    
5782     @@ -659,7 +667,7 @@ static void __vsock_release(struct sock *sk)
5783    
5784     /* Clean up any sockets that never were accepted. */
5785     while ((pending = vsock_dequeue_accept(sk)) != NULL) {
5786     - __vsock_release(pending);
5787     + __vsock_release(pending, SINGLE_DEPTH_NESTING);
5788     sock_put(pending);
5789     }
5790    
5791     @@ -708,7 +716,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
5792    
5793     static int vsock_release(struct socket *sock)
5794     {
5795     - __vsock_release(sock->sk);
5796     + __vsock_release(sock->sk, 0);
5797     sock->sk = NULL;
5798     sock->state = SS_FREE;
5799    
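The vsock hunks (af_vsock, hyperv, virtio) form one logical fix: a child
socket on the accept queue is released while the listener's lock is held,
and both locks belong to the same lockdep class. A rough sketch of the call
pattern (kernel-style pseudocode, not compilable on its own):

    /* vsock_release(sock)
     *   __vsock_release(sk, 0)
     *     transport->release(vsk)     // locks sk nested, see hvs/virtio hunks
     *     lock_sock_nested(sk, 0)     // same as lock_sock(sk)
     *     while ((pending = vsock_dequeue_accept(sk)))   // sk still locked
     *       __vsock_release(pending, SINGLE_DEPTH_NESTING)
     *         // pending's lock is taken while sk's lock is held; a plain
     *         // lock_sock(pending) here would trip lockdep's
     *         // "possible recursive locking detected" warning. */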
5800     diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
5801     index 9d864ebeb7b3..4b126b21b453 100644
5802     --- a/net/vmw_vsock/hyperv_transport.c
5803     +++ b/net/vmw_vsock/hyperv_transport.c
5804     @@ -559,7 +559,7 @@ static void hvs_release(struct vsock_sock *vsk)
5805     struct sock *sk = sk_vsock(vsk);
5806     bool remove_sock;
5807    
5808     - lock_sock(sk);
5809     + lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
5810     remove_sock = hvs_close_lock_held(vsk);
5811     release_sock(sk);
5812     if (remove_sock)
5813     diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
5814     index 6f1a8aff65c5..a7adffd062c7 100644
5815     --- a/net/vmw_vsock/virtio_transport_common.c
5816     +++ b/net/vmw_vsock/virtio_transport_common.c
5817     @@ -790,7 +790,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
5818     struct sock *sk = &vsk->sk;
5819     bool remove_sock = true;
5820    
5821     - lock_sock(sk);
5822     + lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
5823     if (sk->sk_type == SOCK_STREAM)
5824     remove_sock = virtio_transport_close(vsk);
5825    
5826     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
5827     index 74dd46de01b6..e75517464786 100644
5828     --- a/security/selinux/hooks.c
5829     +++ b/security/selinux/hooks.c
5830     @@ -3403,7 +3403,7 @@ static int selinux_inode_copy_up_xattr(const char *name)
5831     static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
5832     struct kernfs_node *kn)
5833     {
5834     - const struct task_security_struct *tsec = current_security();
5835     + const struct task_security_struct *tsec = selinux_cred(current_cred());
5836     u32 parent_sid, newsid, clen;
5837     int rc;
5838     char *context;
5839     diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
5840     index 91c5395dd20c..586b7abd0aa7 100644
5841     --- a/security/selinux/include/objsec.h
5842     +++ b/security/selinux/include/objsec.h
5843     @@ -37,16 +37,6 @@ struct task_security_struct {
5844     u32 sockcreate_sid; /* sockcreate SID */
5845     };
5846    
5847     -/*
5848     - * get the subjective security ID of the current task
5849     - */
5850     -static inline u32 current_sid(void)
5851     -{
5852     - const struct task_security_struct *tsec = current_security();
5853     -
5854     - return tsec->sid;
5855     -}
5856     -
5857     enum label_initialized {
5858     LABEL_INVALID, /* invalid or not initialized */
5859     LABEL_INITIALIZED, /* initialized */
5860     @@ -185,4 +175,14 @@ static inline struct ipc_security_struct *selinux_ipc(
5861     return ipc->security + selinux_blob_sizes.lbs_ipc;
5862     }
5863    
5864     +/*
5865     + * get the subjective security ID of the current task
5866     + */
5867     +static inline u32 current_sid(void)
5868     +{
5869     + const struct task_security_struct *tsec = selinux_cred(current_cred());
5870     +
5871     + return tsec->sid;
5872     +}
5873     +
5874     #endif /* _SELINUX_OBJSEC_H_ */
5875     diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
5876     index f1c93a7be9ec..38ac3da4e791 100644
5877     --- a/security/smack/smack_access.c
5878     +++ b/security/smack/smack_access.c
5879     @@ -465,7 +465,7 @@ char *smk_parse_smack(const char *string, int len)
5880     if (i == 0 || i >= SMK_LONGLABEL)
5881     return ERR_PTR(-EINVAL);
5882    
5883     - smack = kzalloc(i + 1, GFP_KERNEL);
5884     + smack = kzalloc(i + 1, GFP_NOFS);
5885     if (smack == NULL)
5886     return ERR_PTR(-ENOMEM);
5887    
5888     @@ -500,7 +500,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
5889     if ((m & *cp) == 0)
5890     continue;
5891     rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
5892     - cat, GFP_KERNEL);
5893     + cat, GFP_NOFS);
5894     if (rc < 0) {
5895     netlbl_catmap_free(sap->attr.mls.cat);
5896     return rc;
5897     @@ -536,7 +536,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
5898     if (skp != NULL)
5899     goto freeout;
5900    
5901     - skp = kzalloc(sizeof(*skp), GFP_KERNEL);
5902     + skp = kzalloc(sizeof(*skp), GFP_NOFS);
5903     if (skp == NULL) {
5904     skp = ERR_PTR(-ENOMEM);
5905     goto freeout;
5906     diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
5907     index 4c5e5a438f8b..36b6b9d4cbaf 100644
5908     --- a/security/smack/smack_lsm.c
5909     +++ b/security/smack/smack_lsm.c
5910     @@ -288,7 +288,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
5911     if (!(ip->i_opflags & IOP_XATTR))
5912     return ERR_PTR(-EOPNOTSUPP);
5913    
5914     - buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
5915     + buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
5916     if (buffer == NULL)
5917     return ERR_PTR(-ENOMEM);
5918    
5919     @@ -937,7 +937,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
5920    
5921     if (rc != 0)
5922     return rc;
5923     - } else if (bprm->unsafe)
5924     + }
5925     + if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
5926     return -EPERM;
5927    
5928     bsp->smk_task = isp->smk_task;
5929     @@ -3925,6 +3926,8 @@ access_check:
5930     skp = smack_ipv6host_label(&sadd);
5931     if (skp == NULL)
5932     skp = smack_net_ambient;
5933     + if (skb == NULL)
5934     + break;
5935     #ifdef CONFIG_AUDIT
5936     smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
5937     ad.a.u.net->family = family;
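The GFP_KERNEL -> GFP_NOFS switches in the two Smack files above appear to
share one rationale: these helpers can run with inode_smack::smk_lock held
inside a filesystem transaction, and a GFP_KERNEL allocation may recurse
into filesystem reclaim and deadlock on that same lock. GFP_NOFS forbids
the recursion, as in the hunk above:

    buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS); /* no FS reclaim from here */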
5938     diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
5939     index 6a10dea01eef..696586407e83 100644
5940     --- a/tools/power/x86/intel-speed-select/isst-config.c
5941     +++ b/tools/power/x86/intel-speed-select/isst-config.c
5942     @@ -402,6 +402,9 @@ void set_cpu_mask_from_punit_coremask(int cpu, unsigned long long core_mask,
5943     int j;
5944    
5945     for (j = 0; j < topo_max_cpus; ++j) {
5946     + if (!CPU_ISSET_S(j, present_cpumask_size, present_cpumask))
5947     + continue;
5948     +
5949     if (cpu_map[j].pkg_id == pkg_id &&
5950     cpu_map[j].die_id == die_id &&
5951     cpu_map[j].punit_cpu_core == i) {
5952     diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
5953     index b8265ee9923f..614b31aad168 100644
5954     --- a/tools/testing/selftests/net/udpgso.c
5955     +++ b/tools/testing/selftests/net/udpgso.c
5956     @@ -89,12 +89,9 @@ struct testcase testcases_v4[] = {
5957     .tfail = true,
5958     },
5959     {
5960     - /* send a single MSS: will fail with GSO, because the segment
5961     - * logic in udp4_ufo_fragment demands a gso skb to be > MTU
5962     - */
5963     + /* send a single MSS: will fall back to no GSO */
5964     .tlen = CONST_MSS_V4,
5965     .gso_len = CONST_MSS_V4,
5966     - .tfail = true,
5967     .r_num_mss = 1,
5968     },
5969     {
5970     @@ -139,10 +136,9 @@ struct testcase testcases_v4[] = {
5971     .tfail = true,
5972     },
5973     {
5974     - /* send a single 1B MSS: will fail, see single MSS above */
5975     + /* send a single 1B MSS: will fall back to no GSO */
5976     .tlen = 1,
5977     .gso_len = 1,
5978     - .tfail = true,
5979     .r_num_mss = 1,
5980     },
5981     {
5982     @@ -196,12 +192,9 @@ struct testcase testcases_v6[] = {
5983     .tfail = true,
5984     },
5985     {
5986     - /* send a single MSS: will fail with GSO, because the segment
5987     - * logic in udp4_ufo_fragment demands a gso skb to be > MTU
5988     - */
5989     + /* send a single MSS: will fall back to no GSO */
5990     .tlen = CONST_MSS_V6,
5991     .gso_len = CONST_MSS_V6,
5992     - .tfail = true,
5993     .r_num_mss = 1,
5994     },
5995     {
5996     @@ -246,10 +239,9 @@ struct testcase testcases_v6[] = {
5997     .tfail = true,
5998     },
5999     {
6000     - /* send a single 1B MSS: will fail, see single MSS above */
6001     + /* send a single 1B MSS: will fall back to no GSO */
6002     .tlen = 1,
6003     .gso_len = 1,
6004     - .tfail = true,
6005     .r_num_mss = 1,
6006     },
6007     {
6008     diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
6009     index 97f9f491c541..c402464b038f 100644
6010     --- a/tools/testing/selftests/powerpc/tm/tm.h
6011     +++ b/tools/testing/selftests/powerpc/tm/tm.h
6012     @@ -55,7 +55,8 @@ static inline bool failure_is_unavailable(void)
6013     static inline bool failure_is_reschedule(void)
6014     {
6015     if ((failure_code() & TM_CAUSE_RESCHED) == TM_CAUSE_RESCHED ||
6016     - (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED)
6017     + (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED ||
6018     + (failure_code() & TM_CAUSE_KVM_FAC_UNAV) == TM_CAUSE_KVM_FAC_UNAV)
6019     return true;
6020    
6021     return false;
6022     diff --git a/usr/Makefile b/usr/Makefile
6023     index 6a89eb019275..e6f7cb2f81db 100644
6024     --- a/usr/Makefile
6025     +++ b/usr/Makefile
6026     @@ -11,6 +11,9 @@ datafile_y = initramfs_data.cpio$(suffix_y)
6027     datafile_d_y = .$(datafile_y).d
6028     AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
6029    
6030     +# clean rules do not have CONFIG_INITRAMFS_COMPRESSION. So clean up after all
6031     +# possible compression formats.
6032     +clean-files += initramfs_data.cpio*
6033    
6034     # Generate builtin.o based on initramfs_data.o
6035     obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o