Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.1/0104-4.1.5-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2748 - (hide annotations) (download)
Mon Jan 11 12:00:45 2016 UTC (8 years, 4 months ago) by niro
File size: 181635 byte(s)
-linux-4.1 patches up to 4.1.15
1 niro 2748 diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904
2     index 014f112e2a14..57fffe33ebfc 100644
3     --- a/Documentation/hwmon/nct7904
4     +++ b/Documentation/hwmon/nct7904
5     @@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree,
6     temp[2-9]_input CPU temperatures (1/1000 degree,
7     0.125 degree resolution)
8    
9     -fan[1-4]_mode R/W, 0/1 for manual or SmartFan mode
10     +pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode
11     Setting SmartFan mode is supported only if it has been
12     previously configured by BIOS (or configuration EEPROM)
13    
14     -fan[1-4]_pwm R/O in SmartFan mode, R/W in manual control mode
15     +pwm[1-4] R/O in SmartFan mode, R/W in manual control mode
16    
17     The driver checks sensor control registers and does not export the sensors
18     that are not enabled. Anyway, a sensor that is enabled may actually be not
19     diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
20     index 74b6c6d97210..d2b1c40cb666 100644
21     --- a/Documentation/kbuild/makefiles.txt
22     +++ b/Documentation/kbuild/makefiles.txt
23     @@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly):
24     $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
25     mode) if this option is supported by $(AR).
26    
27     + ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS Overrides the kbuild defaults
28     +
29     + These variables are appended to the KBUILD_CPPFLAGS,
30     + KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
31     + top-level Makefile has set any other flags. This provides a
32     + means for an architecture to override the defaults.
33     +
34     +
35     --- 6.2 Add prerequisites to archheaders:
36    
37     The archheaders: rule is used to generate header files that
38     diff --git a/Makefile b/Makefile
39     index 36f3225cdf1f..068dd690933d 100644
40     --- a/Makefile
41     +++ b/Makefile
42     @@ -1,6 +1,6 @@
43     VERSION = 4
44     PATCHLEVEL = 1
45     -SUBLEVEL = 4
46     +SUBLEVEL = 5
47     EXTRAVERSION =
48     NAME = Series 4800
49    
50     @@ -783,10 +783,11 @@ endif
51     include scripts/Makefile.kasan
52     include scripts/Makefile.extrawarn
53    
54     -# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
55     -KBUILD_CPPFLAGS += $(KCPPFLAGS)
56     -KBUILD_AFLAGS += $(KAFLAGS)
57     -KBUILD_CFLAGS += $(KCFLAGS)
58     +# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
59     +# last assignments
60     +KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
61     +KBUILD_AFLAGS += $(ARCH_AFLAGS) $(KAFLAGS)
62     +KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS)
63    
64     # Use --build-id when available.
65     LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
66     diff --git a/arch/arc/Makefile b/arch/arc/Makefile
67     index db72fec0e160..2f21e1e0ecf7 100644
68     --- a/arch/arc/Makefile
69     +++ b/arch/arc/Makefile
70     @@ -43,7 +43,8 @@ endif
71    
72     ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
73     # Generic build system uses -O2, we want -O3
74     -cflags-y += -O3
75     +# Note: No need to add to cflags-y as that happens anyways
76     +ARCH_CFLAGS += -O3
77     endif
78    
79     # small data is default for elf32 tool-chain. If not usable, disable it
80     diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
81     index 624a9d048ca9..dae03e66fa9e 100644
82     --- a/arch/arc/include/asm/bitops.h
83     +++ b/arch/arc/include/asm/bitops.h
84     @@ -18,83 +18,49 @@
85     #include <linux/types.h>
86     #include <linux/compiler.h>
87     #include <asm/barrier.h>
88     +#ifndef CONFIG_ARC_HAS_LLSC
89     +#include <asm/smp.h>
90     +#endif
91    
92     -/*
93     - * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
94     - * The Kconfig glue ensures that in SMP, this is only set if the container
95     - * SoC/platform has cross-core coherent LLOCK/SCOND
96     - */
97     #if defined(CONFIG_ARC_HAS_LLSC)
98    
99     -static inline void set_bit(unsigned long nr, volatile unsigned long *m)
100     -{
101     - unsigned int temp;
102     -
103     - m += nr >> 5;
104     -
105     - /*
106     - * ARC ISA micro-optimization:
107     - *
108     - * Instructions dealing with bitpos only consider lower 5 bits (0-31)
109     - * e.g (x << 33) is handled like (x << 1) by ASL instruction
110     - * (mem pointer still needs adjustment to point to next word)
111     - *
112     - * Hence the masking to clamp @nr arg can be elided in general.
113     - *
114     - * However if @nr is a constant (above assumed it in a register),
115     - * and greater than 31, gcc can optimize away (x << 33) to 0,
116     - * as overflow, given the 32-bit ISA. Thus masking needs to be done
117     - * for constant @nr, but no code is generated due to const prop.
118     - */
119     - if (__builtin_constant_p(nr))
120     - nr &= 0x1f;
121     -
122     - __asm__ __volatile__(
123     - "1: llock %0, [%1] \n"
124     - " bset %0, %0, %2 \n"
125     - " scond %0, [%1] \n"
126     - " bnz 1b \n"
127     - : "=&r"(temp)
128     - : "r"(m), "ir"(nr)
129     - : "cc");
130     -}
131     -
132     -static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
133     -{
134     - unsigned int temp;
135     -
136     - m += nr >> 5;
137     -
138     - if (__builtin_constant_p(nr))
139     - nr &= 0x1f;
140     -
141     - __asm__ __volatile__(
142     - "1: llock %0, [%1] \n"
143     - " bclr %0, %0, %2 \n"
144     - " scond %0, [%1] \n"
145     - " bnz 1b \n"
146     - : "=&r"(temp)
147     - : "r"(m), "ir"(nr)
148     - : "cc");
149     -}
150     -
151     -static inline void change_bit(unsigned long nr, volatile unsigned long *m)
152     -{
153     - unsigned int temp;
154     -
155     - m += nr >> 5;
156     -
157     - if (__builtin_constant_p(nr))
158     - nr &= 0x1f;
159     +/*
160     + * Hardware assisted Atomic-R-M-W
161     + */
162    
163     - __asm__ __volatile__(
164     - "1: llock %0, [%1] \n"
165     - " bxor %0, %0, %2 \n"
166     - " scond %0, [%1] \n"
167     - " bnz 1b \n"
168     - : "=&r"(temp)
169     - : "r"(m), "ir"(nr)
170     - : "cc");
171     +#define BIT_OP(op, c_op, asm_op) \
172     +static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
173     +{ \
174     + unsigned int temp; \
175     + \
176     + m += nr >> 5; \
177     + \
178     + /* \
179     + * ARC ISA micro-optimization: \
180     + * \
181     + * Instructions dealing with bitpos only consider lower 5 bits \
182     + * e.g (x << 33) is handled like (x << 1) by ASL instruction \
183     + * (mem pointer still needs adjustment to point to next word) \
184     + * \
185     + * Hence the masking to clamp @nr arg can be elided in general. \
186     + * \
187     + * However if @nr is a constant (above assumed in a register), \
188     + * and greater than 31, gcc can optimize away (x << 33) to 0, \
189     + * as overflow, given the 32-bit ISA. Thus masking needs to be \
190     + * done for const @nr, but no code is generated due to gcc \
191     + * const prop. \
192     + */ \
193     + nr &= 0x1f; \
194     + \
195     + __asm__ __volatile__( \
196     + "1: llock %0, [%1] \n" \
197     + " " #asm_op " %0, %0, %2 \n" \
198     + " scond %0, [%1] \n" \
199     + " bnz 1b \n" \
200     + : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
201     + : "r"(m), /* Not "m": llock only supports reg direct addr mode */ \
202     + "ir"(nr) \
203     + : "cc"); \
204     }
205    
206     /*
207     @@ -108,91 +74,37 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *m)
208     * Since ARC lacks a equivalent h/w primitive, the bit is set unconditionally
209     * and the old value of bit is returned
210     */
211     -static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
212     -{
213     - unsigned long old, temp;
214     -
215     - m += nr >> 5;
216     -
217     - if (__builtin_constant_p(nr))
218     - nr &= 0x1f;
219     -
220     - /*
221     - * Explicit full memory barrier needed before/after as
222     - * LLOCK/SCOND themselves don't provide any such semantics
223     - */
224     - smp_mb();
225     -
226     - __asm__ __volatile__(
227     - "1: llock %0, [%2] \n"
228     - " bset %1, %0, %3 \n"
229     - " scond %1, [%2] \n"
230     - " bnz 1b \n"
231     - : "=&r"(old), "=&r"(temp)
232     - : "r"(m), "ir"(nr)
233     - : "cc");
234     -
235     - smp_mb();
236     -
237     - return (old & (1 << nr)) != 0;
238     -}
239     -
240     -static inline int
241     -test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
242     -{
243     - unsigned int old, temp;
244     -
245     - m += nr >> 5;
246     -
247     - if (__builtin_constant_p(nr))
248     - nr &= 0x1f;
249     -
250     - smp_mb();
251     -
252     - __asm__ __volatile__(
253     - "1: llock %0, [%2] \n"
254     - " bclr %1, %0, %3 \n"
255     - " scond %1, [%2] \n"
256     - " bnz 1b \n"
257     - : "=&r"(old), "=&r"(temp)
258     - : "r"(m), "ir"(nr)
259     - : "cc");
260     -
261     - smp_mb();
262     -
263     - return (old & (1 << nr)) != 0;
264     -}
265     -
266     -static inline int
267     -test_and_change_bit(unsigned long nr, volatile unsigned long *m)
268     -{
269     - unsigned int old, temp;
270     -
271     - m += nr >> 5;
272     -
273     - if (__builtin_constant_p(nr))
274     - nr &= 0x1f;
275     -
276     - smp_mb();
277     -
278     - __asm__ __volatile__(
279     - "1: llock %0, [%2] \n"
280     - " bxor %1, %0, %3 \n"
281     - " scond %1, [%2] \n"
282     - " bnz 1b \n"
283     - : "=&r"(old), "=&r"(temp)
284     - : "r"(m), "ir"(nr)
285     - : "cc");
286     -
287     - smp_mb();
288     -
289     - return (old & (1 << nr)) != 0;
290     +#define TEST_N_BIT_OP(op, c_op, asm_op) \
291     +static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
292     +{ \
293     + unsigned long old, temp; \
294     + \
295     + m += nr >> 5; \
296     + \
297     + nr &= 0x1f; \
298     + \
299     + /* \
300     + * Explicit full memory barrier needed before/after as \
301     + * LLOCK/SCOND themselves don't provide any such smenatic \
302     + */ \
303     + smp_mb(); \
304     + \
305     + __asm__ __volatile__( \
306     + "1: llock %0, [%2] \n" \
307     + " " #asm_op " %1, %0, %3 \n" \
308     + " scond %1, [%2] \n" \
309     + " bnz 1b \n" \
310     + : "=&r"(old), "=&r"(temp) \
311     + : "r"(m), "ir"(nr) \
312     + : "cc"); \
313     + \
314     + smp_mb(); \
315     + \
316     + return (old & (1 << nr)) != 0; \
317     }
318    
319     #else /* !CONFIG_ARC_HAS_LLSC */
320    
321     -#include <asm/smp.h>
322     -
323     /*
324     * Non hardware assisted Atomic-R-M-W
325     * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
326     @@ -209,111 +121,37 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
327     * at compile time)
328     */
329    
330     -static inline void set_bit(unsigned long nr, volatile unsigned long *m)
331     -{
332     - unsigned long temp, flags;
333     - m += nr >> 5;
334     -
335     - if (__builtin_constant_p(nr))
336     - nr &= 0x1f;
337     -
338     - bitops_lock(flags);
339     -
340     - temp = *m;
341     - *m = temp | (1UL << nr);
342     -
343     - bitops_unlock(flags);
344     +#define BIT_OP(op, c_op, asm_op) \
345     +static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
346     +{ \
347     + unsigned long temp, flags; \
348     + m += nr >> 5; \
349     + \
350     + /* \
351     + * spin lock/unlock provide the needed smp_mb() before/after \
352     + */ \
353     + bitops_lock(flags); \
354     + \
355     + temp = *m; \
356     + *m = temp c_op (1UL << (nr & 0x1f)); \
357     + \
358     + bitops_unlock(flags); \
359     }
360    
361     -static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
362     -{
363     - unsigned long temp, flags;
364     - m += nr >> 5;
365     -
366     - if (__builtin_constant_p(nr))
367     - nr &= 0x1f;
368     -
369     - bitops_lock(flags);
370     -
371     - temp = *m;
372     - *m = temp & ~(1UL << nr);
373     -
374     - bitops_unlock(flags);
375     -}
376     -
377     -static inline void change_bit(unsigned long nr, volatile unsigned long *m)
378     -{
379     - unsigned long temp, flags;
380     - m += nr >> 5;
381     -
382     - if (__builtin_constant_p(nr))
383     - nr &= 0x1f;
384     -
385     - bitops_lock(flags);
386     -
387     - temp = *m;
388     - *m = temp ^ (1UL << nr);
389     -
390     - bitops_unlock(flags);
391     -}
392     -
393     -static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
394     -{
395     - unsigned long old, flags;
396     - m += nr >> 5;
397     -
398     - if (__builtin_constant_p(nr))
399     - nr &= 0x1f;
400     -
401     - /*
402     - * spin lock/unlock provide the needed smp_mb() before/after
403     - */
404     - bitops_lock(flags);
405     -
406     - old = *m;
407     - *m = old | (1 << nr);
408     -
409     - bitops_unlock(flags);
410     -
411     - return (old & (1 << nr)) != 0;
412     -}
413     -
414     -static inline int
415     -test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
416     -{
417     - unsigned long old, flags;
418     - m += nr >> 5;
419     -
420     - if (__builtin_constant_p(nr))
421     - nr &= 0x1f;
422     -
423     - bitops_lock(flags);
424     -
425     - old = *m;
426     - *m = old & ~(1 << nr);
427     -
428     - bitops_unlock(flags);
429     -
430     - return (old & (1 << nr)) != 0;
431     -}
432     -
433     -static inline int
434     -test_and_change_bit(unsigned long nr, volatile unsigned long *m)
435     -{
436     - unsigned long old, flags;
437     - m += nr >> 5;
438     -
439     - if (__builtin_constant_p(nr))
440     - nr &= 0x1f;
441     -
442     - bitops_lock(flags);
443     -
444     - old = *m;
445     - *m = old ^ (1 << nr);
446     -
447     - bitops_unlock(flags);
448     -
449     - return (old & (1 << nr)) != 0;
450     +#define TEST_N_BIT_OP(op, c_op, asm_op) \
451     +static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
452     +{ \
453     + unsigned long old, flags; \
454     + m += nr >> 5; \
455     + \
456     + bitops_lock(flags); \
457     + \
458     + old = *m; \
459     + *m = old c_op (1UL << (nr & 0x1f)); \
460     + \
461     + bitops_unlock(flags); \
462     + \
463     + return (old & (1UL << (nr & 0x1f))) != 0; \
464     }
465    
466     #endif /* CONFIG_ARC_HAS_LLSC */
467     @@ -322,86 +160,45 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
468     * Non atomic variants
469     **************************************/
470    
471     -static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
472     -{
473     - unsigned long temp;
474     - m += nr >> 5;
475     -
476     - if (__builtin_constant_p(nr))
477     - nr &= 0x1f;
478     -
479     - temp = *m;
480     - *m = temp | (1UL << nr);
481     +#define __BIT_OP(op, c_op, asm_op) \
482     +static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \
483     +{ \
484     + unsigned long temp; \
485     + m += nr >> 5; \
486     + \
487     + temp = *m; \
488     + *m = temp c_op (1UL << (nr & 0x1f)); \
489     }
490    
491     -static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
492     -{
493     - unsigned long temp;
494     - m += nr >> 5;
495     -
496     - if (__builtin_constant_p(nr))
497     - nr &= 0x1f;
498     -
499     - temp = *m;
500     - *m = temp & ~(1UL << nr);
501     +#define __TEST_N_BIT_OP(op, c_op, asm_op) \
502     +static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
503     +{ \
504     + unsigned long old; \
505     + m += nr >> 5; \
506     + \
507     + old = *m; \
508     + *m = old c_op (1UL << (nr & 0x1f)); \
509     + \
510     + return (old & (1UL << (nr & 0x1f))) != 0; \
511     }
512    
513     -static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
514     -{
515     - unsigned long temp;
516     - m += nr >> 5;
517     -
518     - if (__builtin_constant_p(nr))
519     - nr &= 0x1f;
520     -
521     - temp = *m;
522     - *m = temp ^ (1UL << nr);
523     -}
524     -
525     -static inline int
526     -__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
527     -{
528     - unsigned long old;
529     - m += nr >> 5;
530     -
531     - if (__builtin_constant_p(nr))
532     - nr &= 0x1f;
533     -
534     - old = *m;
535     - *m = old | (1 << nr);
536     -
537     - return (old & (1 << nr)) != 0;
538     -}
539     -
540     -static inline int
541     -__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
542     -{
543     - unsigned long old;
544     - m += nr >> 5;
545     -
546     - if (__builtin_constant_p(nr))
547     - nr &= 0x1f;
548     -
549     - old = *m;
550     - *m = old & ~(1 << nr);
551     -
552     - return (old & (1 << nr)) != 0;
553     -}
554     -
555     -static inline int
556     -__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
557     -{
558     - unsigned long old;
559     - m += nr >> 5;
560     -
561     - if (__builtin_constant_p(nr))
562     - nr &= 0x1f;
563     -
564     - old = *m;
565     - *m = old ^ (1 << nr);
566     -
567     - return (old & (1 << nr)) != 0;
568     -}
569     +#define BIT_OPS(op, c_op, asm_op) \
570     + \
571     + /* set_bit(), clear_bit(), change_bit() */ \
572     + BIT_OP(op, c_op, asm_op) \
573     + \
574     + /* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
575     + TEST_N_BIT_OP(op, c_op, asm_op) \
576     + \
577     + /* __set_bit(), __clear_bit(), __change_bit() */ \
578     + __BIT_OP(op, c_op, asm_op) \
579     + \
580     + /* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
581     + __TEST_N_BIT_OP(op, c_op, asm_op)
582     +
583     +BIT_OPS(set, |, bset)
584     +BIT_OPS(clear, & ~, bclr)
585     +BIT_OPS(change, ^, bxor)
586    
587     /*
588     * This routine doesn't need to be atomic.
589     @@ -413,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
590    
591     addr += nr >> 5;
592    
593     - if (__builtin_constant_p(nr))
594     - nr &= 0x1f;
595     -
596     - mask = 1 << nr;
597     + mask = 1UL << (nr & 0x1f);
598    
599     return ((mask & *addr) != 0);
600     }
601     diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
602     index 1bfeec2c0558..2a58af7a2e3a 100644
603     --- a/arch/arc/include/asm/ptrace.h
604     +++ b/arch/arc/include/asm/ptrace.h
605     @@ -63,7 +63,7 @@ struct callee_regs {
606     long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
607     };
608    
609     -#define instruction_pointer(regs) ((regs)->ret)
610     +#define instruction_pointer(regs) (unsigned long)((regs)->ret)
611     #define profile_pc(regs) instruction_pointer(regs)
612    
613     /* return 1 if user mode or 0 if kernel mode */
614     diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
615     index 7128fad991ac..c9df40e5cd3b 100644
616     --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
617     +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
618     @@ -544,6 +544,10 @@
619     phy-supply = <&ldousb_reg>;
620     };
621    
622     +&usb2_phy2 {
623     + phy-supply = <&ldousb_reg>;
624     +};
625     +
626     &usb1 {
627     dr_mode = "host";
628     pinctrl-names = "default";
629     diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
630     index aa465904f6cc..096f68be99e2 100644
631     --- a/arch/arm/boot/dts/dra7-evm.dts
632     +++ b/arch/arm/boot/dts/dra7-evm.dts
633     @@ -686,7 +686,8 @@
634    
635     &dcan1 {
636     status = "ok";
637     - pinctrl-names = "default", "sleep";
638     - pinctrl-0 = <&dcan1_pins_default>;
639     + pinctrl-names = "default", "sleep", "active";
640     + pinctrl-0 = <&dcan1_pins_sleep>;
641     pinctrl-1 = <&dcan1_pins_sleep>;
642     + pinctrl-2 = <&dcan1_pins_default>;
643     };
644     diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
645     index ce0390f081d9..6b05f6a0ba84 100644
646     --- a/arch/arm/boot/dts/dra72-evm.dts
647     +++ b/arch/arm/boot/dts/dra72-evm.dts
648     @@ -497,9 +497,10 @@
649    
650     &dcan1 {
651     status = "ok";
652     - pinctrl-names = "default", "sleep";
653     - pinctrl-0 = <&dcan1_pins_default>;
654     + pinctrl-names = "default", "sleep", "active";
655     + pinctrl-0 = <&dcan1_pins_sleep>;
656     pinctrl-1 = <&dcan1_pins_sleep>;
657     + pinctrl-2 = <&dcan1_pins_default>;
658     };
659    
660     &qspi {
661     diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
662     index 6d0893a3828e..78b6fd0b86e6 100644
663     --- a/arch/arm/mach-imx/gpc.c
664     +++ b/arch/arm/mach-imx/gpc.c
665     @@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void)
666     }
667     }
668    
669     -#ifdef CONFIG_PM_GENERIC_DOMAINS
670     -
671     static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
672     {
673     int iso, iso2sw;
674     @@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
675     static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
676     {
677     struct clk *clk;
678     - bool is_off;
679     int i;
680    
681     imx6q_pu_domain.reg = pu_reg;
682     @@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
683     }
684     imx6q_pu_domain.num_clks = i;
685    
686     - is_off = IS_ENABLED(CONFIG_PM);
687     - if (is_off) {
688     - _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
689     - } else {
690     - /*
691     - * Enable power if compiled without CONFIG_PM in case the
692     - * bootloader disabled it.
693     - */
694     - imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
695     - }
696     + /* Enable power always in case bootloader disabled it. */
697     + imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
698     +
699     + if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
700     + return 0;
701    
702     - pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
703     + pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
704     return of_genpd_add_provider_onecell(dev->of_node,
705     &imx_gpc_onecell_data);
706    
707     @@ -437,13 +429,6 @@ clk_err:
708     return -EINVAL;
709     }
710    
711     -#else
712     -static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
713     -{
714     - return 0;
715     -}
716     -#endif /* CONFIG_PM_GENERIC_DOMAINS */
717     -
718     static int imx_gpc_probe(struct platform_device *pdev)
719     {
720     struct regulator *pu_reg;
721     diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c
722     index c092730749b9..bf366b39fa61 100644
723     --- a/arch/arm/mach-pxa/capc7117.c
724     +++ b/arch/arm/mach-pxa/capc7117.c
725     @@ -24,6 +24,7 @@
726     #include <linux/ata_platform.h>
727     #include <linux/serial_8250.h>
728     #include <linux/gpio.h>
729     +#include <linux/regulator/machine.h>
730    
731     #include <asm/mach-types.h>
732     #include <asm/mach/arch.h>
733     @@ -144,6 +145,8 @@ static void __init capc7117_init(void)
734    
735     capc7117_uarts_init();
736     capc7117_ide_init();
737     +
738     + regulator_has_full_constraints();
739     }
740    
741     MACHINE_START(CAPC7117,
742     diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
743     index bb99f59a36d8..a17a91eb8e9a 100644
744     --- a/arch/arm/mach-pxa/cm-x2xx.c
745     +++ b/arch/arm/mach-pxa/cm-x2xx.c
746     @@ -13,6 +13,7 @@
747     #include <linux/syscore_ops.h>
748     #include <linux/irq.h>
749     #include <linux/gpio.h>
750     +#include <linux/regulator/machine.h>
751    
752     #include <linux/dm9000.h>
753     #include <linux/leds.h>
754     @@ -466,6 +467,8 @@ static void __init cmx2xx_init(void)
755     cmx2xx_init_ac97();
756     cmx2xx_init_touchscreen();
757     cmx2xx_init_leds();
758     +
759     + regulator_has_full_constraints();
760     }
761    
762     static void __init cmx2xx_init_irq(void)
763     diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
764     index 4d3588d26c2a..5851f4c254c1 100644
765     --- a/arch/arm/mach-pxa/cm-x300.c
766     +++ b/arch/arm/mach-pxa/cm-x300.c
767     @@ -835,6 +835,8 @@ static void __init cm_x300_init(void)
768     cm_x300_init_ac97();
769     cm_x300_init_wi2wi();
770     cm_x300_init_bl();
771     +
772     + regulator_has_full_constraints();
773     }
774    
775     static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
776     diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
777     index 5f9d9303b346..3503826333c7 100644
778     --- a/arch/arm/mach-pxa/colibri-pxa270.c
779     +++ b/arch/arm/mach-pxa/colibri-pxa270.c
780     @@ -18,6 +18,7 @@
781     #include <linux/mtd/partitions.h>
782     #include <linux/mtd/physmap.h>
783     #include <linux/platform_device.h>
784     +#include <linux/regulator/machine.h>
785     #include <linux/ucb1400.h>
786    
787     #include <asm/mach/arch.h>
788     @@ -294,6 +295,8 @@ static void __init colibri_pxa270_init(void)
789     printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n",
790     colibri_pxa270_baseboard);
791     }
792     +
793     + regulator_has_full_constraints();
794     }
795    
796     /* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either
797     diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
798     index 51531ecffca8..9d7072b04045 100644
799     --- a/arch/arm/mach-pxa/em-x270.c
800     +++ b/arch/arm/mach-pxa/em-x270.c
801     @@ -1306,6 +1306,8 @@ static void __init em_x270_init(void)
802     em_x270_init_i2c();
803     em_x270_init_camera();
804     em_x270_userspace_consumers_init();
805     +
806     + regulator_has_full_constraints();
807     }
808    
809     MACHINE_START(EM_X270, "Compulab EM-X270")
810     diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
811     index c98511c5abd1..9b0eb0252af6 100644
812     --- a/arch/arm/mach-pxa/icontrol.c
813     +++ b/arch/arm/mach-pxa/icontrol.c
814     @@ -26,6 +26,7 @@
815     #include <linux/spi/spi.h>
816     #include <linux/spi/pxa2xx_spi.h>
817     #include <linux/can/platform/mcp251x.h>
818     +#include <linux/regulator/machine.h>
819    
820     #include "generic.h"
821    
822     @@ -185,6 +186,8 @@ static void __init icontrol_init(void)
823     mxm_8x10_mmc_init();
824    
825     icontrol_can_init();
826     +
827     + regulator_has_full_constraints();
828     }
829    
830     MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
831     diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
832     index 872dcb20e757..066e3a250ee0 100644
833     --- a/arch/arm/mach-pxa/trizeps4.c
834     +++ b/arch/arm/mach-pxa/trizeps4.c
835     @@ -26,6 +26,7 @@
836     #include <linux/dm9000.h>
837     #include <linux/mtd/physmap.h>
838     #include <linux/mtd/partitions.h>
839     +#include <linux/regulator/machine.h>
840     #include <linux/i2c/pxa-i2c.h>
841    
842     #include <asm/types.h>
843     @@ -534,6 +535,8 @@ static void __init trizeps4_init(void)
844    
845     BCR_writew(trizeps_conxs_bcr);
846     board_backlight_power(1);
847     +
848     + regulator_has_full_constraints();
849     }
850    
851     static void __init trizeps4_map_io(void)
852     diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
853     index aa89488f961e..54122a983ae3 100644
854     --- a/arch/arm/mach-pxa/vpac270.c
855     +++ b/arch/arm/mach-pxa/vpac270.c
856     @@ -24,6 +24,7 @@
857     #include <linux/dm9000.h>
858     #include <linux/ucb1400.h>
859     #include <linux/ata_platform.h>
860     +#include <linux/regulator/machine.h>
861     #include <linux/regulator/max1586.h>
862     #include <linux/i2c/pxa-i2c.h>
863    
864     @@ -711,6 +712,8 @@ static void __init vpac270_init(void)
865     vpac270_ts_init();
866     vpac270_rtc_init();
867     vpac270_ide_init();
868     +
869     + regulator_has_full_constraints();
870     }
871    
872     MACHINE_START(VPAC270, "Voipac PXA270")
873     diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
874     index ac2ae5c71ab4..6158566fa0f7 100644
875     --- a/arch/arm/mach-pxa/zeus.c
876     +++ b/arch/arm/mach-pxa/zeus.c
877     @@ -868,6 +868,8 @@ static void __init zeus_init(void)
878     i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
879     pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info);
880     spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info));
881     +
882     + regulator_has_full_constraints();
883     }
884    
885     static struct map_desc zeus_io_desc[] __initdata = {
886     diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
887     index 7e7583ddd607..6e4b9ff22ef3 100644
888     --- a/arch/arm/mm/dma-mapping.c
889     +++ b/arch/arm/mm/dma-mapping.c
890     @@ -1953,7 +1953,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
891     {
892     int next_bitmap;
893    
894     - if (mapping->nr_bitmaps > mapping->extensions)
895     + if (mapping->nr_bitmaps >= mapping->extensions)
896     return -EINVAL;
897    
898     next_bitmap = mapping->nr_bitmaps;
899     diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
900     index ab21e0d58278..352962bc2e78 100644
901     --- a/arch/arm64/kernel/efi.c
902     +++ b/arch/arm64/kernel/efi.c
903     @@ -122,12 +122,12 @@ static int __init uefi_init(void)
904    
905     /* Show what we know for posterity */
906     c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
907     - sizeof(vendor));
908     + sizeof(vendor) * sizeof(efi_char16_t));
909     if (c16) {
910     for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
911     vendor[i] = c16[i];
912     vendor[i] = '\0';
913     - early_memunmap(c16, sizeof(vendor));
914     + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
915     }
916    
917     pr_info("EFI v%u.%.02u by %s\n",
918     diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
919     index 23b1a97fae7a..52c179bec0cc 100644
920     --- a/arch/avr32/mach-at32ap/clock.c
921     +++ b/arch/avr32/mach-at32ap/clock.c
922     @@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
923     {
924     unsigned long flags;
925    
926     + if (!clk)
927     + return 0;
928     +
929     spin_lock_irqsave(&clk_lock, flags);
930     __clk_enable(clk);
931     spin_unlock_irqrestore(&clk_lock, flags);
932     @@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
933     {
934     unsigned long flags;
935    
936     + if (IS_ERR_OR_NULL(clk))
937     + return;
938     +
939     spin_lock_irqsave(&clk_lock, flags);
940     __clk_disable(clk);
941     spin_unlock_irqrestore(&clk_lock, flags);
942     @@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
943     unsigned long flags;
944     unsigned long rate;
945    
946     + if (!clk)
947     + return 0;
948     +
949     spin_lock_irqsave(&clk_lock, flags);
950     rate = clk->get_rate(clk);
951     spin_unlock_irqrestore(&clk_lock, flags);
952     @@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
953     {
954     unsigned long flags, actual_rate;
955    
956     + if (!clk)
957     + return 0;
958     +
959     if (!clk->set_rate)
960     return -ENOSYS;
961    
962     @@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
963     unsigned long flags;
964     long ret;
965    
966     + if (!clk)
967     + return 0;
968     +
969     if (!clk->set_rate)
970     return -ENOSYS;
971    
972     @@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
973     unsigned long flags;
974     int ret;
975    
976     + if (!clk)
977     + return 0;
978     +
979     if (!clk->set_parent)
980     return -ENOSYS;
981    
982     @@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
983    
984     struct clk *clk_get_parent(struct clk *clk)
985     {
986     - return clk->parent;
987     + return !clk ? NULL : clk->parent;
988     }
989     EXPORT_SYMBOL(clk_get_parent);
990    
991     diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
992     index f5016656494f..a3b1ffe50aa0 100644
993     --- a/arch/mips/Kconfig
994     +++ b/arch/mips/Kconfig
995     @@ -1417,6 +1417,7 @@ config CPU_MIPS64_R6
996     select CPU_SUPPORTS_HIGHMEM
997     select CPU_SUPPORTS_MSA
998     select GENERIC_CSUM
999     + select MIPS_O32_FP64_SUPPORT if MIPS32_O32
1000     help
1001     Choose this option to build a kernel for release 6 or later of the
1002     MIPS64 architecture. New MIPS processors, starting with the Warrior
1003     diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
1004     index 084780b355aa..1b0625189835 100644
1005     --- a/arch/mips/include/asm/fpu.h
1006     +++ b/arch/mips/include/asm/fpu.h
1007     @@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
1008     goto fr_common;
1009    
1010     case FPU_64BIT:
1011     -#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
1012     +#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
1013     || defined(CONFIG_64BIT))
1014     /* we only have a 32-bit FPU */
1015     return SIGFPE;
1016     diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
1017     index 2b25d1ba1ea0..16f1ea9ab191 100644
1018     --- a/arch/mips/include/asm/smp.h
1019     +++ b/arch/mips/include/asm/smp.h
1020     @@ -23,6 +23,7 @@
1021     extern int smp_num_siblings;
1022     extern cpumask_t cpu_sibling_map[];
1023     extern cpumask_t cpu_core_map[];
1024     +extern cpumask_t cpu_foreign_map;
1025    
1026     #define raw_smp_processor_id() (current_thread_info()->cpu)
1027    
1028     diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
1029     index faa46ebd9dda..d0744cc77ea7 100644
1030     --- a/arch/mips/kernel/smp.c
1031     +++ b/arch/mips/kernel/smp.c
1032     @@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
1033     cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
1034     EXPORT_SYMBOL(cpu_core_map);
1035    
1036     +/*
1037     + * A logical cpu mask containing only one VPE per core to
1038     + * reduce the number of IPIs on large MT systems.
1039     + */
1040     +cpumask_t cpu_foreign_map __read_mostly;
1041     +EXPORT_SYMBOL(cpu_foreign_map);
1042     +
1043     /* representing cpus for which sibling maps can be computed */
1044     static cpumask_t cpu_sibling_setup_map;
1045    
1046     @@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
1047     }
1048     }
1049    
1050     +/*
1051     + * Calculate a new cpu_foreign_map mask whenever a
1052     + * new cpu appears or disappears.
1053     + */
1054     +static inline void calculate_cpu_foreign_map(void)
1055     +{
1056     + int i, k, core_present;
1057     + cpumask_t temp_foreign_map;
1058     +
1059     + /* Re-calculate the mask */
1060     + for_each_online_cpu(i) {
1061     + core_present = 0;
1062     + for_each_cpu(k, &temp_foreign_map)
1063     + if (cpu_data[i].package == cpu_data[k].package &&
1064     + cpu_data[i].core == cpu_data[k].core)
1065     + core_present = 1;
1066     + if (!core_present)
1067     + cpumask_set_cpu(i, &temp_foreign_map);
1068     + }
1069     +
1070     + cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
1071     +}
1072     +
1073     struct plat_smp_ops *mp_ops;
1074     EXPORT_SYMBOL(mp_ops);
1075    
1076     @@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
1077     set_cpu_sibling_map(cpu);
1078     set_cpu_core_map(cpu);
1079    
1080     + calculate_cpu_foreign_map();
1081     +
1082     cpumask_set_cpu(cpu, &cpu_callin_map);
1083    
1084     synchronise_count_slave(cpu);
1085     @@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void)
1086     static void stop_this_cpu(void *dummy)
1087     {
1088     /*
1089     - * Remove this CPU:
1090     + * Remove this CPU. Be a bit slow here and
1091     + * set the bits for every online CPU so we don't miss
1092     + * any IPI whilst taking this VPE down.
1093     */
1094     +
1095     + cpumask_copy(&cpu_foreign_map, cpu_online_mask);
1096     +
1097     + /* Make it visible to every other CPU */
1098     + smp_mb();
1099     +
1100     set_cpu_online(smp_processor_id(), false);
1101     + calculate_cpu_foreign_map();
1102     local_irq_disable();
1103     while (1);
1104     }
1105     @@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1106     mp_ops->prepare_cpus(max_cpus);
1107     set_cpu_sibling_map(0);
1108     set_cpu_core_map(0);
1109     + calculate_cpu_foreign_map();
1110     #ifndef CONFIG_HOTPLUG_CPU
1111     init_cpu_present(cpu_possible_mask);
1112     #endif
1113     diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
1114     index 22b9b2cb9219..6983fcd48131 100644
1115     --- a/arch/mips/math-emu/cp1emu.c
1116     +++ b/arch/mips/math-emu/cp1emu.c
1117     @@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
1118     /* Fall through */
1119     case jr_op:
1120     /* For R6, JR already emulated in jalr_op */
1121     - if (NO_R6EMU && insn.r_format.opcode == jr_op)
1122     + if (NO_R6EMU && insn.r_format.func == jr_op)
1123     break;
1124     *contpc = regs->regs[insn.r_format.rs];
1125     return 1;
1126     diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
1127     index 2e03ab173591..dca0efc078c1 100644
1128     --- a/arch/mips/mm/c-r4k.c
1129     +++ b/arch/mips/mm/c-r4k.c
1130     @@ -37,6 +37,7 @@
1131     #include <asm/cacheflush.h> /* for run_uncached() */
1132     #include <asm/traps.h>
1133     #include <asm/dma-coherence.h>
1134     +#include <asm/mips-cm.h>
1135    
1136     /*
1137     * Special Variant of smp_call_function for use by cache functions:
1138     @@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
1139     {
1140     preempt_disable();
1141    
1142     -#ifndef CONFIG_MIPS_MT_SMP
1143     - smp_call_function(func, info, 1);
1144     -#endif
1145     + /*
1146     + * The Coherent Manager propagates address-based cache ops to other
1147     + * cores but not index-based ops. However, r4k_on_each_cpu is used
1148     + * in both cases so there is no easy way to tell what kind of op is
1149     + * executed to the other cores. The best we can probably do is
1150     + * to restrict that call when a CM is not present because both
1151     + * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
1152     + */
1153     + if (!mips_cm_present())
1154     + smp_call_function_many(&cpu_foreign_map, func, info, 1);
1155     func(info);
1156     preempt_enable();
1157     }
1158     diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
1159     index 3a08eae3318f..3edbb9fc91b4 100644
1160     --- a/arch/parisc/include/asm/pgalloc.h
1161     +++ b/arch/parisc/include/asm/pgalloc.h
1162     @@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
1163    
1164     static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
1165     {
1166     - if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
1167     + if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
1168     /*
1169     * This is the permanent pmd attached to the pgd;
1170     * cannot free it.
1171     @@ -81,6 +81,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
1172     */
1173     mm_inc_nr_pmds(mm);
1174     return;
1175     + }
1176     free_pages((unsigned long)pmd, PMD_ORDER);
1177     }
1178    
1179     diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1180     index 0a183756d6ec..f93c4a4e6580 100644
1181     --- a/arch/parisc/include/asm/pgtable.h
1182     +++ b/arch/parisc/include/asm/pgtable.h
1183     @@ -16,7 +16,7 @@
1184     #include <asm/processor.h>
1185     #include <asm/cache.h>
1186    
1187     -extern spinlock_t pa_dbit_lock;
1188     +extern spinlock_t pa_tlb_lock;
1189    
1190     /*
1191     * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
1192     @@ -33,6 +33,19 @@ extern spinlock_t pa_dbit_lock;
1193     */
1194     #define kern_addr_valid(addr) (1)
1195    
1196     +/* Purge data and instruction TLB entries. Must be called holding
1197     + * the pa_tlb_lock. The TLB purge instructions are slow on SMP
1198     + * machines since the purge must be broadcast to all CPUs.
1199     + */
1200     +
1201     +static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
1202     +{
1203     + mtsp(mm->context, 1);
1204     + pdtlb(addr);
1205     + if (unlikely(split_tlb))
1206     + pitlb(addr);
1207     +}
1208     +
1209     /* Certain architectures need to do special things when PTEs
1210     * within a page table are directly modified. Thus, the following
1211     * hook is made available.
1212     @@ -42,15 +55,20 @@ extern spinlock_t pa_dbit_lock;
1213     *(pteptr) = (pteval); \
1214     } while(0)
1215    
1216     -extern void purge_tlb_entries(struct mm_struct *, unsigned long);
1217     +#define pte_inserted(x) \
1218     + ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
1219     + == (_PAGE_PRESENT|_PAGE_ACCESSED))
1220    
1221     -#define set_pte_at(mm, addr, ptep, pteval) \
1222     - do { \
1223     +#define set_pte_at(mm, addr, ptep, pteval) \
1224     + do { \
1225     + pte_t old_pte; \
1226     unsigned long flags; \
1227     - spin_lock_irqsave(&pa_dbit_lock, flags); \
1228     - set_pte(ptep, pteval); \
1229     - purge_tlb_entries(mm, addr); \
1230     - spin_unlock_irqrestore(&pa_dbit_lock, flags); \
1231     + spin_lock_irqsave(&pa_tlb_lock, flags); \
1232     + old_pte = *ptep; \
1233     + set_pte(ptep, pteval); \
1234     + if (pte_inserted(old_pte)) \
1235     + purge_tlb_entries(mm, addr); \
1236     + spin_unlock_irqrestore(&pa_tlb_lock, flags); \
1237     } while (0)
1238    
1239     #endif /* !__ASSEMBLY__ */
1240     @@ -268,7 +286,7 @@ extern unsigned long *empty_zero_page;
1241    
1242     #define pte_none(x) (pte_val(x) == 0)
1243     #define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
1244     -#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)
1245     +#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))
1246    
1247     #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
1248     #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
1249     @@ -435,15 +453,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
1250     if (!pte_young(*ptep))
1251     return 0;
1252    
1253     - spin_lock_irqsave(&pa_dbit_lock, flags);
1254     + spin_lock_irqsave(&pa_tlb_lock, flags);
1255     pte = *ptep;
1256     if (!pte_young(pte)) {
1257     - spin_unlock_irqrestore(&pa_dbit_lock, flags);
1258     + spin_unlock_irqrestore(&pa_tlb_lock, flags);
1259     return 0;
1260     }
1261     set_pte(ptep, pte_mkold(pte));
1262     purge_tlb_entries(vma->vm_mm, addr);
1263     - spin_unlock_irqrestore(&pa_dbit_lock, flags);
1264     + spin_unlock_irqrestore(&pa_tlb_lock, flags);
1265     return 1;
1266     }
1267    
1268     @@ -453,11 +471,12 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
1269     pte_t old_pte;
1270     unsigned long flags;
1271    
1272     - spin_lock_irqsave(&pa_dbit_lock, flags);
1273     + spin_lock_irqsave(&pa_tlb_lock, flags);
1274     old_pte = *ptep;
1275     - pte_clear(mm,addr,ptep);
1276     - purge_tlb_entries(mm, addr);
1277     - spin_unlock_irqrestore(&pa_dbit_lock, flags);
1278     + set_pte(ptep, __pte(0));
1279     + if (pte_inserted(old_pte))
1280     + purge_tlb_entries(mm, addr);
1281     + spin_unlock_irqrestore(&pa_tlb_lock, flags);
1282    
1283     return old_pte;
1284     }
1285     @@ -465,10 +484,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
1286     static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
1287     {
1288     unsigned long flags;
1289     - spin_lock_irqsave(&pa_dbit_lock, flags);
1290     + spin_lock_irqsave(&pa_tlb_lock, flags);
1291     set_pte(ptep, pte_wrprotect(*ptep));
1292     purge_tlb_entries(mm, addr);
1293     - spin_unlock_irqrestore(&pa_dbit_lock, flags);
1294     + spin_unlock_irqrestore(&pa_tlb_lock, flags);
1295     }
1296    
1297     #define pte_same(A,B) (pte_val(A) == pte_val(B))
1298     diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
1299     index 9d086a599fa0..e84b96478193 100644
1300     --- a/arch/parisc/include/asm/tlbflush.h
1301     +++ b/arch/parisc/include/asm/tlbflush.h
1302     @@ -13,6 +13,9 @@
1303     * active at any one time on the Merced bus. This tlb purge
1304     * synchronisation is fairly lightweight and harmless so we activate
1305     * it on all systems not just the N class.
1306     +
1307     + * It is also used to ensure PTE updates are atomic and consistent
1308     + * with the TLB.
1309     */
1310     extern spinlock_t pa_tlb_lock;
1311    
1312     @@ -24,20 +27,24 @@ extern void flush_tlb_all_local(void *);
1313    
1314     #define smp_flush_tlb_all() flush_tlb_all()
1315    
1316     +int __flush_tlb_range(unsigned long sid,
1317     + unsigned long start, unsigned long end);
1318     +
1319     +#define flush_tlb_range(vma, start, end) \
1320     + __flush_tlb_range((vma)->vm_mm->context, start, end)
1321     +
1322     +#define flush_tlb_kernel_range(start, end) \
1323     + __flush_tlb_range(0, start, end)
1324     +
1325     /*
1326     * flush_tlb_mm()
1327     *
1328     - * XXX This code is NOT valid for HP-UX compatibility processes,
1329     - * (although it will probably work 99% of the time). HP-UX
1330     - * processes are free to play with the space id's and save them
1331     - * over long periods of time, etc. so we have to preserve the
1332     - * space and just flush the entire tlb. We need to check the
1333     - * personality in order to do that, but the personality is not
1334     - * currently being set correctly.
1335     - *
1336     - * Of course, Linux processes could do the same thing, but
1337     - * we don't support that (and the compilers, dynamic linker,
1338     - * etc. do not do that).
1339     + * The code to switch to a new context is NOT valid for processes
1340     + * which play with the space id's. Thus, we have to preserve the
1341     + * space and just flush the entire tlb. However, the compilers,
1342     + * dynamic linker, etc, do not manipulate space id's, so there
1343     + * could be a significant performance benefit in switching contexts
1344     + * and not flushing the whole tlb.
1345     */
1346    
1347     static inline void flush_tlb_mm(struct mm_struct *mm)
1348     @@ -45,10 +52,18 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
1349     BUG_ON(mm == &init_mm); /* Should never happen */
1350    
1351     #if 1 || defined(CONFIG_SMP)
1352     + /* Except for very small threads, flushing the whole TLB is
1353     + * faster than using __flush_tlb_range. The pdtlb and pitlb
1354     + * instructions are very slow because of the TLB broadcast.
1355     + * It might be faster to do local range flushes on all CPUs
1356     + * on PA 2.0 systems.
1357     + */
1358     flush_tlb_all();
1359     #else
1360     /* FIXME: currently broken, causing space id and protection ids
1361     - * to go out of sync, resulting in faults on userspace accesses.
1362     + * to go out of sync, resulting in faults on userspace accesses.
1363     + * This approach needs further investigation since running many
1364     + * small applications (e.g., GCC testsuite) is faster on HP-UX.
1365     */
1366     if (mm) {
1367     if (mm->context != 0)
1368     @@ -65,22 +80,12 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
1369     {
1370     unsigned long flags, sid;
1371    
1372     - /* For one page, it's not worth testing the split_tlb variable */
1373     -
1374     - mb();
1375     sid = vma->vm_mm->context;
1376     purge_tlb_start(flags);
1377     mtsp(sid, 1);
1378     pdtlb(addr);
1379     - pitlb(addr);
1380     + if (unlikely(split_tlb))
1381     + pitlb(addr);
1382     purge_tlb_end(flags);
1383     }
1384     -
1385     -void __flush_tlb_range(unsigned long sid,
1386     - unsigned long start, unsigned long end);
1387     -
1388     -#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
1389     -
1390     -#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
1391     -
1392     #endif
1393     diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
1394     index f6448c7c62b5..cda6dbbe9842 100644
1395     --- a/arch/parisc/kernel/cache.c
1396     +++ b/arch/parisc/kernel/cache.c
1397     @@ -342,12 +342,15 @@ EXPORT_SYMBOL(flush_data_cache_local);
1398     EXPORT_SYMBOL(flush_kernel_icache_range_asm);
1399    
1400     #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
1401     -int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
1402     +static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
1403     +
1404     +#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
1405     +static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
1406    
1407     void __init parisc_setup_cache_timing(void)
1408     {
1409     unsigned long rangetime, alltime;
1410     - unsigned long size;
1411     + unsigned long size, start;
1412    
1413     alltime = mfctl(16);
1414     flush_data_cache();
1415     @@ -364,14 +367,43 @@ void __init parisc_setup_cache_timing(void)
1416     /* Racy, but if we see an intermediate value, it's ok too... */
1417     parisc_cache_flush_threshold = size * alltime / rangetime;
1418    
1419     - parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
1420     + parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
1421     if (!parisc_cache_flush_threshold)
1422     parisc_cache_flush_threshold = FLUSH_THRESHOLD;
1423    
1424     if (parisc_cache_flush_threshold > cache_info.dc_size)
1425     parisc_cache_flush_threshold = cache_info.dc_size;
1426    
1427     - printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
1428     + printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
1429     + parisc_cache_flush_threshold/1024);
1430     +
1431     + /* calculate TLB flush threshold */
1432     +
1433     + alltime = mfctl(16);
1434     + flush_tlb_all();
1435     + alltime = mfctl(16) - alltime;
1436     +
1437     + size = PAGE_SIZE;
1438     + start = (unsigned long) _text;
1439     + rangetime = mfctl(16);
1440     + while (start < (unsigned long) _end) {
1441     + flush_tlb_kernel_range(start, start + PAGE_SIZE);
1442     + start += PAGE_SIZE;
1443     + size += PAGE_SIZE;
1444     + }
1445     + rangetime = mfctl(16) - rangetime;
1446     +
1447     + printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
1448     + alltime, size, rangetime);
1449     +
1450     + parisc_tlb_flush_threshold = size * alltime / rangetime;
1451     + parisc_tlb_flush_threshold *= num_online_cpus();
1452     + parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
1453     + if (!parisc_tlb_flush_threshold)
1454     + parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
1455     +
1456     + printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
1457     + parisc_tlb_flush_threshold/1024);
1458     }
1459    
1460     extern void purge_kernel_dcache_page_asm(unsigned long);
1461     @@ -403,48 +435,45 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
1462     }
1463     EXPORT_SYMBOL(copy_user_page);
1464    
1465     -void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
1466     -{
1467     - unsigned long flags;
1468     -
1469     - /* Note: purge_tlb_entries can be called at startup with
1470     - no context. */
1471     -
1472     - purge_tlb_start(flags);
1473     - mtsp(mm->context, 1);
1474     - pdtlb(addr);
1475     - pitlb(addr);
1476     - purge_tlb_end(flags);
1477     -}
1478     -EXPORT_SYMBOL(purge_tlb_entries);
1479     -
1480     -void __flush_tlb_range(unsigned long sid, unsigned long start,
1481     - unsigned long end)
1482     +/* __flush_tlb_range()
1483     + *
1484     + * returns 1 if all TLBs were flushed.
1485     + */
1486     +int __flush_tlb_range(unsigned long sid, unsigned long start,
1487     + unsigned long end)
1488     {
1489     - unsigned long npages;
1490     + unsigned long flags, size;
1491    
1492     - npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1493     - if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
1494     + size = (end - start);
1495     + if (size >= parisc_tlb_flush_threshold) {
1496     flush_tlb_all();
1497     - else {
1498     - unsigned long flags;
1499     + return 1;
1500     + }
1501    
1502     + /* Purge TLB entries for small ranges using the pdtlb and
1503     + pitlb instructions. These instructions execute locally
1504     + but cause a purge request to be broadcast to other TLBs. */
1505     + if (likely(!split_tlb)) {
1506     + while (start < end) {
1507     + purge_tlb_start(flags);
1508     + mtsp(sid, 1);
1509     + pdtlb(start);
1510     + purge_tlb_end(flags);
1511     + start += PAGE_SIZE;
1512     + }
1513     + return 0;
1514     + }
1515     +
1516     + /* split TLB case */
1517     + while (start < end) {
1518     purge_tlb_start(flags);
1519     mtsp(sid, 1);
1520     - if (split_tlb) {
1521     - while (npages--) {
1522     - pdtlb(start);
1523     - pitlb(start);
1524     - start += PAGE_SIZE;
1525     - }
1526     - } else {
1527     - while (npages--) {
1528     - pdtlb(start);
1529     - start += PAGE_SIZE;
1530     - }
1531     - }
1532     + pdtlb(start);
1533     + pitlb(start);
1534     purge_tlb_end(flags);
1535     + start += PAGE_SIZE;
1536     }
1537     + return 0;
1538     }
1539    
1540     static void cacheflush_h_tmp_function(void *dummy)
1541     diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
1542     index 75819617f93b..c5ef4081b01d 100644
1543     --- a/arch/parisc/kernel/entry.S
1544     +++ b/arch/parisc/kernel/entry.S
1545     @@ -45,7 +45,7 @@
1546     .level 2.0
1547     #endif
1548    
1549     - .import pa_dbit_lock,data
1550     + .import pa_tlb_lock,data
1551    
1552     /* space_to_prot macro creates a prot id from a space id */
1553    
1554     @@ -420,8 +420,8 @@
1555     SHLREG %r9,PxD_VALUE_SHIFT,\pmd
1556     extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
1557     dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
1558     - shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
1559     - LDREG %r0(\pmd),\pte /* pmd is now pte */
1560     + shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
1561     + LDREG %r0(\pmd),\pte
1562     bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
1563     .endm
1564    
1565     @@ -453,57 +453,53 @@
1566     L2_ptep \pgd,\pte,\index,\va,\fault
1567     .endm
1568    
1569     - /* Acquire pa_dbit_lock lock. */
1570     - .macro dbit_lock spc,tmp,tmp1
1571     + /* Acquire pa_tlb_lock lock and recheck page is still present. */
1572     + .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
1573     #ifdef CONFIG_SMP
1574     cmpib,COND(=),n 0,\spc,2f
1575     - load32 PA(pa_dbit_lock),\tmp
1576     + load32 PA(pa_tlb_lock),\tmp
1577     1: LDCW 0(\tmp),\tmp1
1578     cmpib,COND(=) 0,\tmp1,1b
1579     nop
1580     + LDREG 0(\ptp),\pte
1581     + bb,<,n \pte,_PAGE_PRESENT_BIT,2f
1582     + b \fault
1583     + stw \spc,0(\tmp)
1584     2:
1585     #endif
1586     .endm
1587    
1588     - /* Release pa_dbit_lock lock without reloading lock address. */
1589     - .macro dbit_unlock0 spc,tmp
1590     + /* Release pa_tlb_lock lock without reloading lock address. */
1591     + .macro tlb_unlock0 spc,tmp
1592     #ifdef CONFIG_SMP
1593     or,COND(=) %r0,\spc,%r0
1594     stw \spc,0(\tmp)
1595     #endif
1596     .endm
1597    
1598     - /* Release pa_dbit_lock lock. */
1599     - .macro dbit_unlock1 spc,tmp
1600     + /* Release pa_tlb_lock lock. */
1601     + .macro tlb_unlock1 spc,tmp
1602     #ifdef CONFIG_SMP
1603     - load32 PA(pa_dbit_lock),\tmp
1604     - dbit_unlock0 \spc,\tmp
1605     + load32 PA(pa_tlb_lock),\tmp
1606     + tlb_unlock0 \spc,\tmp
1607     #endif
1608     .endm
1609    
1610     /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
1611     * don't needlessly dirty the cache line if it was already set */
1612     - .macro update_ptep spc,ptep,pte,tmp,tmp1
1613     -#ifdef CONFIG_SMP
1614     - or,COND(=) %r0,\spc,%r0
1615     - LDREG 0(\ptep),\pte
1616     -#endif
1617     + .macro update_accessed ptp,pte,tmp,tmp1
1618     ldi _PAGE_ACCESSED,\tmp1
1619     or \tmp1,\pte,\tmp
1620     and,COND(<>) \tmp1,\pte,%r0
1621     - STREG \tmp,0(\ptep)
1622     + STREG \tmp,0(\ptp)
1623     .endm
1624    
1625     /* Set the dirty bit (and accessed bit). No need to be
1626     * clever, this is only used from the dirty fault */
1627     - .macro update_dirty spc,ptep,pte,tmp
1628     -#ifdef CONFIG_SMP
1629     - or,COND(=) %r0,\spc,%r0
1630     - LDREG 0(\ptep),\pte
1631     -#endif
1632     + .macro update_dirty ptp,pte,tmp
1633     ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
1634     or \tmp,\pte,\pte
1635     - STREG \pte,0(\ptep)
1636     + STREG \pte,0(\ptp)
1637     .endm
1638    
1639     /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
1640     @@ -1148,14 +1144,14 @@ dtlb_miss_20w:
1641    
1642     L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1643    
1644     - dbit_lock spc,t0,t1
1645     - update_ptep spc,ptp,pte,t0,t1
1646     + tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1647     + update_accessed ptp,pte,t0,t1
1648    
1649     make_insert_tlb spc,pte,prot
1650    
1651     idtlbt pte,prot
1652     - dbit_unlock1 spc,t0
1653    
1654     + tlb_unlock1 spc,t0
1655     rfir
1656     nop
1657    
1658     @@ -1174,14 +1170,14 @@ nadtlb_miss_20w:
1659    
1660     L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1661    
1662     - dbit_lock spc,t0,t1
1663     - update_ptep spc,ptp,pte,t0,t1
1664     + tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1665     + update_accessed ptp,pte,t0,t1
1666    
1667     make_insert_tlb spc,pte,prot
1668    
1669     idtlbt pte,prot
1670     - dbit_unlock1 spc,t0
1671    
1672     + tlb_unlock1 spc,t0
1673     rfir
1674     nop
1675    
1676     @@ -1202,20 +1198,20 @@ dtlb_miss_11:
1677    
1678     L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1679    
1680     - dbit_lock spc,t0,t1
1681     - update_ptep spc,ptp,pte,t0,t1
1682     + tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1683     + update_accessed ptp,pte,t0,t1
1684    
1685     make_insert_tlb_11 spc,pte,prot
1686    
1687     - mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1688     + mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1689     mtsp spc,%sr1
1690    
1691     idtlba pte,(%sr1,va)
1692     idtlbp prot,(%sr1,va)
1693    
1694     - mtsp t0, %sr1 /* Restore sr1 */
1695     - dbit_unlock1 spc,t0
1696     + mtsp t1, %sr1 /* Restore sr1 */
1697    
1698     + tlb_unlock1 spc,t0
1699     rfir
1700     nop
1701    
1702     @@ -1235,21 +1231,20 @@ nadtlb_miss_11:
1703    
1704     L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1705    
1706     - dbit_lock spc,t0,t1
1707     - update_ptep spc,ptp,pte,t0,t1
1708     + tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1709     + update_accessed ptp,pte,t0,t1
1710    
1711     make_insert_tlb_11 spc,pte,prot
1712    
1713     -
1714     - mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1715     + mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1716     mtsp spc,%sr1
1717    
1718     idtlba pte,(%sr1,va)
1719     idtlbp prot,(%sr1,va)
1720    
1721     - mtsp t0, %sr1 /* Restore sr1 */
1722     - dbit_unlock1 spc,t0
1723     + mtsp t1, %sr1 /* Restore sr1 */
1724    
1725     + tlb_unlock1 spc,t0
1726     rfir
1727     nop
1728    
1729     @@ -1269,16 +1264,16 @@ dtlb_miss_20:
1730    
1731     L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1732    
1733     - dbit_lock spc,t0,t1
1734     - update_ptep spc,ptp,pte,t0,t1
1735     + tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1736     + update_accessed ptp,pte,t0,t1
1737    
1738     make_insert_tlb spc,pte,prot
1739    
1740     - f_extend pte,t0
1741     + f_extend pte,t1
1742    
1743     idtlbt pte,prot
1744     - dbit_unlock1 spc,t0
1745    
1746     + tlb_unlock1 spc,t0
1747     rfir
1748     nop
1749    
1750     @@ -1297,16 +1292,16 @@ nadtlb_miss_20:
1751    
1752     L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1753    
1754     - dbit_lock spc,t0,t1
1755     - update_ptep spc,ptp,pte,t0,t1
1756     + tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1757     + update_accessed ptp,pte,t0,t1
1758    
1759     make_insert_tlb spc,pte,prot
1760    
1761     - f_extend pte,t0
1762     + f_extend pte,t1
1763    
1764     - idtlbt pte,prot
1765     - dbit_unlock1 spc,t0
1766     + idtlbt pte,prot
1767    
1768     + tlb_unlock1 spc,t0
1769     rfir
1770     nop
1771    
1772     @@ -1406,14 +1401,14 @@ itlb_miss_20w:
1773    
1774     L3_ptep ptp,pte,t0,va,itlb_fault
1775    
1776     - dbit_lock spc,t0,t1
1777     - update_ptep spc,ptp,pte,t0,t1
1778     + tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1779     + update_accessed ptp,pte,t0,t1
1780    
1781     make_insert_tlb spc,pte,prot
1782    
1783     iitlbt pte,prot
1784     - dbit_unlock1 spc,t0
1785    
1786     + tlb_unlock1 spc,t0
1787     rfir
1788     nop
1789    
1790     @@ -1430,14 +1425,14 @@ naitlb_miss_20w:
1791    
1792     L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1793    
1794     - dbit_lock spc,t0,t1
1795     - update_ptep spc,ptp,pte,t0,t1
1796     + tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1797     + update_accessed ptp,pte,t0,t1
1798    
1799     make_insert_tlb spc,pte,prot
1800    
1801     iitlbt pte,prot
1802     - dbit_unlock1 spc,t0
1803    
1804     + tlb_unlock1 spc,t0
1805     rfir
1806     nop
1807    
1808     @@ -1458,20 +1453,20 @@ itlb_miss_11:
1809    
1810     L2_ptep ptp,pte,t0,va,itlb_fault
1811    
1812     - dbit_lock spc,t0,t1
1813     - update_ptep spc,ptp,pte,t0,t1
1814     + tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1815     + update_accessed ptp,pte,t0,t1
1816    
1817     make_insert_tlb_11 spc,pte,prot
1818    
1819     - mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1820     + mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1821     mtsp spc,%sr1
1822    
1823     iitlba pte,(%sr1,va)
1824     iitlbp prot,(%sr1,va)
1825    
1826     - mtsp t0, %sr1 /* Restore sr1 */
1827     - dbit_unlock1 spc,t0
1828     + mtsp t1, %sr1 /* Restore sr1 */
1829    
1830     + tlb_unlock1 spc,t0
1831     rfir
1832     nop
1833    
1834     @@ -1482,20 +1477,20 @@ naitlb_miss_11:
1835    
1836     L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1837    
1838     - dbit_lock spc,t0,t1
1839     - update_ptep spc,ptp,pte,t0,t1
1840     + tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1841     + update_accessed ptp,pte,t0,t1
1842    
1843     make_insert_tlb_11 spc,pte,prot
1844    
1845     - mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1846     + mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1847     mtsp spc,%sr1
1848    
1849     iitlba pte,(%sr1,va)
1850     iitlbp prot,(%sr1,va)
1851    
1852     - mtsp t0, %sr1 /* Restore sr1 */
1853     - dbit_unlock1 spc,t0
1854     + mtsp t1, %sr1 /* Restore sr1 */
1855    
1856     + tlb_unlock1 spc,t0
1857     rfir
1858     nop
1859    
1860     @@ -1516,16 +1511,16 @@ itlb_miss_20:
1861    
1862     L2_ptep ptp,pte,t0,va,itlb_fault
1863    
1864     - dbit_lock spc,t0,t1
1865     - update_ptep spc,ptp,pte,t0,t1
1866     + tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1867     + update_accessed ptp,pte,t0,t1
1868    
1869     make_insert_tlb spc,pte,prot
1870    
1871     - f_extend pte,t0
1872     + f_extend pte,t1
1873    
1874     iitlbt pte,prot
1875     - dbit_unlock1 spc,t0
1876    
1877     + tlb_unlock1 spc,t0
1878     rfir
1879     nop
1880    
1881     @@ -1536,16 +1531,16 @@ naitlb_miss_20:
1882    
1883     L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1884    
1885     - dbit_lock spc,t0,t1
1886     - update_ptep spc,ptp,pte,t0,t1
1887     + tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1888     + update_accessed ptp,pte,t0,t1
1889    
1890     make_insert_tlb spc,pte,prot
1891    
1892     - f_extend pte,t0
1893     + f_extend pte,t1
1894    
1895     iitlbt pte,prot
1896     - dbit_unlock1 spc,t0
1897    
1898     + tlb_unlock1 spc,t0
1899     rfir
1900     nop
1901    
1902     @@ -1568,14 +1563,14 @@ dbit_trap_20w:
1903    
1904     L3_ptep ptp,pte,t0,va,dbit_fault
1905    
1906     - dbit_lock spc,t0,t1
1907     - update_dirty spc,ptp,pte,t1
1908     + tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1909     + update_dirty ptp,pte,t1
1910    
1911     make_insert_tlb spc,pte,prot
1912    
1913     idtlbt pte,prot
1914     - dbit_unlock0 spc,t0
1915    
1916     + tlb_unlock0 spc,t0
1917     rfir
1918     nop
1919     #else
1920     @@ -1588,8 +1583,8 @@ dbit_trap_11:
1921    
1922     L2_ptep ptp,pte,t0,va,dbit_fault
1923    
1924     - dbit_lock spc,t0,t1
1925     - update_dirty spc,ptp,pte,t1
1926     + tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1927     + update_dirty ptp,pte,t1
1928    
1929     make_insert_tlb_11 spc,pte,prot
1930    
1931     @@ -1600,8 +1595,8 @@ dbit_trap_11:
1932     idtlbp prot,(%sr1,va)
1933    
1934     mtsp t1, %sr1 /* Restore sr1 */
1935     - dbit_unlock0 spc,t0
1936    
1937     + tlb_unlock0 spc,t0
1938     rfir
1939     nop
1940    
1941     @@ -1612,16 +1607,16 @@ dbit_trap_20:
1942    
1943     L2_ptep ptp,pte,t0,va,dbit_fault
1944    
1945     - dbit_lock spc,t0,t1
1946     - update_dirty spc,ptp,pte,t1
1947     + tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1948     + update_dirty ptp,pte,t1
1949    
1950     make_insert_tlb spc,pte,prot
1951    
1952     f_extend pte,t1
1953    
1954     - idtlbt pte,prot
1955     - dbit_unlock0 spc,t0
1956     + idtlbt pte,prot
1957    
1958     + tlb_unlock0 spc,t0
1959     rfir
1960     nop
1961     #endif
1962     diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1963     index 47ee620d15d2..7f67c4c96a7a 100644
1964     --- a/arch/parisc/kernel/traps.c
1965     +++ b/arch/parisc/kernel/traps.c
1966     @@ -43,10 +43,6 @@
1967    
1968     #include "../math-emu/math-emu.h" /* for handle_fpe() */
1969    
1970     -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
1971     -DEFINE_SPINLOCK(pa_dbit_lock);
1972     -#endif
1973     -
1974     static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
1975     struct pt_regs *regs);
1976    
1977     diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
1978     index ccde8f084ce4..112ccf497562 100644
1979     --- a/arch/powerpc/kernel/idle_power7.S
1980     +++ b/arch/powerpc/kernel/idle_power7.S
1981     @@ -52,6 +52,22 @@
1982     .text
1983    
1984     /*
1985     + * Used by threads when the lock bit of core_idle_state is set.
1986     + * Threads will spin in HMT_LOW until the lock bit is cleared.
1987     + * r14 - pointer to core_idle_state
1988     + * r15 - used to load contents of core_idle_state
1989     + */
1990     +
1991     +core_idle_lock_held:
1992     + HMT_LOW
1993     +3: lwz r15,0(r14)
1994     + andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT
1995     + bne 3b
1996     + HMT_MEDIUM
1997     + lwarx r15,0,r14
1998     + blr
1999     +
2000     +/*
2001     * Pass requested state in r3:
2002     * r3 - PNV_THREAD_NAP/SLEEP/WINKLE
2003     *
2004     @@ -150,6 +166,10 @@ power7_enter_nap_mode:
2005     ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
2006     lwarx_loop1:
2007     lwarx r15,0,r14
2008     +
2009     + andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
2010     + bnel core_idle_lock_held
2011     +
2012     andc r15,r15,r7 /* Clear thread bit */
2013    
2014     andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
2015     @@ -294,7 +314,7 @@ lwarx_loop2:
2016     * workaround undo code or resyncing timebase or restoring context
2017     * In either case loop until the lock bit is cleared.
2018     */
2019     - bne core_idle_lock_held
2020     + bnel core_idle_lock_held
2021    
2022     cmpwi cr2,r15,0
2023     lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
2024     @@ -319,15 +339,6 @@ lwarx_loop2:
2025     isync
2026     b common_exit
2027    
2028     -core_idle_lock_held:
2029     - HMT_LOW
2030     -core_idle_lock_loop:
2031     - lwz r15,0(14)
2032     - andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
2033     - bne core_idle_lock_loop
2034     - HMT_MEDIUM
2035     - b lwarx_loop2
2036     -
2037     first_thread_in_subcore:
2038     /* First thread in subcore to wakeup */
2039     ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
2040     diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
2041     index cfad7fca01d6..d7697ab802f6 100644
2042     --- a/arch/s390/include/asm/ctl_reg.h
2043     +++ b/arch/s390/include/asm/ctl_reg.h
2044     @@ -57,7 +57,10 @@ union ctlreg0 {
2045     unsigned long lap : 1; /* Low-address-protection control */
2046     unsigned long : 4;
2047     unsigned long edat : 1; /* Enhanced-DAT-enablement control */
2048     - unsigned long : 23;
2049     + unsigned long : 4;
2050     + unsigned long afp : 1; /* AFP-register control */
2051     + unsigned long vx : 1; /* Vector enablement control */
2052     + unsigned long : 17;
2053     };
2054     };
2055    
2056     diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
2057     index bff5e3b6d822..8ba32436effe 100644
2058     --- a/arch/s390/kernel/cache.c
2059     +++ b/arch/s390/kernel/cache.c
2060     @@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
2061     union cache_topology ct;
2062     enum cache_type ctype;
2063    
2064     + if (!test_facility(34))
2065     + return -EOPNOTSUPP;
2066     if (!this_cpu_ci)
2067     return -EINVAL;
2068     ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
2069     diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
2070     index 505c17c0ae1a..56b550893593 100644
2071     --- a/arch/s390/kernel/nmi.c
2072     +++ b/arch/s390/kernel/nmi.c
2073     @@ -21,6 +21,7 @@
2074     #include <asm/nmi.h>
2075     #include <asm/crw.h>
2076     #include <asm/switch_to.h>
2077     +#include <asm/ctl_reg.h>
2078    
2079     struct mcck_struct {
2080     int kill_task;
2081     @@ -129,26 +130,30 @@ static int notrace s390_revalidate_registers(struct mci *mci)
2082     } else
2083     asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
2084    
2085     - asm volatile(
2086     - " ld 0,0(%0)\n"
2087     - " ld 1,8(%0)\n"
2088     - " ld 2,16(%0)\n"
2089     - " ld 3,24(%0)\n"
2090     - " ld 4,32(%0)\n"
2091     - " ld 5,40(%0)\n"
2092     - " ld 6,48(%0)\n"
2093     - " ld 7,56(%0)\n"
2094     - " ld 8,64(%0)\n"
2095     - " ld 9,72(%0)\n"
2096     - " ld 10,80(%0)\n"
2097     - " ld 11,88(%0)\n"
2098     - " ld 12,96(%0)\n"
2099     - " ld 13,104(%0)\n"
2100     - " ld 14,112(%0)\n"
2101     - " ld 15,120(%0)\n"
2102     - : : "a" (fpt_save_area));
2103     - /* Revalidate vector registers */
2104     - if (MACHINE_HAS_VX && current->thread.vxrs) {
2105     + if (!MACHINE_HAS_VX) {
2106     + /* Revalidate floating point registers */
2107     + asm volatile(
2108     + " ld 0,0(%0)\n"
2109     + " ld 1,8(%0)\n"
2110     + " ld 2,16(%0)\n"
2111     + " ld 3,24(%0)\n"
2112     + " ld 4,32(%0)\n"
2113     + " ld 5,40(%0)\n"
2114     + " ld 6,48(%0)\n"
2115     + " ld 7,56(%0)\n"
2116     + " ld 8,64(%0)\n"
2117     + " ld 9,72(%0)\n"
2118     + " ld 10,80(%0)\n"
2119     + " ld 11,88(%0)\n"
2120     + " ld 12,96(%0)\n"
2121     + " ld 13,104(%0)\n"
2122     + " ld 14,112(%0)\n"
2123     + " ld 15,120(%0)\n"
2124     + : : "a" (fpt_save_area));
2125     + } else {
2126     + /* Revalidate vector registers */
2127     + union ctlreg0 cr0;
2128     +
2129     if (!mci->vr) {
2130     /*
2131     * Vector registers can't be restored and therefore
2132     @@ -156,8 +161,12 @@ static int notrace s390_revalidate_registers(struct mci *mci)
2133     */
2134     kill_task = 1;
2135     }
2136     + cr0.val = S390_lowcore.cregs_save_area[0];
2137     + cr0.afp = cr0.vx = 1;
2138     + __ctl_load(cr0.val, 0, 0);
2139     restore_vx_regs((__vector128 *)
2140     - S390_lowcore.vector_save_area_addr);
2141     + &S390_lowcore.vector_save_area);
2142     + __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
2143     }
2144     /* Revalidate access registers */
2145     asm volatile(
2146     diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
2147     index dc5edc29b73a..8f587d871b9f 100644
2148     --- a/arch/s390/kernel/process.c
2149     +++ b/arch/s390/kernel/process.c
2150     @@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
2151     asmlinkage void execve_tail(void)
2152     {
2153     current->thread.fp_regs.fpc = 0;
2154     - asm volatile("sfpc %0,%0" : : "d" (0));
2155     + asm volatile("sfpc %0" : : "d" (0));
2156     }
2157    
2158     /*
2159     diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
2160     index 43c3169ea49c..ada0c07fe1a8 100644
2161     --- a/arch/s390/kernel/sclp.S
2162     +++ b/arch/s390/kernel/sclp.S
2163     @@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
2164     jno .Lesa2
2165     ahi %r15,-80
2166     stmh %r6,%r15,96(%r15) # store upper register halves
2167     + basr %r13,0
2168     + lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves
2169     .Lesa2:
2170     lr %r10,%r2 # save string pointer
2171     lhi %r2,0
2172     @@ -291,6 +293,8 @@ ENTRY(_sclp_print_early)
2173     .Lesa3:
2174     lm %r6,%r15,120(%r15) # restore registers
2175     br %r14
2176     +.Lzeroes:
2177     + .fill 64,4,0
2178    
2179     .LwritedataS4:
2180     .long 0x00760005 # SCLP command for write data
2181     diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
2182     index 9afb9d602f84..dc2d7aa56440 100644
2183     --- a/arch/s390/net/bpf_jit_comp.c
2184     +++ b/arch/s390/net/bpf_jit_comp.c
2185     @@ -415,13 +415,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
2186     EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
2187     BPF_REG_1, offsetof(struct sk_buff, data));
2188     }
2189     - /* BPF compatibility: clear A (%b7) and X (%b8) registers */
2190     - if (REG_SEEN(BPF_REG_7))
2191     - /* lghi %b7,0 */
2192     - EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
2193     - if (REG_SEEN(BPF_REG_8))
2194     - /* lghi %b8,0 */
2195     - EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
2196     + /* BPF compatibility: clear A (%b0) and X (%b7) registers */
2197     + if (REG_SEEN(BPF_REG_A))
2198     + /* lghi %ba,0 */
2199     + EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
2200     + if (REG_SEEN(BPF_REG_X))
2201     + /* lghi %bx,0 */
2202     + EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
2203     }
2204    
2205     /*
2206     diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
2207     index d366675e4bf8..396b5c96e272 100644
2208     --- a/arch/tile/kernel/setup.c
2209     +++ b/arch/tile/kernel/setup.c
2210     @@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
2211    
2212     void __init free_initrd_mem(unsigned long begin, unsigned long end)
2213     {
2214     - free_bootmem(__pa(begin), end - begin);
2215     + free_bootmem_late(__pa(begin), end - begin);
2216     }
2217    
2218     static int __init setup_initrd(char *str)
2219     diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
2220     index 48304b89b601..0cdc154a22b5 100644
2221     --- a/arch/x86/boot/compressed/eboot.c
2222     +++ b/arch/x86/boot/compressed/eboot.c
2223     @@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
2224     unsigned int e820_type = 0;
2225     unsigned long m = efi->efi_memmap;
2226    
2227     +#ifdef CONFIG_X86_64
2228     + m |= (u64)efi->efi_memmap_hi << 32;
2229     +#endif
2230     +
2231     d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
2232     switch (d->type) {
2233     case EFI_RESERVED_TYPE:
2234     diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
2235     index 8b22422fbad8..74a2a8dc9908 100644
2236     --- a/arch/x86/include/asm/kasan.h
2237     +++ b/arch/x86/include/asm/kasan.h
2238     @@ -14,15 +14,11 @@
2239    
2240     #ifndef __ASSEMBLY__
2241    
2242     -extern pte_t kasan_zero_pte[];
2243     -extern pte_t kasan_zero_pmd[];
2244     -extern pte_t kasan_zero_pud[];
2245     -
2246     #ifdef CONFIG_KASAN
2247     -void __init kasan_map_early_shadow(pgd_t *pgd);
2248     +void __init kasan_early_init(void);
2249     void __init kasan_init(void);
2250     #else
2251     -static inline void kasan_map_early_shadow(pgd_t *pgd) { }
2252     +static inline void kasan_early_init(void) { }
2253     static inline void kasan_init(void) { }
2254     #endif
2255    
2256     diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
2257     index 883f6b933fa4..e997f70f80c4 100644
2258     --- a/arch/x86/include/asm/mmu_context.h
2259     +++ b/arch/x86/include/asm/mmu_context.h
2260     @@ -23,7 +23,7 @@ extern struct static_key rdpmc_always_available;
2261    
2262     static inline void load_mm_cr4(struct mm_struct *mm)
2263     {
2264     - if (static_key_true(&rdpmc_always_available) ||
2265     + if (static_key_false(&rdpmc_always_available) ||
2266     atomic_read(&mm->context.perf_rdpmc_allowed))
2267     cr4_set_bits(X86_CR4_PCE);
2268     else
2269     diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
2270     index e4d1b8b738fa..cb77b11bc414 100644
2271     --- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
2272     +++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
2273     @@ -934,6 +934,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
2274     return 0;
2275    
2276     /*
2277     + * Getting up-to-date values requires an SMP IPI which is not
2278     + * possible if we're being called in interrupt context. Return
2279     + * the cached values instead.
2280     + */
2281     + if (unlikely(in_interrupt()))
2282     + goto out;
2283     +
2284     + /*
2285     * Notice that we don't perform the reading of an RMID
2286     * atomically, because we can't hold a spin lock across the
2287     * IPIs.
2288     diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
2289     index 5a4668136e98..f129a9af6357 100644
2290     --- a/arch/x86/kernel/head64.c
2291     +++ b/arch/x86/kernel/head64.c
2292     @@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
2293     /* Kill off the identity-map trampoline */
2294     reset_early_page_tables();
2295    
2296     - kasan_map_early_shadow(early_level4_pgt);
2297     -
2298     - /* clear bss before set_intr_gate with early_idt_handler */
2299     clear_bss();
2300    
2301     + clear_page(init_level4_pgt);
2302     +
2303     + kasan_early_init();
2304     +
2305     for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
2306     set_intr_gate(i, early_idt_handler_array[i]);
2307     load_idt((const struct desc_ptr *)&idt_descr);
2308     @@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
2309     */
2310     load_ucode_bsp();
2311    
2312     - clear_page(init_level4_pgt);
2313     /* set init_level4_pgt kernel high mapping*/
2314     init_level4_pgt[511] = early_level4_pgt[511];
2315    
2316     - kasan_map_early_shadow(init_level4_pgt);
2317     -
2318     x86_64_start_reservations(real_mode_data);
2319     }
2320    
2321     diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
2322     index df7e78057ae0..7e5da2cbe59e 100644
2323     --- a/arch/x86/kernel/head_64.S
2324     +++ b/arch/x86/kernel/head_64.S
2325     @@ -516,38 +516,9 @@ ENTRY(phys_base)
2326     /* This must match the first entry in level2_kernel_pgt */
2327     .quad 0x0000000000000000
2328    
2329     -#ifdef CONFIG_KASAN
2330     -#define FILL(VAL, COUNT) \
2331     - .rept (COUNT) ; \
2332     - .quad (VAL) ; \
2333     - .endr
2334     -
2335     -NEXT_PAGE(kasan_zero_pte)
2336     - FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
2337     -NEXT_PAGE(kasan_zero_pmd)
2338     - FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
2339     -NEXT_PAGE(kasan_zero_pud)
2340     - FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
2341     -
2342     -#undef FILL
2343     -#endif
2344     -
2345     -
2346     #include "../../x86/xen/xen-head.S"
2347    
2348     __PAGE_ALIGNED_BSS
2349     NEXT_PAGE(empty_zero_page)
2350     .skip PAGE_SIZE
2351    
2352     -#ifdef CONFIG_KASAN
2353     -/*
2354     - * This page used as early shadow. We don't use empty_zero_page
2355     - * at early stages, stack instrumentation could write some garbage
2356     - * to this page.
2357     - * Latter we reuse it as zero shadow for large ranges of memory
2358     - * that allowed to access, but not instrumented by kasan
2359     - * (vmalloc/vmemmap ...).
2360     - */
2361     -NEXT_PAGE(kasan_zero_page)
2362     - .skip PAGE_SIZE
2363     -#endif
2364     diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
2365     index 4860906c6b9f..9a54dbe98064 100644
2366     --- a/arch/x86/mm/kasan_init_64.c
2367     +++ b/arch/x86/mm/kasan_init_64.c
2368     @@ -11,7 +11,19 @@
2369     extern pgd_t early_level4_pgt[PTRS_PER_PGD];
2370     extern struct range pfn_mapped[E820_X_MAX];
2371    
2372     -extern unsigned char kasan_zero_page[PAGE_SIZE];
2373     +static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
2374     +static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
2375     +static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
2376     +
2377     +/*
2378     + * This page used as early shadow. We don't use empty_zero_page
2379     + * at early stages, stack instrumentation could write some garbage
2380     + * to this page.
2381     + * Latter we reuse it as zero shadow for large ranges of memory
2382     + * that allowed to access, but not instrumented by kasan
2383     + * (vmalloc/vmemmap ...).
2384     + */
2385     +static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
2386    
2387     static int __init map_range(struct range *range)
2388     {
2389     @@ -36,7 +48,7 @@ static void __init clear_pgds(unsigned long start,
2390     pgd_clear(pgd_offset_k(start));
2391     }
2392    
2393     -void __init kasan_map_early_shadow(pgd_t *pgd)
2394     +static void __init kasan_map_early_shadow(pgd_t *pgd)
2395     {
2396     int i;
2397     unsigned long start = KASAN_SHADOW_START;
2398     @@ -73,7 +85,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
2399     while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
2400     WARN_ON(!pmd_none(*pmd));
2401     set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
2402     - | __PAGE_KERNEL_RO));
2403     + | _KERNPG_TABLE));
2404     addr += PMD_SIZE;
2405     pmd = pmd_offset(pud, addr);
2406     }
2407     @@ -99,7 +111,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
2408     while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
2409     WARN_ON(!pud_none(*pud));
2410     set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
2411     - | __PAGE_KERNEL_RO));
2412     + | _KERNPG_TABLE));
2413     addr += PUD_SIZE;
2414     pud = pud_offset(pgd, addr);
2415     }
2416     @@ -124,7 +136,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
2417     while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
2418     WARN_ON(!pgd_none(*pgd));
2419     set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
2420     - | __PAGE_KERNEL_RO));
2421     + | _KERNPG_TABLE));
2422     addr += PGDIR_SIZE;
2423     pgd = pgd_offset_k(addr);
2424     }
2425     @@ -166,6 +178,26 @@ static struct notifier_block kasan_die_notifier = {
2426     };
2427     #endif
2428    
2429     +void __init kasan_early_init(void)
2430     +{
2431     + int i;
2432     + pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
2433     + pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
2434     + pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
2435     +
2436     + for (i = 0; i < PTRS_PER_PTE; i++)
2437     + kasan_zero_pte[i] = __pte(pte_val);
2438     +
2439     + for (i = 0; i < PTRS_PER_PMD; i++)
2440     + kasan_zero_pmd[i] = __pmd(pmd_val);
2441     +
2442     + for (i = 0; i < PTRS_PER_PUD; i++)
2443     + kasan_zero_pud[i] = __pud(pud_val);
2444     +
2445     + kasan_map_early_shadow(early_level4_pgt);
2446     + kasan_map_early_shadow(init_level4_pgt);
2447     +}
2448     +
2449     void __init kasan_init(void)
2450     {
2451     int i;
2452     @@ -176,6 +208,7 @@ void __init kasan_init(void)
2453    
2454     memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
2455     load_cr3(early_level4_pgt);
2456     + __flush_tlb_all();
2457    
2458     clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
2459    
2460     @@ -202,5 +235,6 @@ void __init kasan_init(void)
2461     memset(kasan_zero_page, 0, PAGE_SIZE);
2462    
2463     load_cr3(init_level4_pgt);
2464     + __flush_tlb_all();
2465     init_task.kasan_depth = 0;
2466     }
2467     diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
2468     index 3250f2371aea..90b924acd982 100644
2469     --- a/arch/x86/mm/tlb.c
2470     +++ b/arch/x86/mm/tlb.c
2471     @@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
2472     } else {
2473     unsigned long addr;
2474     unsigned long nr_pages =
2475     - f->flush_end - f->flush_start / PAGE_SIZE;
2476     + (f->flush_end - f->flush_start) / PAGE_SIZE;
2477     addr = f->flush_start;
2478     while (addr < f->flush_end) {
2479     __flush_tlb_single(addr);
2480     diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
2481     index 02744df576d5..841ea05e1b02 100644
2482     --- a/arch/x86/platform/efi/efi.c
2483     +++ b/arch/x86/platform/efi/efi.c
2484     @@ -946,6 +946,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
2485    
2486     static int __init arch_parse_efi_cmdline(char *str)
2487     {
2488     + if (!str) {
2489     + pr_warn("need at least one option\n");
2490     + return -EINVAL;
2491     + }
2492     +
2493     if (parse_option_str(str, "old_map"))
2494     set_bit(EFI_OLD_MEMMAP, &efi.flags);
2495     if (parse_option_str(str, "debug"))
2496     diff --git a/block/bio-integrity.c b/block/bio-integrity.c
2497     index 5cbd5d9ea61d..39ce74d10e2b 100644
2498     --- a/block/bio-integrity.c
2499     +++ b/block/bio-integrity.c
2500     @@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
2501     unsigned long idx = BIO_POOL_NONE;
2502     unsigned inline_vecs;
2503    
2504     - if (!bs) {
2505     + if (!bs || !bs->bio_integrity_pool) {
2506     bip = kmalloc(sizeof(struct bio_integrity_payload) +
2507     sizeof(struct bio_vec) * nr_vecs, gfp_mask);
2508     inline_vecs = nr_vecs;
2509     @@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio)
2510     kfree(page_address(bip->bip_vec->bv_page) +
2511     bip->bip_vec->bv_offset);
2512    
2513     - if (bs) {
2514     + if (bs && bs->bio_integrity_pool) {
2515     if (bip->bip_slab != BIO_POOL_NONE)
2516     bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
2517     bip->bip_slab);
2518     diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
2519     index 0ac817b750db..6817e28960b7 100644
2520     --- a/block/blk-cgroup.c
2521     +++ b/block/blk-cgroup.c
2522     @@ -716,8 +716,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
2523     return -EINVAL;
2524    
2525     disk = get_gendisk(MKDEV(major, minor), &part);
2526     - if (!disk || part)
2527     + if (!disk)
2528     return -EINVAL;
2529     + if (part) {
2530     + put_disk(disk);
2531     + return -EINVAL;
2532     + }
2533    
2534     rcu_read_lock();
2535     spin_lock_irq(disk->queue->queue_lock);
2536     diff --git a/block/blk-mq.c b/block/blk-mq.c
2537     index 594eea04266e..2dc1fd6c5bdb 100644
2538     --- a/block/blk-mq.c
2539     +++ b/block/blk-mq.c
2540     @@ -1968,7 +1968,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2541     goto err_hctxs;
2542    
2543     setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
2544     - blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
2545     + blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2546    
2547     q->nr_queues = nr_cpu_ids;
2548     q->nr_hw_queues = set->nr_hw_queues;
2549     diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
2550     index 7ccc084bf1df..85aa76116a30 100644
2551     --- a/drivers/ata/libata-pmp.c
2552     +++ b/drivers/ata/libata-pmp.c
2553     @@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
2554     ATA_LFLAG_NO_SRST |
2555     ATA_LFLAG_ASSUME_ATA;
2556     }
2557     + } else if (vendor == 0x11ab && devid == 0x4140) {
2558     + /* Marvell 4140 quirks */
2559     + ata_for_each_link(link, ap, EDGE) {
2560     + /* port 4 is for SEMB device and it doesn't like SRST */
2561     + if (link->pmp == 4)
2562     + link->flags |= ATA_LFLAG_DISABLED;
2563     + }
2564     }
2565     }
2566    
2567     diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
2568     index bf12a25eb3a2..0f8db28353c5 100644
2569     --- a/drivers/clk/st/clk-flexgen.c
2570     +++ b/drivers/clk/st/clk-flexgen.c
2571     @@ -303,6 +303,8 @@ void __init st_of_flexgen_setup(struct device_node *np)
2572     if (!rlock)
2573     goto err;
2574    
2575     + spin_lock_init(rlock);
2576     +
2577     for (i = 0; i < clk_data->clk_num; i++) {
2578     struct clk *clk;
2579     const char *clk_name;
2580     diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
2581     index a917c4c7eaa9..6ae068ab07c8 100644
2582     --- a/drivers/clk/st/clkgen-fsyn.c
2583     +++ b/drivers/clk/st/clkgen-fsyn.c
2584     @@ -340,7 +340,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
2585     CLKGEN_FIELD(0x30c, 0xf, 20),
2586     CLKGEN_FIELD(0x310, 0xf, 20) },
2587     .lockstatus_present = true,
2588     - .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
2589     + .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
2590     .powerup_polarity = 1,
2591     .standby_polarity = 1,
2592     .pll_ops = &st_quadfs_pll_c32_ops,
2593     diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
2594     index fdcff10f6d30..ef6514636bfc 100644
2595     --- a/drivers/clk/st/clkgen-mux.c
2596     +++ b/drivers/clk/st/clkgen-mux.c
2597     @@ -582,7 +582,7 @@ static struct clkgen_mux_data stih416_a9_mux_data = {
2598     };
2599     static struct clkgen_mux_data stih407_a9_mux_data = {
2600     .offset = 0x1a4,
2601     - .shift = 1,
2602     + .shift = 0,
2603     .width = 2,
2604     };
2605    
2606     diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
2607     index c45d274a75c8..6f9d27f9001c 100644
2608     --- a/drivers/cpufreq/intel_pstate.c
2609     +++ b/drivers/cpufreq/intel_pstate.c
2610     @@ -678,6 +678,7 @@ static struct cpu_defaults knl_params = {
2611     .get_max = core_get_max_pstate,
2612     .get_min = core_get_min_pstate,
2613     .get_turbo = knl_get_turbo_pstate,
2614     + .get_scaling = core_get_scaling,
2615     .set = core_set_pstate,
2616     },
2617     };
2618     diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
2619     index 46307098f8ba..0a70e46d5416 100644
2620     --- a/drivers/crypto/omap-des.c
2621     +++ b/drivers/crypto/omap-des.c
2622     @@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
2623     dmaengine_terminate_all(dd->dma_lch_in);
2624     dmaengine_terminate_all(dd->dma_lch_out);
2625    
2626     - dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
2627     - dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
2628     -
2629     return err;
2630     }
2631    
2632     diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
2633     index 4fd9961d552e..d42537425438 100644
2634     --- a/drivers/firmware/efi/cper.c
2635     +++ b/drivers/firmware/efi/cper.c
2636     @@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
2637     return ret;
2638     }
2639    
2640     -static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
2641     +static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
2642     + int len)
2643     {
2644     struct cper_mem_err_compact cmem;
2645    
2646     + /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
2647     + if (len == sizeof(struct cper_sec_mem_err_old) &&
2648     + (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
2649     + pr_err(FW_WARN "valid bits set for fields beyond structure\n");
2650     + return;
2651     + }
2652     if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
2653     printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
2654     if (mem->validation_bits & CPER_MEM_VALID_PA)
2655     @@ -405,8 +412,10 @@ static void cper_estatus_print_section(
2656     } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
2657     struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
2658     printk("%s""section_type: memory error\n", newpfx);
2659     - if (gdata->error_data_length >= sizeof(*mem_err))
2660     - cper_print_mem(newpfx, mem_err);
2661     + if (gdata->error_data_length >=
2662     + sizeof(struct cper_sec_mem_err_old))
2663     + cper_print_mem(newpfx, mem_err,
2664     + gdata->error_data_length);
2665     else
2666     goto err_section_too_small;
2667     } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
2668     diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
2669     index e14363d12690..63226e9036a1 100644
2670     --- a/drivers/firmware/efi/efi.c
2671     +++ b/drivers/firmware/efi/efi.c
2672     @@ -57,6 +57,11 @@ bool efi_runtime_disabled(void)
2673    
2674     static int __init parse_efi_cmdline(char *str)
2675     {
2676     + if (!str) {
2677     + pr_warn("need at least one option\n");
2678     + return -EINVAL;
2679     + }
2680     +
2681     if (parse_option_str(str, "noruntime"))
2682     disable_runtime = true;
2683    
2684     diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
2685     index 89049335b738..cd6dae08175e 100644
2686     --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
2687     +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
2688     @@ -863,8 +863,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
2689    
2690     pm_runtime_get_sync(dev->dev);
2691    
2692     + mutex_lock(&cli->mutex);
2693     if (cli->abi16)
2694     nouveau_abi16_fini(cli->abi16);
2695     + mutex_unlock(&cli->mutex);
2696    
2697     mutex_lock(&drm->client.mutex);
2698     list_del(&cli->head);
2699     diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
2700     index 4ef602c5469d..495c57644ced 100644
2701     --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
2702     +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
2703     @@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
2704     if (ret)
2705     return ret;
2706    
2707     - if (RING_SPACE(chan, 49)) {
2708     + if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
2709     nouveau_fbcon_gpu_lockup(info);
2710     return 0;
2711     }
2712     diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
2713     index 7da7958556a3..981342d142ff 100644
2714     --- a/drivers/gpu/drm/nouveau/nv50_display.c
2715     +++ b/drivers/gpu/drm/nouveau/nv50_display.c
2716     @@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
2717     {
2718     struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
2719    
2720     - if (show && nv_crtc->cursor.nvbo)
2721     + if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
2722     nv50_crtc_cursor_show(nv_crtc);
2723     else
2724     nv50_crtc_cursor_hide(nv_crtc);
2725     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
2726     index 80614f1b2074..282143f49d72 100644
2727     --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
2728     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
2729     @@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
2730     {
2731     struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
2732     struct nv04_instobj_priv *node = (void *)object;
2733     + struct nvkm_subdev *subdev = (void *)priv;
2734     +
2735     + mutex_lock(&subdev->mutex);
2736     nvkm_mm_free(&priv->heap, &node->mem);
2737     + mutex_unlock(&subdev->mutex);
2738     +
2739     nvkm_instobj_destroy(&node->base);
2740     }
2741    
2742     @@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
2743     struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
2744     struct nv04_instobj_priv *node;
2745     struct nvkm_instobj_args *args = data;
2746     + struct nvkm_subdev *subdev = (void *)priv;
2747     int ret;
2748    
2749     if (!args->align)
2750     @@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
2751     if (ret)
2752     return ret;
2753    
2754     + mutex_lock(&subdev->mutex);
2755     ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
2756     args->align, &node->mem);
2757     + mutex_unlock(&subdev->mutex);
2758     if (ret)
2759     return ret;
2760    
2761     diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
2762     index 3318de690e00..a2dbbbe0d8d7 100644
2763     --- a/drivers/hid/hid-cp2112.c
2764     +++ b/drivers/hid/hid-cp2112.c
2765     @@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
2766     struct cp2112_force_read_report report;
2767     int ret;
2768    
2769     + if (size > sizeof(dev->read_data))
2770     + size = sizeof(dev->read_data);
2771     report.report = CP2112_DATA_READ_FORCE_SEND;
2772     report.length = cpu_to_be16(size);
2773    
2774     diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
2775     index 28fcb2e246d5..fbfc02bb2cfa 100644
2776     --- a/drivers/hwmon/nct7802.c
2777     +++ b/drivers/hwmon/nct7802.c
2778     @@ -195,7 +195,7 @@ abort:
2779     }
2780    
2781     static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
2782     - unsigned int voltage)
2783     + unsigned long voltage)
2784     {
2785     int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
2786     int err;
2787     diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
2788     index b77b82f24480..6153df735e82 100644
2789     --- a/drivers/hwmon/nct7904.c
2790     +++ b/drivers/hwmon/nct7904.c
2791     @@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
2792     return sprintf(buf, "%d\n", val);
2793     }
2794    
2795     -static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
2796     - const char *buf, size_t count)
2797     +static ssize_t store_enable(struct device *dev,
2798     + struct device_attribute *devattr,
2799     + const char *buf, size_t count)
2800     {
2801     int index = to_sensor_dev_attr(devattr)->index;
2802     struct nct7904_data *data = dev_get_drvdata(dev);
2803     @@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
2804    
2805     if (kstrtoul(buf, 10, &val) < 0)
2806     return -EINVAL;
2807     - if (val > 1 || (val && !data->fan_mode[index]))
2808     + if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
2809     return -EINVAL;
2810    
2811     ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
2812     - val ? data->fan_mode[index] : 0);
2813     + val == 2 ? data->fan_mode[index] : 0);
2814    
2815     return ret ? ret : count;
2816     }
2817    
2818     -/* Return 0 for manual mode or 1 for SmartFan mode */
2819     -static ssize_t show_mode(struct device *dev,
2820     - struct device_attribute *devattr, char *buf)
2821     +/* Return 1 for manual mode or 2 for SmartFan mode */
2822     +static ssize_t show_enable(struct device *dev,
2823     + struct device_attribute *devattr, char *buf)
2824     {
2825     int index = to_sensor_dev_attr(devattr)->index;
2826     struct nct7904_data *data = dev_get_drvdata(dev);
2827     @@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
2828     if (val < 0)
2829     return val;
2830    
2831     - return sprintf(buf, "%d\n", val ? 1 : 0);
2832     + return sprintf(buf, "%d\n", val ? 2 : 1);
2833     }
2834    
2835     /* 2 attributes per channel: pwm and mode */
2836     -static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
2837     +static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
2838     show_pwm, store_pwm, 0);
2839     -static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR,
2840     - show_mode, store_mode, 0);
2841     -static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR,
2842     +static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
2843     + show_enable, store_enable, 0);
2844     +static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
2845     show_pwm, store_pwm, 1);
2846     -static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR,
2847     - show_mode, store_mode, 1);
2848     -static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR,
2849     +static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
2850     + show_enable, store_enable, 1);
2851     +static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
2852     show_pwm, store_pwm, 2);
2853     -static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR,
2854     - show_mode, store_mode, 2);
2855     -static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR,
2856     +static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
2857     + show_enable, store_enable, 2);
2858     +static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
2859     show_pwm, store_pwm, 3);
2860     -static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR,
2861     - show_mode, store_mode, 3);
2862     +static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
2863     + show_enable, store_enable, 3);
2864    
2865     static struct attribute *nct7904_fanctl_attrs[] = {
2866     - &sensor_dev_attr_fan1_pwm.dev_attr.attr,
2867     - &sensor_dev_attr_fan1_mode.dev_attr.attr,
2868     - &sensor_dev_attr_fan2_pwm.dev_attr.attr,
2869     - &sensor_dev_attr_fan2_mode.dev_attr.attr,
2870     - &sensor_dev_attr_fan3_pwm.dev_attr.attr,
2871     - &sensor_dev_attr_fan3_mode.dev_attr.attr,
2872     - &sensor_dev_attr_fan4_pwm.dev_attr.attr,
2873     - &sensor_dev_attr_fan4_mode.dev_attr.attr,
2874     + &sensor_dev_attr_pwm1.dev_attr.attr,
2875     + &sensor_dev_attr_pwm1_enable.dev_attr.attr,
2876     + &sensor_dev_attr_pwm2.dev_attr.attr,
2877     + &sensor_dev_attr_pwm2_enable.dev_attr.attr,
2878     + &sensor_dev_attr_pwm3.dev_attr.attr,
2879     + &sensor_dev_attr_pwm3_enable.dev_attr.attr,
2880     + &sensor_dev_attr_pwm4.dev_attr.attr,
2881     + &sensor_dev_attr_pwm4_enable.dev_attr.attr,
2882     NULL
2883     };
2884    
2885     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
2886     index e5cc43074196..2d13fd08ceb7 100644
2887     --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
2888     +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
2889     @@ -176,7 +176,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
2890     else
2891     size += ipoib_recvq_size * ipoib_max_conn_qp;
2892     } else
2893     - goto out_free_wq;
2894     + if (ret != -ENOSYS)
2895     + goto out_free_wq;
2896    
2897     priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
2898     if (IS_ERR(priv->recv_cq)) {
2899     diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
2900     index 35c8d0ceabee..3a32caf06bf1 100644
2901     --- a/drivers/input/mouse/synaptics.c
2902     +++ b/drivers/input/mouse/synaptics.c
2903     @@ -1199,7 +1199,7 @@ static void set_input_params(struct psmouse *psmouse,
2904     ABS_MT_POSITION_Y);
2905     /* Image sensors can report per-contact pressure */
2906     input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
2907     - input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK);
2908     + input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
2909    
2910     /* Image sensors can signal 4 and 5 finger clicks */
2911     __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
2912     diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
2913     index f2c6c352c55a..2c41107240de 100644
2914     --- a/drivers/input/touchscreen/usbtouchscreen.c
2915     +++ b/drivers/input/touchscreen/usbtouchscreen.c
2916     @@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
2917     goto err_out;
2918     }
2919    
2920     + /* TSC-25 data sheet specifies a delay after the RESET command */
2921     + msleep(150);
2922     +
2923     /* set coordinate output rate */
2924     buf[0] = buf[1] = 0xFF;
2925     ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
2926     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2927     index 5ecfaf29933a..c87c4b1bfc00 100644
2928     --- a/drivers/iommu/intel-iommu.c
2929     +++ b/drivers/iommu/intel-iommu.c
2930     @@ -1756,8 +1756,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
2931    
2932     static void domain_exit(struct dmar_domain *domain)
2933     {
2934     + struct dmar_drhd_unit *drhd;
2935     + struct intel_iommu *iommu;
2936     struct page *freelist = NULL;
2937     - int i;
2938    
2939     /* Domain 0 is reserved, so dont process it */
2940     if (!domain)
2941     @@ -1777,8 +1778,10 @@ static void domain_exit(struct dmar_domain *domain)
2942    
2943     /* clear attached or cached domains */
2944     rcu_read_lock();
2945     - for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
2946     - iommu_detach_domain(domain, g_iommus[i]);
2947     + for_each_active_iommu(iommu, drhd)
2948     + if (domain_type_is_vm(domain) ||
2949     + test_bit(iommu->seq_id, domain->iommu_bmp))
2950     + iommu_detach_domain(domain, iommu);
2951     rcu_read_unlock();
2952    
2953     dma_free_pagelist(freelist);
2954     diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
2955     index 1b7e155869f6..c00e2db351ba 100644
2956     --- a/drivers/irqchip/irq-gic-v3-its.c
2957     +++ b/drivers/irqchip/irq-gic-v3-its.c
2958     @@ -75,6 +75,13 @@ struct its_node {
2959    
2960     #define ITS_ITT_ALIGN SZ_256
2961    
2962     +struct event_lpi_map {
2963     + unsigned long *lpi_map;
2964     + u16 *col_map;
2965     + irq_hw_number_t lpi_base;
2966     + int nr_lpis;
2967     +};
2968     +
2969     /*
2970     * The ITS view of a device - belongs to an ITS, a collection, owns an
2971     * interrupt translation table, and a list of interrupts.
2972     @@ -82,11 +89,8 @@ struct its_node {
2973     struct its_device {
2974     struct list_head entry;
2975     struct its_node *its;
2976     - struct its_collection *collection;
2977     + struct event_lpi_map event_map;
2978     void *itt;
2979     - unsigned long *lpi_map;
2980     - irq_hw_number_t lpi_base;
2981     - int nr_lpis;
2982     u32 nr_ites;
2983     u32 device_id;
2984     };
2985     @@ -99,6 +103,14 @@ static struct rdists *gic_rdists;
2986     #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
2987     #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
2988    
2989     +static struct its_collection *dev_event_to_col(struct its_device *its_dev,
2990     + u32 event)
2991     +{
2992     + struct its_node *its = its_dev->its;
2993     +
2994     + return its->collections + its_dev->event_map.col_map[event];
2995     +}
2996     +
2997     /*
2998     * ITS command descriptors - parameters to be encoded in a command
2999     * block.
3000     @@ -134,7 +146,7 @@ struct its_cmd_desc {
3001     struct {
3002     struct its_device *dev;
3003     struct its_collection *col;
3004     - u32 id;
3005     + u32 event_id;
3006     } its_movi_cmd;
3007    
3008     struct {
3009     @@ -241,7 +253,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
3010    
3011     its_fixup_cmd(cmd);
3012    
3013     - return desc->its_mapd_cmd.dev->collection;
3014     + return NULL;
3015     }
3016    
3017     static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
3018     @@ -260,52 +272,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
3019     static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
3020     struct its_cmd_desc *desc)
3021     {
3022     + struct its_collection *col;
3023     +
3024     + col = dev_event_to_col(desc->its_mapvi_cmd.dev,
3025     + desc->its_mapvi_cmd.event_id);
3026     +
3027     its_encode_cmd(cmd, GITS_CMD_MAPVI);
3028     its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
3029     its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
3030     its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
3031     - its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id);
3032     + its_encode_collection(cmd, col->col_id);
3033    
3034     its_fixup_cmd(cmd);
3035    
3036     - return desc->its_mapvi_cmd.dev->collection;
3037     + return col;
3038     }
3039    
3040     static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
3041     struct its_cmd_desc *desc)
3042     {
3043     + struct its_collection *col;
3044     +
3045     + col = dev_event_to_col(desc->its_movi_cmd.dev,
3046     + desc->its_movi_cmd.event_id);
3047     +
3048     its_encode_cmd(cmd, GITS_CMD_MOVI);
3049     its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
3050     - its_encode_event_id(cmd, desc->its_movi_cmd.id);
3051     + its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
3052     its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
3053    
3054     its_fixup_cmd(cmd);
3055    
3056     - return desc->its_movi_cmd.dev->collection;
3057     + return col;
3058     }
3059    
3060     static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
3061     struct its_cmd_desc *desc)
3062     {
3063     + struct its_collection *col;
3064     +
3065     + col = dev_event_to_col(desc->its_discard_cmd.dev,
3066     + desc->its_discard_cmd.event_id);
3067     +
3068     its_encode_cmd(cmd, GITS_CMD_DISCARD);
3069     its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
3070     its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
3071    
3072     its_fixup_cmd(cmd);
3073    
3074     - return desc->its_discard_cmd.dev->collection;
3075     + return col;
3076     }
3077    
3078     static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
3079     struct its_cmd_desc *desc)
3080     {
3081     + struct its_collection *col;
3082     +
3083     + col = dev_event_to_col(desc->its_inv_cmd.dev,
3084     + desc->its_inv_cmd.event_id);
3085     +
3086     its_encode_cmd(cmd, GITS_CMD_INV);
3087     its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
3088     its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
3089    
3090     its_fixup_cmd(cmd);
3091    
3092     - return desc->its_inv_cmd.dev->collection;
3093     + return col;
3094     }
3095    
3096     static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
3097     @@ -497,7 +529,7 @@ static void its_send_movi(struct its_device *dev,
3098    
3099     desc.its_movi_cmd.dev = dev;
3100     desc.its_movi_cmd.col = col;
3101     - desc.its_movi_cmd.id = id;
3102     + desc.its_movi_cmd.event_id = id;
3103    
3104     its_send_single_command(dev->its, its_build_movi_cmd, &desc);
3105     }
3106     @@ -528,7 +560,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
3107     static inline u32 its_get_event_id(struct irq_data *d)
3108     {
3109     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3110     - return d->hwirq - its_dev->lpi_base;
3111     + return d->hwirq - its_dev->event_map.lpi_base;
3112     }
3113    
3114     static void lpi_set_config(struct irq_data *d, bool enable)
3115     @@ -583,7 +615,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
3116    
3117     target_col = &its_dev->its->collections[cpu];
3118     its_send_movi(its_dev, target_col, id);
3119     - its_dev->collection = target_col;
3120     + its_dev->event_map.col_map[id] = cpu;
3121    
3122     return IRQ_SET_MASK_OK_DONE;
3123     }
3124     @@ -713,8 +745,10 @@ out:
3125     return bitmap;
3126     }
3127    
3128     -static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
3129     +static void its_lpi_free(struct event_lpi_map *map)
3130     {
3131     + int base = map->lpi_base;
3132     + int nr_ids = map->nr_lpis;
3133     int lpi;
3134    
3135     spin_lock(&lpi_lock);
3136     @@ -731,7 +765,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
3137    
3138     spin_unlock(&lpi_lock);
3139    
3140     - kfree(bitmap);
3141     + kfree(map->lpi_map);
3142     + kfree(map->col_map);
3143     }
3144    
3145     /*
3146     @@ -1099,11 +1134,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3147     struct its_device *dev;
3148     unsigned long *lpi_map;
3149     unsigned long flags;
3150     + u16 *col_map = NULL;
3151     void *itt;
3152     int lpi_base;
3153     int nr_lpis;
3154     int nr_ites;
3155     - int cpu;
3156     int sz;
3157    
3158     dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3159     @@ -1117,20 +1152,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3160     sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
3161     itt = kzalloc(sz, GFP_KERNEL);
3162     lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
3163     + if (lpi_map)
3164     + col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
3165    
3166     - if (!dev || !itt || !lpi_map) {
3167     + if (!dev || !itt || !lpi_map || !col_map) {
3168     kfree(dev);
3169     kfree(itt);
3170     kfree(lpi_map);
3171     + kfree(col_map);
3172     return NULL;
3173     }
3174    
3175     dev->its = its;
3176     dev->itt = itt;
3177     dev->nr_ites = nr_ites;
3178     - dev->lpi_map = lpi_map;
3179     - dev->lpi_base = lpi_base;
3180     - dev->nr_lpis = nr_lpis;
3181     + dev->event_map.lpi_map = lpi_map;
3182     + dev->event_map.col_map = col_map;
3183     + dev->event_map.lpi_base = lpi_base;
3184     + dev->event_map.nr_lpis = nr_lpis;
3185     dev->device_id = dev_id;
3186     INIT_LIST_HEAD(&dev->entry);
3187    
3188     @@ -1138,10 +1177,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3189     list_add(&dev->entry, &its->its_device_list);
3190     raw_spin_unlock_irqrestore(&its->lock, flags);
3191    
3192     - /* Bind the device to the first possible CPU */
3193     - cpu = cpumask_first(cpu_online_mask);
3194     - dev->collection = &its->collections[cpu];
3195     -
3196     /* Map device to its ITT */
3197     its_send_mapd(dev, 1);
3198    
3199     @@ -1163,12 +1198,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
3200     {
3201     int idx;
3202    
3203     - idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis);
3204     - if (idx == dev->nr_lpis)
3205     + idx = find_first_zero_bit(dev->event_map.lpi_map,
3206     + dev->event_map.nr_lpis);
3207     + if (idx == dev->event_map.nr_lpis)
3208     return -ENOSPC;
3209    
3210     - *hwirq = dev->lpi_base + idx;
3211     - set_bit(idx, dev->lpi_map);
3212     + *hwirq = dev->event_map.lpi_base + idx;
3213     + set_bit(idx, dev->event_map.lpi_map);
3214    
3215     return 0;
3216     }
3217     @@ -1288,7 +1324,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3218     irq_domain_set_hwirq_and_chip(domain, virq + i,
3219     hwirq, &its_irq_chip, its_dev);
3220     dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
3221     - (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i);
3222     + (int)(hwirq - its_dev->event_map.lpi_base),
3223     + (int)hwirq, virq + i);
3224     }
3225    
3226     return 0;
3227     @@ -1300,6 +1337,9 @@ static void its_irq_domain_activate(struct irq_domain *domain,
3228     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3229     u32 event = its_get_event_id(d);
3230    
3231     + /* Bind the LPI to the first possible CPU */
3232     + its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
3233     +
3234     /* Map the GIC IRQ and event to the device */
3235     its_send_mapvi(its_dev, d->hwirq, event);
3236     }
3237     @@ -1327,17 +1367,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3238     u32 event = its_get_event_id(data);
3239    
3240     /* Mark interrupt index as unused */
3241     - clear_bit(event, its_dev->lpi_map);
3242     + clear_bit(event, its_dev->event_map.lpi_map);
3243    
3244     /* Nuke the entry in the domain */
3245     irq_domain_reset_irq_data(data);
3246     }
3247    
3248     /* If all interrupts have been freed, start mopping the floor */
3249     - if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) {
3250     - its_lpi_free(its_dev->lpi_map,
3251     - its_dev->lpi_base,
3252     - its_dev->nr_lpis);
3253     + if (bitmap_empty(its_dev->event_map.lpi_map,
3254     + its_dev->event_map.nr_lpis)) {
3255     + its_lpi_free(&its_dev->event_map);
3256    
3257     /* Unmap device/itt */
3258     its_send_mapd(its_dev, 0);
3259     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3260     index 2caf492890d6..e8d84566f311 100644
3261     --- a/drivers/md/dm.c
3262     +++ b/drivers/md/dm.c
3263     @@ -1053,13 +1053,10 @@ static struct dm_rq_target_io *tio_from_request(struct request *rq)
3264     */
3265     static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
3266     {
3267     - int nr_requests_pending;
3268     -
3269     atomic_dec(&md->pending[rw]);
3270    
3271     /* nudge anyone waiting on suspend queue */
3272     - nr_requests_pending = md_in_flight(md);
3273     - if (!nr_requests_pending)
3274     + if (!md_in_flight(md))
3275     wake_up(&md->wait);
3276    
3277     /*
3278     @@ -1071,8 +1068,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
3279     if (run_queue) {
3280     if (md->queue->mq_ops)
3281     blk_mq_run_hw_queues(md->queue, true);
3282     - else if (!nr_requests_pending ||
3283     - (nr_requests_pending >= md->queue->nr_congestion_on))
3284     + else
3285     blk_run_queue_async(md->queue);
3286     }
3287    
3288     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
3289     index 9157a29c8dbf..cd7b0c1e882d 100644
3290     --- a/drivers/md/raid1.c
3291     +++ b/drivers/md/raid1.c
3292     @@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
3293     spin_lock_irqsave(&conf->device_lock, flags);
3294     if (r1_bio->mddev->degraded == conf->raid_disks ||
3295     (r1_bio->mddev->degraded == conf->raid_disks-1 &&
3296     - !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
3297     + test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
3298     uptodate = 1;
3299     spin_unlock_irqrestore(&conf->device_lock, flags);
3300     }
3301     diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
3302     index d1b55fe62817..e4dc8cdf67a3 100644
3303     --- a/drivers/misc/cxl/context.c
3304     +++ b/drivers/misc/cxl/context.c
3305     @@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3306    
3307     if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
3308     area = ctx->afu->psn_phys;
3309     - if (offset > ctx->afu->adapter->ps_size)
3310     + if (offset >= ctx->afu->adapter->ps_size)
3311     return VM_FAULT_SIGBUS;
3312     } else {
3313     area = ctx->psn_phys;
3314     - if (offset > ctx->psn_size)
3315     + if (offset >= ctx->psn_size)
3316     return VM_FAULT_SIGBUS;
3317     }
3318    
3319     diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
3320     index 8ccddceead66..de350dd46218 100644
3321     --- a/drivers/misc/cxl/main.c
3322     +++ b/drivers/misc/cxl/main.c
3323     @@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
3324     spin_lock(&adapter->afu_list_lock);
3325     for (slice = 0; slice < adapter->slices; slice++) {
3326     afu = adapter->afu[slice];
3327     - if (!afu->enabled)
3328     + if (!afu || !afu->enabled)
3329     continue;
3330     rcu_read_lock();
3331     idr_for_each_entry(&afu->contexts_idr, ctx, id)
3332     diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
3333     index 3e2968159506..e40bcd03bd47 100644
3334     --- a/drivers/misc/mei/main.c
3335     +++ b/drivers/misc/mei/main.c
3336     @@ -685,7 +685,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
3337     /* Fill in the data structures */
3338     devno = MKDEV(MAJOR(mei_devt), dev->minor);
3339     cdev_init(&dev->cdev, &mei_fops);
3340     - dev->cdev.owner = mei_fops.owner;
3341     + dev->cdev.owner = parent->driver->owner;
3342    
3343     /* Add the device */
3344     ret = cdev_add(&dev->cdev, devno, 1);
3345     diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
3346     index 9df2b6801f76..d0abdffb0d7c 100644
3347     --- a/drivers/mmc/host/omap_hsmmc.c
3348     +++ b/drivers/mmc/host/omap_hsmmc.c
3349     @@ -1062,6 +1062,10 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
3350    
3351     if (status & (CTO_EN | CCRC_EN))
3352     end_cmd = 1;
3353     + if (host->data || host->response_busy) {
3354     + end_trans = !end_cmd;
3355     + host->response_busy = 0;
3356     + }
3357     if (status & (CTO_EN | DTO_EN))
3358     hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
3359     else if (status & (CCRC_EN | DCRC_EN))
3360     @@ -1081,10 +1085,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
3361     }
3362     dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
3363     }
3364     - if (host->data || host->response_busy) {
3365     - end_trans = !end_cmd;
3366     - host->response_busy = 0;
3367     - }
3368     }
3369    
3370     OMAP_HSMMC_WRITE(host->base, STAT, status);
3371     diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
3372     index 3497cfaf683c..a870c42731d7 100644
3373     --- a/drivers/mmc/host/sdhci-esdhc.h
3374     +++ b/drivers/mmc/host/sdhci-esdhc.h
3375     @@ -45,6 +45,6 @@
3376     #define ESDHC_DMA_SYSCTL 0x40c
3377     #define ESDHC_DMA_SNOOP 0x00000040
3378    
3379     -#define ESDHC_HOST_CONTROL_RES 0x05
3380     +#define ESDHC_HOST_CONTROL_RES 0x01
3381    
3382     #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
3383     diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
3384     index b5103a247bc1..065dc70caa1d 100644
3385     --- a/drivers/mmc/host/sdhci-pxav3.c
3386     +++ b/drivers/mmc/host/sdhci-pxav3.c
3387     @@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
3388     goto err_of_parse;
3389     sdhci_get_of_property(pdev);
3390     pdata = pxav3_get_mmc_pdata(dev);
3391     + pdev->dev.platform_data = pdata;
3392     } else if (pdata) {
3393     /* on-chip device */
3394     if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
3395     diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
3396     index d3dbb28057e9..bec8a307f8cd 100644
3397     --- a/drivers/mmc/host/sdhci.c
3398     +++ b/drivers/mmc/host/sdhci.c
3399     @@ -3037,8 +3037,11 @@ int sdhci_add_host(struct sdhci_host *host)
3400     GFP_KERNEL);
3401     host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
3402     if (!host->adma_table || !host->align_buffer) {
3403     - dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
3404     - host->adma_table, host->adma_addr);
3405     + if (host->adma_table)
3406     + dma_free_coherent(mmc_dev(mmc),
3407     + host->adma_table_sz,
3408     + host->adma_table,
3409     + host->adma_addr);
3410     kfree(host->align_buffer);
3411     pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3412     mmc_hostname(mmc));
3413     diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
3414     index 041525d2595c..5d214d135332 100644
3415     --- a/drivers/net/can/c_can/c_can.c
3416     +++ b/drivers/net/can/c_can/c_can.c
3417     @@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev)
3418     {
3419     struct c_can_priv *priv = netdev_priv(dev);
3420     int err;
3421     + struct pinctrl *p;
3422    
3423     /* basic c_can configuration */
3424     err = c_can_chip_config(dev);
3425     @@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev)
3426    
3427     priv->can.state = CAN_STATE_ERROR_ACTIVE;
3428    
3429     - /* activate pins */
3430     - pinctrl_pm_select_default_state(dev->dev.parent);
3431     + /* Attempt to use "active" if available else use "default" */
3432     + p = pinctrl_get_select(priv->device, "active");
3433     + if (!IS_ERR(p))
3434     + pinctrl_put(p);
3435     + else
3436     + pinctrl_pm_select_default_state(priv->device);
3437     +
3438     return 0;
3439     }
3440    
3441     diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
3442     index e9b1810d319f..aede704605c6 100644
3443     --- a/drivers/net/can/dev.c
3444     +++ b/drivers/net/can/dev.c
3445     @@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
3446     struct can_frame *cf = (struct can_frame *)skb->data;
3447     u8 dlc = cf->can_dlc;
3448    
3449     - if (!(skb->tstamp.tv64))
3450     - __net_timestamp(skb);
3451     -
3452     netif_rx(priv->echo_skb[idx]);
3453     priv->echo_skb[idx] = NULL;
3454    
3455     @@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
3456     if (unlikely(!skb))
3457     return NULL;
3458    
3459     - __net_timestamp(skb);
3460     skb->protocol = htons(ETH_P_CAN);
3461     skb->pkt_type = PACKET_BROADCAST;
3462     skb->ip_summed = CHECKSUM_UNNECESSARY;
3463     @@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
3464    
3465     can_skb_reserve(skb);
3466     can_skb_prv(skb)->ifindex = dev->ifindex;
3467     + can_skb_prv(skb)->skbcnt = 0;
3468    
3469     *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
3470     memset(*cf, 0, sizeof(struct can_frame));
3471     @@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
3472     if (unlikely(!skb))
3473     return NULL;
3474    
3475     - __net_timestamp(skb);
3476     skb->protocol = htons(ETH_P_CANFD);
3477     skb->pkt_type = PACKET_BROADCAST;
3478     skb->ip_summed = CHECKSUM_UNNECESSARY;
3479     @@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
3480    
3481     can_skb_reserve(skb);
3482     can_skb_prv(skb)->ifindex = dev->ifindex;
3483     + can_skb_prv(skb)->skbcnt = 0;
3484    
3485     *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
3486     memset(*cfd, 0, sizeof(struct canfd_frame));
3487     diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
3488     index 7deb80dcbe8c..2f9ebad4ff56 100644
3489     --- a/drivers/net/can/rcar_can.c
3490     +++ b/drivers/net/can/rcar_can.c
3491     @@ -526,7 +526,7 @@ static int rcar_can_open(struct net_device *ndev)
3492     napi_enable(&priv->napi);
3493     err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
3494     if (err) {
3495     - netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
3496     + netdev_err(ndev, "error requesting interrupt %d\n", ndev->irq);
3497     goto out_close;
3498     }
3499     can_led_event(ndev, CAN_LED_EVENT_OPEN);
3500     @@ -758,8 +758,9 @@ static int rcar_can_probe(struct platform_device *pdev)
3501     }
3502    
3503     irq = platform_get_irq(pdev, 0);
3504     - if (!irq) {
3505     + if (irq < 0) {
3506     dev_err(&pdev->dev, "No IRQ resource\n");
3507     + err = irq;
3508     goto fail;
3509     }
3510    
3511     @@ -823,7 +824,7 @@ static int rcar_can_probe(struct platform_device *pdev)
3512    
3513     devm_can_led_init(ndev);
3514    
3515     - dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
3516     + dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
3517     priv->regs, ndev->irq);
3518    
3519     return 0;
3520     diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
3521     index f64f5290d6f8..a23a7af8eb9a 100644
3522     --- a/drivers/net/can/slcan.c
3523     +++ b/drivers/net/can/slcan.c
3524     @@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
3525     if (!skb)
3526     return;
3527    
3528     - __net_timestamp(skb);
3529     skb->dev = sl->dev;
3530     skb->protocol = htons(ETH_P_CAN);
3531     skb->pkt_type = PACKET_BROADCAST;
3532     @@ -215,6 +214,7 @@ static void slc_bump(struct slcan *sl)
3533    
3534     can_skb_reserve(skb);
3535     can_skb_prv(skb)->ifindex = sl->dev->ifindex;
3536     + can_skb_prv(skb)->skbcnt = 0;
3537    
3538     memcpy(skb_put(skb, sizeof(struct can_frame)),
3539     &cf, sizeof(struct can_frame));
3540     diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
3541     index bf63fee4e743..34c625ea2801 100644
3542     --- a/drivers/net/can/spi/mcp251x.c
3543     +++ b/drivers/net/can/spi/mcp251x.c
3544     @@ -1221,17 +1221,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
3545     struct spi_device *spi = to_spi_device(dev);
3546     struct mcp251x_priv *priv = spi_get_drvdata(spi);
3547    
3548     - if (priv->after_suspend & AFTER_SUSPEND_POWER) {
3549     + if (priv->after_suspend & AFTER_SUSPEND_POWER)
3550     mcp251x_power_enable(priv->power, 1);
3551     +
3552     + if (priv->after_suspend & AFTER_SUSPEND_UP) {
3553     + mcp251x_power_enable(priv->transceiver, 1);
3554     queue_work(priv->wq, &priv->restart_work);
3555     } else {
3556     - if (priv->after_suspend & AFTER_SUSPEND_UP) {
3557     - mcp251x_power_enable(priv->transceiver, 1);
3558     - queue_work(priv->wq, &priv->restart_work);
3559     - } else {
3560     - priv->after_suspend = 0;
3561     - }
3562     + priv->after_suspend = 0;
3563     }
3564     +
3565     priv->force_quit = 0;
3566     enable_irq(spi->irq);
3567     return 0;
3568     diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
3569     index 0ce868de855d..674f367087c5 100644
3570     --- a/drivers/net/can/vcan.c
3571     +++ b/drivers/net/can/vcan.c
3572     @@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
3573     skb->dev = dev;
3574     skb->ip_summed = CHECKSUM_UNNECESSARY;
3575    
3576     - if (!(skb->tstamp.tv64))
3577     - __net_timestamp(skb);
3578     -
3579     netif_rx_ni(skb);
3580     }
3581    
3582     diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
3583     index 8e604a3931ca..ef20be084b24 100644
3584     --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
3585     +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
3586     @@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
3587     hw_addr = (const u8 *)(mac_override +
3588     MAC_ADDRESS_OVERRIDE_FAMILY_8000);
3589    
3590     - /* The byte order is little endian 16 bit, meaning 214365 */
3591     - data->hw_addr[0] = hw_addr[1];
3592     - data->hw_addr[1] = hw_addr[0];
3593     - data->hw_addr[2] = hw_addr[3];
3594     - data->hw_addr[3] = hw_addr[2];
3595     - data->hw_addr[4] = hw_addr[5];
3596     - data->hw_addr[5] = hw_addr[4];
3597     + /*
3598     + * Store the MAC address from MAO section.
3599     + * No byte swapping is required in MAO section
3600     + */
3601     + memcpy(data->hw_addr, hw_addr, ETH_ALEN);
3602    
3603     /*
3604     * Force the use of the OTP MAC address in case of reserved MAC
3605     diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
3606     index ef32e177f662..281451c274ca 100644
3607     --- a/drivers/net/wireless/iwlwifi/mvm/tx.c
3608     +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
3609     @@ -225,7 +225,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
3610    
3611     if (info->band == IEEE80211_BAND_2GHZ &&
3612     !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
3613     - rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
3614     + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
3615     else
3616     rate_flags =
3617     BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
3618     diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
3619     index dc179094e6a0..37e6a6f91487 100644
3620     --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
3621     +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
3622     @@ -2515,6 +2515,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3623     trans->hw_rev = (trans->hw_rev & 0xfff0) |
3624     (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
3625    
3626     + ret = iwl_pcie_prepare_card_hw(trans);
3627     + if (ret) {
3628     + IWL_WARN(trans, "Exit HW not ready\n");
3629     + goto out_pci_disable_msi;
3630     + }
3631     +
3632     /*
3633     * in-order to recognize C step driver should read chip version
3634     * id located at the AUX bus MISC address space.
3635     diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
3636     index 5ac59fbb2440..d3a3be7476e1 100644
3637     --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
3638     +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
3639     @@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
3640     unsigned num_configs)
3641     {
3642     struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
3643     - const struct imx1_pinctrl_soc_info *info = ipctl->info;
3644     int i;
3645    
3646     for (i = 0; i != num_configs; ++i) {
3647     imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
3648    
3649     dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
3650     - info->pins[pin_id].name);
3651     + pin_desc_get(pctldev, pin_id)->name);
3652     }
3653    
3654     return 0;
3655     diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
3656     index ff828117798f..8de135174e82 100644
3657     --- a/drivers/regulator/s2mps11.c
3658     +++ b/drivers/regulator/s2mps11.c
3659     @@ -34,6 +34,8 @@
3660     #include <linux/mfd/samsung/s2mps14.h>
3661     #include <linux/mfd/samsung/s2mpu02.h>
3662    
3663     +/* The highest number of possible regulators for supported devices. */
3664     +#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
3665     struct s2mps11_info {
3666     unsigned int rdev_num;
3667     int ramp_delay2;
3668     @@ -49,7 +51,7 @@ struct s2mps11_info {
3669     * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
3670     * the suspend mode was enabled.
3671     */
3672     - unsigned long long s2mps14_suspend_state:50;
3673     + DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
3674    
3675     /* Array of size rdev_num with GPIO-s for external sleep control */
3676     int *ext_control_gpio;
3677     @@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
3678     switch (s2mps11->dev_type) {
3679     case S2MPS13X:
3680     case S2MPS14X:
3681     - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
3682     + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
3683     val = S2MPS14_ENABLE_SUSPEND;
3684     else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
3685     val = S2MPS14_ENABLE_EXT_CONTROL;
3686     @@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
3687     val = rdev->desc->enable_mask;
3688     break;
3689     case S2MPU02:
3690     - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
3691     + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
3692     val = S2MPU02_ENABLE_SUSPEND;
3693     else
3694     val = rdev->desc->enable_mask;
3695     @@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
3696     if (ret < 0)
3697     return ret;
3698    
3699     - s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
3700     + set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
3701     /*
3702     * Don't enable suspend mode if regulator is already disabled because
3703     * this would effectively for a short time turn on the regulator after
3704     @@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
3705     case S2MPS11X:
3706     s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
3707     regulators = s2mps11_regulators;
3708     + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
3709     break;
3710     case S2MPS13X:
3711     s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
3712     regulators = s2mps13_regulators;
3713     + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
3714     break;
3715     case S2MPS14X:
3716     s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
3717     regulators = s2mps14_regulators;
3718     + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
3719     break;
3720     case S2MPU02:
3721     s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
3722     regulators = s2mpu02_regulators;
3723     + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
3724     break;
3725     default:
3726     dev_err(&pdev->dev, "Invalid device type: %u\n",
3727     diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
3728     index 0e6ee3ca30e6..e9ae6b924c70 100644
3729     --- a/drivers/scsi/qla2xxx/qla_dbg.c
3730     +++ b/drivers/scsi/qla2xxx/qla_dbg.c
3731     @@ -68,7 +68,7 @@
3732     * | | | 0xd101-0xd1fe |
3733     * | | | 0xd214-0xd2fe |
3734     * | Target Mode | 0xe079 | |
3735     - * | Target Mode Management | 0xf072 | 0xf002 |
3736     + * | Target Mode Management | 0xf080 | 0xf002 |
3737     * | | | 0xf046-0xf049 |
3738     * | Target Mode Task Management | 0x1000b | |
3739     * ----------------------------------------------------------------------
3740     diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
3741     index 285cb204f300..998498e2341b 100644
3742     --- a/drivers/scsi/qla2xxx/qla_init.c
3743     +++ b/drivers/scsi/qla2xxx/qla_init.c
3744     @@ -2924,6 +2924,7 @@ qla2x00_rport_del(void *data)
3745     struct fc_rport *rport;
3746     scsi_qla_host_t *vha = fcport->vha;
3747     unsigned long flags;
3748     + unsigned long vha_flags;
3749    
3750     spin_lock_irqsave(fcport->vha->host->host_lock, flags);
3751     rport = fcport->drport ? fcport->drport: fcport->rport;
3752     @@ -2935,7 +2936,9 @@ qla2x00_rport_del(void *data)
3753     * Release the target mode FC NEXUS in qla_target.c code
3754     * if target mod is enabled.
3755     */
3756     + spin_lock_irqsave(&vha->hw->hardware_lock, vha_flags);
3757     qlt_fc_port_deleted(vha, fcport);
3758     + spin_unlock_irqrestore(&vha->hw->hardware_lock, vha_flags);
3759     }
3760     }
3761    
3762     @@ -3303,6 +3306,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
3763     * Create target mode FC NEXUS in qla_target.c if target mode is
3764     * enabled..
3765     */
3766     +
3767     qlt_fc_port_added(vha, fcport);
3768    
3769     spin_lock_irqsave(fcport->vha->host->host_lock, flags);
3770     @@ -3460,20 +3464,43 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3771     if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3772     continue;
3773    
3774     - if (fcport->scan_state == QLA_FCPORT_SCAN &&
3775     - atomic_read(&fcport->state) == FCS_ONLINE) {
3776     - qla2x00_mark_device_lost(vha, fcport,
3777     - ql2xplogiabsentdevice, 0);
3778     - if (fcport->loop_id != FC_NO_LOOP_ID &&
3779     - (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3780     - fcport->port_type != FCT_INITIATOR &&
3781     - fcport->port_type != FCT_BROADCAST) {
3782     - ha->isp_ops->fabric_logout(vha,
3783     - fcport->loop_id,
3784     - fcport->d_id.b.domain,
3785     - fcport->d_id.b.area,
3786     - fcport->d_id.b.al_pa);
3787     - qla2x00_clear_loop_id(fcport);
3788     + if (fcport->scan_state == QLA_FCPORT_SCAN) {
3789     + if (qla_ini_mode_enabled(base_vha) &&
3790     + atomic_read(&fcport->state) == FCS_ONLINE) {
3791     + qla2x00_mark_device_lost(vha, fcport,
3792     + ql2xplogiabsentdevice, 0);
3793     + if (fcport->loop_id != FC_NO_LOOP_ID &&
3794     + (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3795     + fcport->port_type != FCT_INITIATOR &&
3796     + fcport->port_type != FCT_BROADCAST) {
3797     + ha->isp_ops->fabric_logout(vha,
3798     + fcport->loop_id,
3799     + fcport->d_id.b.domain,
3800     + fcport->d_id.b.area,
3801     + fcport->d_id.b.al_pa);
3802     + qla2x00_clear_loop_id(fcport);
3803     + }
3804     + } else if (!qla_ini_mode_enabled(base_vha)) {
3805     + /*
3806     + * In target mode, explicitly kill
3807     + * sessions and log out of devices
3808     + * that are gone, so that we don't
3809     + * end up with an initiator using the
3810     + * wrong ACL (if the fabric recycles
3811     + * an FC address and we have a stale
3812     + * session around) and so that we don't
3813     + * report initiators that are no longer
3814     + * on the fabric.
3815     + */
3816     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
3817     + "port gone, logging out/killing session: "
3818     + "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
3819     + "scan_state %d\n",
3820     + fcport->port_name,
3821     + atomic_read(&fcport->state),
3822     + fcport->flags, fcport->fc4_type,
3823     + fcport->scan_state);
3824     + qlt_fc_port_deleted(vha, fcport);
3825     }
3826     }
3827     }
3828     @@ -3494,6 +3521,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3829     (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3830     continue;
3831    
3832     + /*
3833     + * If we're not an initiator, skip looking for devices
3834     + * and logging in. There's no reason for us to do it,
3835     + * and it seems to actively cause problems in target
3836     + * mode if we race with the initiator logging into us
3837     + * (we might get the "port ID used" status back from
3838     + * our login command and log out the initiator, which
3839     + * seems to cause havoc).
3840     + */
3841     + if (!qla_ini_mode_enabled(base_vha)) {
3842     + if (fcport->scan_state == QLA_FCPORT_FOUND) {
3843     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
3844     + "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
3845     + "scan_state %d (initiator mode disabled; skipping "
3846     + "login)\n", fcport->port_name,
3847     + atomic_read(&fcport->state),
3848     + fcport->flags, fcport->fc4_type,
3849     + fcport->scan_state);
3850     + }
3851     + continue;
3852     + }
3853     +
3854     if (fcport->loop_id == FC_NO_LOOP_ID) {
3855     fcport->loop_id = next_loopid;
3856     rval = qla2x00_find_new_loop_id(
3857     @@ -3520,16 +3569,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3858     test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3859     break;
3860    
3861     - /* Find a new loop ID to use. */
3862     - fcport->loop_id = next_loopid;
3863     - rval = qla2x00_find_new_loop_id(base_vha, fcport);
3864     - if (rval != QLA_SUCCESS) {
3865     - /* Ran out of IDs to use */
3866     - break;
3867     - }
3868     + /*
3869     + * If we're not an initiator, skip looking for devices
3870     + * and logging in. There's no reason for us to do it,
3871     + * and it seems to actively cause problems in target
3872     + * mode if we race with the initiator logging into us
3873     + * (we might get the "port ID used" status back from
3874     + * our login command and log out the initiator, which
3875     + * seems to cause havoc).
3876     + */
3877     + if (qla_ini_mode_enabled(base_vha)) {
3878     + /* Find a new loop ID to use. */
3879     + fcport->loop_id = next_loopid;
3880     + rval = qla2x00_find_new_loop_id(base_vha,
3881     + fcport);
3882     + if (rval != QLA_SUCCESS) {
3883     + /* Ran out of IDs to use */
3884     + break;
3885     + }
3886    
3887     - /* Login and update database */
3888     - qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3889     + /* Login and update database */
3890     + qla2x00_fabric_dev_login(vha, fcport,
3891     + &next_loopid);
3892     + } else {
3893     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
3894     + "new port %8phC state 0x%x flags 0x%x fc4_type "
3895     + "0x%x scan_state %d (initiator mode disabled; "
3896     + "skipping login)\n",
3897     + fcport->port_name,
3898     + atomic_read(&fcport->state),
3899     + fcport->flags, fcport->fc4_type,
3900     + fcport->scan_state);
3901     + }
3902    
3903     list_move_tail(&fcport->list, &vha->vp_fcports);
3904     }
3905     @@ -3725,11 +3796,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3906     fcport->fp_speed = new_fcport->fp_speed;
3907    
3908     /*
3909     - * If address the same and state FCS_ONLINE, nothing
3910     - * changed.
3911     + * If address the same and state FCS_ONLINE
3912     + * (or in target mode), nothing changed.
3913     */
3914     if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3915     - atomic_read(&fcport->state) == FCS_ONLINE) {
3916     + (atomic_read(&fcport->state) == FCS_ONLINE ||
3917     + !qla_ini_mode_enabled(base_vha))) {
3918     break;
3919     }
3920    
3921     @@ -3749,6 +3821,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3922     * Log it out if still logged in and mark it for
3923     * relogin later.
3924     */
3925     + if (!qla_ini_mode_enabled(base_vha)) {
3926     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
3927     + "port changed FC ID, %8phC"
3928     + " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
3929     + fcport->port_name,
3930     + fcport->d_id.b.domain,
3931     + fcport->d_id.b.area,
3932     + fcport->d_id.b.al_pa,
3933     + fcport->loop_id,
3934     + new_fcport->d_id.b.domain,
3935     + new_fcport->d_id.b.area,
3936     + new_fcport->d_id.b.al_pa);
3937     + fcport->d_id.b24 = new_fcport->d_id.b24;
3938     + break;
3939     + }
3940     +
3941     fcport->d_id.b24 = new_fcport->d_id.b24;
3942     fcport->flags |= FCF_LOGIN_NEEDED;
3943     if (fcport->loop_id != FC_NO_LOOP_ID &&
3944     @@ -3768,6 +3856,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3945     if (found)
3946     continue;
3947     /* If device was not in our fcports list, then add it. */
3948     + new_fcport->scan_state = QLA_FCPORT_FOUND;
3949     list_add_tail(&new_fcport->list, new_fcports);
3950    
3951     /* Allocate a new replacement fcport. */
3952     diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
3953     index fe8a8d157e22..496a733d0ca3 100644
3954     --- a/drivers/scsi/qla2xxx/qla_target.c
3955     +++ b/drivers/scsi/qla2xxx/qla_target.c
3956     @@ -113,6 +113,7 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
3957     static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
3958     struct atio_from_isp *atio, uint16_t status, int qfull);
3959     static void qlt_disable_vha(struct scsi_qla_host *vha);
3960     +static void qlt_clear_tgt_db(struct qla_tgt *tgt);
3961     /*
3962     * Global Variables
3963     */
3964     @@ -431,10 +432,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
3965    
3966     loop_id = le16_to_cpu(n->u.isp24.nport_handle);
3967     if (loop_id == 0xFFFF) {
3968     -#if 0 /* FIXME: Re-enable Global event handling.. */
3969     /* Global event */
3970     - atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
3971     - qlt_clear_tgt_db(ha->tgt.qla_tgt);
3972     + atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
3973     + qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
3974     +#if 0 /* FIXME: do we need to choose a session here? */
3975     if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
3976     sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
3977     typeof(*sess), sess_list_entry);
3978     @@ -782,25 +783,20 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
3979    
3980     void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
3981     {
3982     - struct qla_hw_data *ha = vha->hw;
3983     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3984     struct qla_tgt_sess *sess;
3985     - unsigned long flags;
3986    
3987     if (!vha->hw->tgt.tgt_ops)
3988     return;
3989    
3990     - if (!tgt || (fcport->port_type != FCT_INITIATOR))
3991     + if (!tgt)
3992     return;
3993    
3994     - spin_lock_irqsave(&ha->hardware_lock, flags);
3995     if (tgt->tgt_stop) {
3996     - spin_unlock_irqrestore(&ha->hardware_lock, flags);
3997     return;
3998     }
3999     sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
4000     if (!sess) {
4001     - spin_unlock_irqrestore(&ha->hardware_lock, flags);
4002     return;
4003     }
4004    
4005     @@ -808,7 +804,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
4006    
4007     sess->local = 1;
4008     qlt_schedule_sess_for_deletion(sess, false);
4009     - spin_unlock_irqrestore(&ha->hardware_lock, flags);
4010     }
4011    
4012     static inline int test_tgt_sess_count(struct qla_tgt *tgt)
4013     @@ -2347,9 +2342,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
4014     res = qlt_build_ctio_crc2_pkt(&prm, vha);
4015     else
4016     res = qlt_24xx_build_ctio_pkt(&prm, vha);
4017     - if (unlikely(res != 0))
4018     + if (unlikely(res != 0)) {
4019     + vha->req->cnt += full_req_cnt;
4020     goto out_unmap_unlock;
4021     -
4022     + }
4023    
4024     pkt = (struct ctio7_to_24xx *)prm.pkt;
4025    
4026     @@ -2487,8 +2483,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
4027     else
4028     res = qlt_24xx_build_ctio_pkt(&prm, vha);
4029    
4030     - if (unlikely(res != 0))
4031     + if (unlikely(res != 0)) {
4032     + vha->req->cnt += prm.req_cnt;
4033     goto out_unlock_free_unmap;
4034     + }
4035     +
4036     pkt = (struct ctio7_to_24xx *)prm.pkt;
4037     pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
4038     CTIO7_FLAGS_STATUS_MODE_0);
4039     @@ -2717,7 +2716,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
4040     static void qlt_send_term_exchange(struct scsi_qla_host *vha,
4041     struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
4042     {
4043     - unsigned long flags;
4044     + unsigned long flags = 0;
4045     int rc;
4046    
4047     if (qlt_issue_marker(vha, ha_locked) < 0)
4048     @@ -2733,17 +2732,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
4049     rc = __qlt_send_term_exchange(vha, cmd, atio);
4050     if (rc == -ENOMEM)
4051     qlt_alloc_qfull_cmd(vha, atio, 0, 0);
4052     - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
4053    
4054     done:
4055     if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
4056     !cmd->cmd_sent_to_fw)) {
4057     - if (!ha_locked && !in_interrupt())
4058     - msleep(250); /* just in case */
4059     -
4060     - qlt_unmap_sg(vha, cmd);
4061     + if (cmd->sg_mapped)
4062     + qlt_unmap_sg(vha, cmd);
4063     vha->hw->tgt.tgt_ops->free_cmd(cmd);
4064     }
4065     +
4066     + if (!ha_locked)
4067     + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
4068     +
4069     return;
4070     }
4071    
4072     @@ -3347,6 +3347,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4073     cmd->loop_id = sess->loop_id;
4074     cmd->conf_compl_supported = sess->conf_compl_supported;
4075    
4076     + cmd->cmd_flags = 0;
4077     + cmd->jiffies_at_alloc = get_jiffies_64();
4078     +
4079     + cmd->reset_count = vha->hw->chip_reset;
4080     +
4081     return cmd;
4082     }
4083    
4084     @@ -3453,11 +3458,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4085     return -ENOMEM;
4086     }
4087    
4088     - cmd->cmd_flags = 0;
4089     - cmd->jiffies_at_alloc = get_jiffies_64();
4090     -
4091     - cmd->reset_count = vha->hw->chip_reset;
4092     -
4093     cmd->cmd_in_wq = 1;
4094     cmd->cmd_flags |= BIT_0;
4095     INIT_WORK(&cmd->work, qlt_do_work);
4096     diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
4097     index c95a4e943fc6..59c31bf88d92 100644
4098     --- a/drivers/scsi/scsi_error.c
4099     +++ b/drivers/scsi/scsi_error.c
4100     @@ -944,7 +944,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
4101     scmd->sdb.length);
4102     scmd->sdb.table.sgl = &ses->sense_sgl;
4103     scmd->sc_data_direction = DMA_FROM_DEVICE;
4104     - scmd->sdb.table.nents = 1;
4105     + scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
4106     scmd->cmnd[0] = REQUEST_SENSE;
4107     scmd->cmnd[4] = scmd->sdb.length;
4108     scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
4109     diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
4110     index b1a263137a23..448ebdaa3d69 100644
4111     --- a/drivers/scsi/scsi_lib.c
4112     +++ b/drivers/scsi/scsi_lib.c
4113     @@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
4114    
4115     static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
4116     {
4117     - if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
4118     + if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
4119     return;
4120     __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
4121     }
4122     @@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
4123    
4124     if (mq) {
4125     if (nents <= SCSI_MAX_SG_SEGMENTS) {
4126     - sdb->table.nents = nents;
4127     - sg_init_table(sdb->table.sgl, sdb->table.nents);
4128     + sdb->table.nents = sdb->table.orig_nents = nents;
4129     + sg_init_table(sdb->table.sgl, nents);
4130     return 0;
4131     }
4132     first_chunk = sdb->table.sgl;
4133     diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
4134     index 1ac38e73df7e..9ad41168d26d 100644
4135     --- a/drivers/scsi/scsi_sysfs.c
4136     +++ b/drivers/scsi/scsi_sysfs.c
4137     @@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
4138    
4139     depth = simple_strtoul(buf, NULL, 0);
4140    
4141     - if (depth < 1 || depth > sht->can_queue)
4142     + if (depth < 1 || depth > sdev->host->can_queue)
4143     return -EINVAL;
4144    
4145     retval = sht->change_queue_depth(sdev, depth);
4146     diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
4147     index 9a1c34205254..525ab4c1f306 100644
4148     --- a/drivers/scsi/st.c
4149     +++ b/drivers/scsi/st.c
4150     @@ -1274,9 +1274,9 @@ static int st_open(struct inode *inode, struct file *filp)
4151     spin_lock(&st_use_lock);
4152     STp->in_use = 0;
4153     spin_unlock(&st_use_lock);
4154     - scsi_tape_put(STp);
4155     if (resumed)
4156     scsi_autopm_put_device(STp->device);
4157     + scsi_tape_put(STp);
4158     return retval;
4159    
4160     }
4161     diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
4162     index 788e2b176a4f..acce90ac7371 100644
4163     --- a/drivers/spi/spi-img-spfi.c
4164     +++ b/drivers/spi/spi-img-spfi.c
4165     @@ -40,6 +40,7 @@
4166     #define SPFI_CONTROL_SOFT_RESET BIT(11)
4167     #define SPFI_CONTROL_SEND_DMA BIT(10)
4168     #define SPFI_CONTROL_GET_DMA BIT(9)
4169     +#define SPFI_CONTROL_SE BIT(8)
4170     #define SPFI_CONTROL_TMODE_SHIFT 5
4171     #define SPFI_CONTROL_TMODE_MASK 0x7
4172     #define SPFI_CONTROL_TMODE_SINGLE 0
4173     @@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
4174     else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
4175     xfer->rx_nbits == SPI_NBITS_QUAD)
4176     val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
4177     + val |= SPFI_CONTROL_SE;
4178     spfi_writel(spfi, val, SPFI_CONTROL);
4179     }
4180    
4181     diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
4182     index f08e812b2984..412b9c86b997 100644
4183     --- a/drivers/spi/spi-imx.c
4184     +++ b/drivers/spi/spi-imx.c
4185     @@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
4186     {
4187     struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
4188    
4189     - if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
4190     - && (transfer->len > spi_imx->tx_wml))
4191     + if (spi_imx->dma_is_inited
4192     + && transfer->len > spi_imx->rx_wml * sizeof(u32)
4193     + && transfer->len > spi_imx->tx_wml * sizeof(u32))
4194     return true;
4195     return false;
4196     }
4197     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
4198     index 74e6114ff18f..305a5cbc099a 100644
4199     --- a/drivers/target/iscsi/iscsi_target.c
4200     +++ b/drivers/target/iscsi/iscsi_target.c
4201     @@ -4001,7 +4001,13 @@ get_immediate:
4202     }
4203    
4204     transport_err:
4205     - iscsit_take_action_for_connection_exit(conn);
4206     + /*
4207     + * Avoid the normal connection failure code-path if this connection
4208     + * is still within LOGIN mode, and iscsi_np process context is
4209     + * responsible for cleaning up the early connection failure.
4210     + */
4211     + if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
4212     + iscsit_take_action_for_connection_exit(conn);
4213     out:
4214     return 0;
4215     }
4216     @@ -4093,7 +4099,7 @@ reject:
4217    
4218     int iscsi_target_rx_thread(void *arg)
4219     {
4220     - int ret;
4221     + int ret, rc;
4222     u8 buffer[ISCSI_HDR_LEN], opcode;
4223     u32 checksum = 0, digest = 0;
4224     struct iscsi_conn *conn = arg;
4225     @@ -4103,10 +4109,16 @@ int iscsi_target_rx_thread(void *arg)
4226     * connection recovery / failure event can be triggered externally.
4227     */
4228     allow_signal(SIGINT);
4229     + /*
4230     + * Wait for iscsi_post_login_handler() to complete before allowing
4231     + * incoming iscsi/tcp socket I/O, and/or failing the connection.
4232     + */
4233     + rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4234     + if (rc < 0)
4235     + return 0;
4236    
4237     if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
4238     struct completion comp;
4239     - int rc;
4240    
4241     init_completion(&comp);
4242     rc = wait_for_completion_interruptible(&comp);
4243     @@ -4543,7 +4555,18 @@ static void iscsit_logout_post_handler_closesession(
4244     struct iscsi_conn *conn)
4245     {
4246     struct iscsi_session *sess = conn->sess;
4247     - int sleep = cmpxchg(&conn->tx_thread_active, true, false);
4248     + int sleep = 1;
4249     + /*
4250     + * Traditional iscsi/tcp will invoke this logic from TX thread
4251     + * context during session logout, so clear tx_thread_active and
4252     + * sleep if iscsit_close_connection() has not already occured.
4253     + *
4254     + * Since iser-target invokes this logic from it's own workqueue,
4255     + * always sleep waiting for RX/TX thread shutdown to complete
4256     + * within iscsit_close_connection().
4257     + */
4258     + if (conn->conn_transport->transport_type == ISCSI_TCP)
4259     + sleep = cmpxchg(&conn->tx_thread_active, true, false);
4260    
4261     atomic_set(&conn->conn_logout_remove, 0);
4262     complete(&conn->conn_logout_comp);
4263     @@ -4557,7 +4580,10 @@ static void iscsit_logout_post_handler_closesession(
4264     static void iscsit_logout_post_handler_samecid(
4265     struct iscsi_conn *conn)
4266     {
4267     - int sleep = cmpxchg(&conn->tx_thread_active, true, false);
4268     + int sleep = 1;
4269     +
4270     + if (conn->conn_transport->transport_type == ISCSI_TCP)
4271     + sleep = cmpxchg(&conn->tx_thread_active, true, false);
4272    
4273     atomic_set(&conn->conn_logout_remove, 0);
4274     complete(&conn->conn_logout_comp);
4275     @@ -4776,6 +4802,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4276     struct iscsi_session *sess;
4277     struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4278     struct se_session *se_sess, *se_sess_tmp;
4279     + LIST_HEAD(free_list);
4280     int session_count = 0;
4281    
4282     spin_lock_bh(&se_tpg->session_lock);
4283     @@ -4797,14 +4824,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4284     }
4285     atomic_set(&sess->session_reinstatement, 1);
4286     spin_unlock(&sess->conn_lock);
4287     - spin_unlock_bh(&se_tpg->session_lock);
4288    
4289     - iscsit_free_session(sess);
4290     - spin_lock_bh(&se_tpg->session_lock);
4291     + list_move_tail(&se_sess->sess_list, &free_list);
4292     + }
4293     + spin_unlock_bh(&se_tpg->session_lock);
4294     +
4295     + list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4296     + sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4297    
4298     + iscsit_free_session(sess);
4299     session_count++;
4300     }
4301     - spin_unlock_bh(&se_tpg->session_lock);
4302    
4303     pr_debug("Released %d iSCSI Session(s) from Target Portal"
4304     " Group: %hu\n", session_count, tpg->tpgt);
4305     diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
4306     index 70d799dfab03..c3bccaddb592 100644
4307     --- a/drivers/target/iscsi/iscsi_target_login.c
4308     +++ b/drivers/target/iscsi/iscsi_target_login.c
4309     @@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
4310     init_completion(&conn->conn_logout_comp);
4311     init_completion(&conn->rx_half_close_comp);
4312     init_completion(&conn->tx_half_close_comp);
4313     + init_completion(&conn->rx_login_comp);
4314     spin_lock_init(&conn->cmd_lock);
4315     spin_lock_init(&conn->conn_usage_lock);
4316     spin_lock_init(&conn->immed_queue_lock);
4317     @@ -699,7 +700,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
4318     iscsit_start_nopin_timer(conn);
4319     }
4320    
4321     -static int iscsit_start_kthreads(struct iscsi_conn *conn)
4322     +int iscsit_start_kthreads(struct iscsi_conn *conn)
4323     {
4324     int ret = 0;
4325    
4326     @@ -734,6 +735,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
4327    
4328     return 0;
4329     out_tx:
4330     + send_sig(SIGINT, conn->tx_thread, 1);
4331     kthread_stop(conn->tx_thread);
4332     conn->tx_thread_active = false;
4333     out_bitmap:
4334     @@ -744,7 +746,7 @@ out_bitmap:
4335     return ret;
4336     }
4337    
4338     -int iscsi_post_login_handler(
4339     +void iscsi_post_login_handler(
4340     struct iscsi_np *np,
4341     struct iscsi_conn *conn,
4342     u8 zero_tsih)
4343     @@ -754,7 +756,6 @@ int iscsi_post_login_handler(
4344     struct se_session *se_sess = sess->se_sess;
4345     struct iscsi_portal_group *tpg = sess->tpg;
4346     struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4347     - int rc;
4348    
4349     iscsit_inc_conn_usage_count(conn);
4350    
4351     @@ -795,10 +796,6 @@ int iscsi_post_login_handler(
4352     sess->sess_ops->InitiatorName);
4353     spin_unlock_bh(&sess->conn_lock);
4354    
4355     - rc = iscsit_start_kthreads(conn);
4356     - if (rc)
4357     - return rc;
4358     -
4359     iscsi_post_login_start_timers(conn);
4360     /*
4361     * Determine CPU mask to ensure connection's RX and TX kthreads
4362     @@ -807,15 +804,20 @@ int iscsi_post_login_handler(
4363     iscsit_thread_get_cpumask(conn);
4364     conn->conn_rx_reset_cpumask = 1;
4365     conn->conn_tx_reset_cpumask = 1;
4366     -
4367     + /*
4368     + * Wakeup the sleeping iscsi_target_rx_thread() now that
4369     + * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
4370     + */
4371     + complete(&conn->rx_login_comp);
4372     iscsit_dec_conn_usage_count(conn);
4373     +
4374     if (stop_timer) {
4375     spin_lock_bh(&se_tpg->session_lock);
4376     iscsit_stop_time2retain_timer(sess);
4377     spin_unlock_bh(&se_tpg->session_lock);
4378     }
4379     iscsit_dec_session_usage_count(sess);
4380     - return 0;
4381     + return;
4382     }
4383    
4384     iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
4385     @@ -856,10 +858,6 @@ int iscsi_post_login_handler(
4386     " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
4387     spin_unlock_bh(&se_tpg->session_lock);
4388    
4389     - rc = iscsit_start_kthreads(conn);
4390     - if (rc)
4391     - return rc;
4392     -
4393     iscsi_post_login_start_timers(conn);
4394     /*
4395     * Determine CPU mask to ensure connection's RX and TX kthreads
4396     @@ -868,10 +866,12 @@ int iscsi_post_login_handler(
4397     iscsit_thread_get_cpumask(conn);
4398     conn->conn_rx_reset_cpumask = 1;
4399     conn->conn_tx_reset_cpumask = 1;
4400     -
4401     + /*
4402     + * Wakeup the sleeping iscsi_target_rx_thread() now that
4403     + * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
4404     + */
4405     + complete(&conn->rx_login_comp);
4406     iscsit_dec_conn_usage_count(conn);
4407     -
4408     - return 0;
4409     }
4410    
4411     static void iscsi_handle_login_thread_timeout(unsigned long data)
4412     @@ -1436,23 +1436,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
4413     if (ret < 0)
4414     goto new_sess_out;
4415    
4416     - if (!conn->sess) {
4417     - pr_err("struct iscsi_conn session pointer is NULL!\n");
4418     - goto new_sess_out;
4419     - }
4420     -
4421     iscsi_stop_login_thread_timer(np);
4422    
4423     - if (signal_pending(current))
4424     - goto new_sess_out;
4425     -
4426     if (ret == 1) {
4427     tpg_np = conn->tpg_np;
4428    
4429     - ret = iscsi_post_login_handler(np, conn, zero_tsih);
4430     - if (ret < 0)
4431     - goto new_sess_out;
4432     -
4433     + iscsi_post_login_handler(np, conn, zero_tsih);
4434     iscsit_deaccess_np(np, tpg, tpg_np);
4435     }
4436    
4437     diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
4438     index 29d098324b7f..55cbf4533544 100644
4439     --- a/drivers/target/iscsi/iscsi_target_login.h
4440     +++ b/drivers/target/iscsi/iscsi_target_login.h
4441     @@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
4442     extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
4443     extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
4444     extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
4445     -extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
4446     +extern int iscsit_start_kthreads(struct iscsi_conn *);
4447     +extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
4448     extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
4449     bool, bool);
4450     extern int iscsi_target_login_thread(void *);
4451     diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
4452     index 8c02fa34716f..f9cde9141836 100644
4453     --- a/drivers/target/iscsi/iscsi_target_nego.c
4454     +++ b/drivers/target/iscsi/iscsi_target_nego.c
4455     @@ -17,6 +17,7 @@
4456     ******************************************************************************/
4457    
4458     #include <linux/ctype.h>
4459     +#include <linux/kthread.h>
4460     #include <scsi/iscsi_proto.h>
4461     #include <target/target_core_base.h>
4462     #include <target/target_core_fabric.h>
4463     @@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
4464     ntohl(login_rsp->statsn), login->rsp_length);
4465    
4466     padding = ((-login->rsp_length) & 3);
4467     + /*
4468     + * Before sending the last login response containing the transition
4469     + * bit for full-feature-phase, go ahead and start up TX/RX threads
4470     + * now to avoid potential resource allocation failures after the
4471     + * final login response has been sent.
4472     + */
4473     + if (login->login_complete) {
4474     + int rc = iscsit_start_kthreads(conn);
4475     + if (rc) {
4476     + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
4477     + ISCSI_LOGIN_STATUS_NO_RESOURCES);
4478     + return -1;
4479     + }
4480     + }
4481    
4482     if (conn->conn_transport->iscsit_put_login_tx(conn, login,
4483     login->rsp_length + padding) < 0)
4484     - return -1;
4485     + goto err;
4486    
4487     login->rsp_length = 0;
4488     mutex_lock(&sess->cmdsn_mutex);
4489     @@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
4490     mutex_unlock(&sess->cmdsn_mutex);
4491    
4492     return 0;
4493     +
4494     +err:
4495     + if (login->login_complete) {
4496     + if (conn->rx_thread && conn->rx_thread_active) {
4497     + send_sig(SIGINT, conn->rx_thread, 1);
4498     + kthread_stop(conn->rx_thread);
4499     + }
4500     + if (conn->tx_thread && conn->tx_thread_active) {
4501     + send_sig(SIGINT, conn->tx_thread, 1);
4502     + kthread_stop(conn->tx_thread);
4503     + }
4504     + spin_lock(&iscsit_global->ts_bitmap_lock);
4505     + bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
4506     + get_order(1));
4507     + spin_unlock(&iscsit_global->ts_bitmap_lock);
4508     + }
4509     + return -1;
4510     }
4511    
4512     static void iscsi_target_sk_data_ready(struct sock *sk)
4513     diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
4514     index 396344cb011f..16ed0b6c7f9c 100644
4515     --- a/drivers/tty/n_tty.c
4516     +++ b/drivers/tty/n_tty.c
4517     @@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
4518     * Locking: ctrl_lock
4519     */
4520    
4521     -static void isig(int sig, struct tty_struct *tty)
4522     +static void __isig(int sig, struct tty_struct *tty)
4523     {
4524     - struct n_tty_data *ldata = tty->disc_data;
4525     struct pid *tty_pgrp = tty_get_pgrp(tty);
4526     if (tty_pgrp) {
4527     kill_pgrp(tty_pgrp, sig, 1);
4528     put_pid(tty_pgrp);
4529     }
4530     +}
4531    
4532     - if (!L_NOFLSH(tty)) {
4533     +static void isig(int sig, struct tty_struct *tty)
4534     +{
4535     + struct n_tty_data *ldata = tty->disc_data;
4536     +
4537     + if (L_NOFLSH(tty)) {
4538     + /* signal only */
4539     + __isig(sig, tty);
4540     +
4541     + } else { /* signal and flush */
4542     up_read(&tty->termios_rwsem);
4543     down_write(&tty->termios_rwsem);
4544    
4545     + __isig(sig, tty);
4546     +
4547     /* clear echo buffer */
4548     mutex_lock(&ldata->output_lock);
4549     ldata->echo_head = ldata->echo_tail = 0;
4550     diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
4551     index 88250395b0ce..01aa52f574e5 100644
4552     --- a/drivers/tty/serial/imx.c
4553     +++ b/drivers/tty/serial/imx.c
4554     @@ -1132,11 +1132,6 @@ static int imx_startup(struct uart_port *port)
4555     while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
4556     udelay(1);
4557    
4558     - /* Can we enable the DMA support? */
4559     - if (is_imx6q_uart(sport) && !uart_console(port) &&
4560     - !sport->dma_is_inited)
4561     - imx_uart_dma_init(sport);
4562     -
4563     spin_lock_irqsave(&sport->port.lock, flags);
4564    
4565     /*
4566     @@ -1145,9 +1140,6 @@ static int imx_startup(struct uart_port *port)
4567     writel(USR1_RTSD, sport->port.membase + USR1);
4568     writel(USR2_ORE, sport->port.membase + USR2);
4569    
4570     - if (sport->dma_is_inited && !sport->dma_is_enabled)
4571     - imx_enable_dma(sport);
4572     -
4573     temp = readl(sport->port.membase + UCR1);
4574     temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
4575    
4576     @@ -1318,6 +1310,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
4577     } else {
4578     ucr2 |= UCR2_CTSC;
4579     }
4580     +
4581     + /* Can we enable the DMA support? */
4582     + if (is_imx6q_uart(sport) && !uart_console(port)
4583     + && !sport->dma_is_inited)
4584     + imx_uart_dma_init(sport);
4585     } else {
4586     termios->c_cflag &= ~CRTSCTS;
4587     }
4588     @@ -1434,6 +1431,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
4589     if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
4590     imx_enable_ms(&sport->port);
4591    
4592     + if (sport->dma_is_inited && !sport->dma_is_enabled)
4593     + imx_enable_dma(sport);
4594     spin_unlock_irqrestore(&sport->port.lock, flags);
4595     }
4596    
4597     diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
4598     index 0b7bb12dfc68..ec540445bb71 100644
4599     --- a/drivers/tty/serial/serial_core.c
4600     +++ b/drivers/tty/serial/serial_core.c
4601     @@ -1409,7 +1409,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
4602     mutex_lock(&port->mutex);
4603     uart_shutdown(tty, state);
4604     tty_port_tty_set(port, NULL);
4605     - tty->closing = 0;
4606     +
4607     spin_lock_irqsave(&port->lock, flags);
4608    
4609     if (port->blocked_open) {
4610     @@ -1435,6 +1435,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
4611     mutex_unlock(&port->mutex);
4612    
4613     tty_ldisc_flush(tty);
4614     + tty->closing = 0;
4615     }
4616    
4617     static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
4618     diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
4619     index 0827d7c96527..ee07ba41c8db 100644
4620     --- a/drivers/usb/host/xhci-hub.c
4621     +++ b/drivers/usb/host/xhci-hub.c
4622     @@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
4623     u32 pls = status_reg & PORT_PLS_MASK;
4624    
4625     /* resume state is a xHCI internal state.
4626     - * Do not report it to usb core.
4627     + * Do not report it to usb core, instead, pretend to be U3,
4628     + * thus usb core knows it's not ready for transfer
4629     */
4630     - if (pls == XDEV_RESUME)
4631     + if (pls == XDEV_RESUME) {
4632     + *status |= USB_SS_PORT_LS_U3;
4633     return;
4634     + }
4635    
4636     /* When the CAS bit is set then warm reset
4637     * should be performed on port
4638     @@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
4639     status |= USB_PORT_STAT_C_RESET << 16;
4640     /* USB3.0 only */
4641     if (hcd->speed == HCD_USB3) {
4642     - if ((raw_port_status & PORT_PLC))
4643     + /* Port link change with port in resume state should not be
4644     + * reported to usbcore, as this is an internal state to be
4645     + * handled by xhci driver. Reporting PLC to usbcore may
4646     + * cause usbcore clearing PLC first and port change event
4647     + * irq won't be generated.
4648     + */
4649     + if ((raw_port_status & PORT_PLC) &&
4650     + (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
4651     status |= USB_PORT_STAT_C_LINK_STATE << 16;
4652     if ((raw_port_status & PORT_WRC))
4653     status |= USB_PORT_STAT_C_BH_RESET << 16;
4654     @@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
4655     spin_lock_irqsave(&xhci->lock, flags);
4656    
4657     if (hcd->self.root_hub->do_remote_wakeup) {
4658     - if (bus_state->resuming_ports) {
4659     + if (bus_state->resuming_ports || /* USB2 */
4660     + bus_state->port_remote_wakeup) { /* USB3 */
4661     spin_unlock_irqrestore(&xhci->lock, flags);
4662     - xhci_dbg(xhci, "suspend failed because "
4663     - "a port is resuming\n");
4664     + xhci_dbg(xhci, "suspend failed because a port is resuming\n");
4665     return -EBUSY;
4666     }
4667     }
4668     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
4669     index 7d34cbfaf373..d095677a0702 100644
4670     --- a/drivers/usb/host/xhci-ring.c
4671     +++ b/drivers/usb/host/xhci-ring.c
4672     @@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
4673     usb_hcd_resume_root_hub(hcd);
4674     }
4675    
4676     + if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
4677     + bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
4678     +
4679     if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
4680     xhci_dbg(xhci, "port resume event for port %d\n", port_id);
4681    
4682     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4683     index 36bf089b708f..c502c2277aeb 100644
4684     --- a/drivers/usb/host/xhci.c
4685     +++ b/drivers/usb/host/xhci.c
4686     @@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
4687     return -EINVAL;
4688     }
4689    
4690     + if (virt_dev->tt_info)
4691     + old_active_eps = virt_dev->tt_info->active_eps;
4692     +
4693     if (virt_dev->udev != udev) {
4694     /* If the virt_dev and the udev does not match, this virt_dev
4695     * may belong to another udev.
4696     diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
4697     index 6977f8491fa7..0f26dd2697b6 100644
4698     --- a/drivers/usb/host/xhci.h
4699     +++ b/drivers/usb/host/xhci.h
4700     @@ -285,6 +285,7 @@ struct xhci_op_regs {
4701     #define XDEV_U0 (0x0 << 5)
4702     #define XDEV_U2 (0x2 << 5)
4703     #define XDEV_U3 (0x3 << 5)
4704     +#define XDEV_INACTIVE (0x6 << 5)
4705     #define XDEV_RESUME (0xf << 5)
4706     /* true: port has power (see HCC_PPC) */
4707     #define PORT_POWER (1 << 9)
4708     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
4709     index caf188800c67..87898ca2ed17 100644
4710     --- a/drivers/usb/storage/unusual_devs.h
4711     +++ b/drivers/usb/storage/unusual_devs.h
4712     @@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
4713     USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4714     US_FL_NO_READ_DISC_INFO ),
4715    
4716     +/* Reported by Oliver Neukum <oneukum@suse.com>
4717     + * This device morphes spontaneously into another device if the access
4718     + * pattern of Windows isn't followed. Thus writable media would be dirty
4719     + * if the initial instance is used. So the device is limited to its
4720     + * virtual CD.
4721     + * And yes, the concept that BCD goes up to 9 is not heeded */
4722     +UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
4723     + "ZTE,Incorporated",
4724     + "ZTE WCDMA Technologies MSM",
4725     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4726     + US_FL_SINGLE_LUN ),
4727     +
4728     /* Reported by Sven Geggus <sven-usbst@geggus.net>
4729     * This encrypted pen drive returns bogus data for the initial READ(10).
4730     */
4731     diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
4732     index 2ee28266fd07..fa49d3294cd5 100644
4733     --- a/drivers/vhost/vhost.c
4734     +++ b/drivers/vhost/vhost.c
4735     @@ -886,6 +886,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
4736     }
4737     if (eventfp != d->log_file) {
4738     filep = d->log_file;
4739     + d->log_file = eventfp;
4740     ctx = d->log_ctx;
4741     d->log_ctx = eventfp ?
4742     eventfd_ctx_fileget(eventfp) : NULL;
4743     diff --git a/fs/dcache.c b/fs/dcache.c
4744     index 50bb3c207621..5d03eb0ec0ac 100644
4745     --- a/fs/dcache.c
4746     +++ b/fs/dcache.c
4747     @@ -642,7 +642,7 @@ static inline bool fast_dput(struct dentry *dentry)
4748    
4749     /*
4750     * If we have a d_op->d_delete() operation, we sould not
4751     - * let the dentry count go to zero, so use "put__or_lock".
4752     + * let the dentry count go to zero, so use "put_or_lock".
4753     */
4754     if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
4755     return lockref_put_or_lock(&dentry->d_lockref);
4756     @@ -697,7 +697,7 @@ static inline bool fast_dput(struct dentry *dentry)
4757     */
4758     smp_rmb();
4759     d_flags = ACCESS_ONCE(dentry->d_flags);
4760     - d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
4761     + d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
4762    
4763     /* Nothing to do? Dropping the reference was all we needed? */
4764     if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
4765     @@ -776,6 +776,9 @@ repeat:
4766     if (unlikely(d_unhashed(dentry)))
4767     goto kill_it;
4768    
4769     + if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
4770     + goto kill_it;
4771     +
4772     if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
4773     if (dentry->d_op->d_delete(dentry))
4774     goto kill_it;
4775     diff --git a/fs/namespace.c b/fs/namespace.c
4776     index 02c6875dd945..fce3cc1a3fa7 100644
4777     --- a/fs/namespace.c
4778     +++ b/fs/namespace.c
4779     @@ -1350,6 +1350,36 @@ enum umount_tree_flags {
4780     UMOUNT_PROPAGATE = 2,
4781     UMOUNT_CONNECTED = 4,
4782     };
4783     +
4784     +static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
4785     +{
4786     + /* Leaving mounts connected is only valid for lazy umounts */
4787     + if (how & UMOUNT_SYNC)
4788     + return true;
4789     +
4790     + /* A mount without a parent has nothing to be connected to */
4791     + if (!mnt_has_parent(mnt))
4792     + return true;
4793     +
4794     + /* Because the reference counting rules change when mounts are
4795     + * unmounted and connected, umounted mounts may not be
4796     + * connected to mounted mounts.
4797     + */
4798     + if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
4799     + return true;
4800     +
4801     + /* Has it been requested that the mount remain connected? */
4802     + if (how & UMOUNT_CONNECTED)
4803     + return false;
4804     +
4805     + /* Is the mount locked such that it needs to remain connected? */
4806     + if (IS_MNT_LOCKED(mnt))
4807     + return false;
4808     +
4809     + /* By default disconnect the mount */
4810     + return true;
4811     +}
4812     +
4813     /*
4814     * mount_lock must be held
4815     * namespace_sem must be held for write
4816     @@ -1387,10 +1417,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
4817     if (how & UMOUNT_SYNC)
4818     p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
4819    
4820     - disconnect = !(((how & UMOUNT_CONNECTED) &&
4821     - mnt_has_parent(p) &&
4822     - (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
4823     - IS_MNT_LOCKED_AND_LAZY(p));
4824     + disconnect = disconnect_mount(p, how);
4825    
4826     pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
4827     disconnect ? &unmounted : NULL);
4828     @@ -1527,11 +1554,8 @@ void __detach_mounts(struct dentry *dentry)
4829     while (!hlist_empty(&mp->m_list)) {
4830     mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
4831     if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
4832     - struct mount *p, *tmp;
4833     - list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
4834     - hlist_add_head(&p->mnt_umount.s_list, &unmounted);
4835     - umount_mnt(p);
4836     - }
4837     + hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
4838     + umount_mnt(mnt);
4839     }
4840     else umount_tree(mnt, UMOUNT_CONNECTED);
4841     }
4842     diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
4843     index f734562c6d24..5d25b9d97c29 100644
4844     --- a/fs/nfs/inode.c
4845     +++ b/fs/nfs/inode.c
4846     @@ -1242,9 +1242,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
4847     if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
4848     cur_size = i_size_read(inode);
4849     new_isize = nfs_size_to_loff_t(fattr->size);
4850     - if (cur_size != new_isize && nfsi->nrequests == 0)
4851     + if (cur_size != new_isize)
4852     invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
4853     }
4854     + if (nfsi->nrequests != 0)
4855     + invalid &= ~NFS_INO_REVAL_PAGECACHE;
4856    
4857     /* Have any file permissions changed? */
4858     if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
4859     @@ -1682,8 +1684,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
4860     invalid |= NFS_INO_INVALID_ATTR
4861     | NFS_INO_INVALID_DATA
4862     | NFS_INO_INVALID_ACCESS
4863     - | NFS_INO_INVALID_ACL
4864     - | NFS_INO_REVAL_PAGECACHE;
4865     + | NFS_INO_INVALID_ACL;
4866     if (S_ISDIR(inode->i_mode))
4867     nfs_force_lookup_revalidate(inode);
4868     inode->i_version = fattr->change_attr;
4869     @@ -1715,7 +1716,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
4870     if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
4871     i_size_write(inode, new_isize);
4872     invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
4873     - invalid &= ~NFS_INO_REVAL_PAGECACHE;
4874     }
4875     dprintk("NFS: isize change on server for file %s/%ld "
4876     "(%Ld to %Ld)\n",
4877     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4878     index 55e1e3af23a3..d3f205126609 100644
4879     --- a/fs/nfs/nfs4proc.c
4880     +++ b/fs/nfs/nfs4proc.c
4881     @@ -1204,12 +1204,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
4882    
4883     static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
4884     {
4885     + if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
4886     + return;
4887     if (state->n_wronly)
4888     set_bit(NFS_O_WRONLY_STATE, &state->flags);
4889     if (state->n_rdonly)
4890     set_bit(NFS_O_RDONLY_STATE, &state->flags);
4891     if (state->n_rdwr)
4892     set_bit(NFS_O_RDWR_STATE, &state->flags);
4893     + set_bit(NFS_OPEN_STATE, &state->flags);
4894     }
4895    
4896     static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
4897     diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
4898     index 282b39369510..7b4552678536 100644
4899     --- a/fs/nfs/pagelist.c
4900     +++ b/fs/nfs/pagelist.c
4901     @@ -1110,8 +1110,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
4902     nfs_list_remove_request(req);
4903     if (__nfs_pageio_add_request(desc, req))
4904     continue;
4905     - if (desc->pg_error < 0)
4906     + if (desc->pg_error < 0) {
4907     + list_splice_tail(&head, &mirror->pg_list);
4908     + mirror->pg_recoalesce = 1;
4909     return 0;
4910     + }
4911     break;
4912     }
4913     } while (mirror->pg_recoalesce);
4914     diff --git a/fs/pnode.h b/fs/pnode.h
4915     index 7114ce6e6b9e..0fcdbe7ca648 100644
4916     --- a/fs/pnode.h
4917     +++ b/fs/pnode.h
4918     @@ -20,8 +20,6 @@
4919     #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
4920     #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
4921     #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
4922     -#define IS_MNT_LOCKED_AND_LAZY(m) \
4923     - (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
4924    
4925     #define CL_EXPIRE 0x01
4926     #define CL_SLAVE 0x02
4927     diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
4928     index 20de88d1bf86..dd714037c322 100644
4929     --- a/fs/xfs/libxfs/xfs_attr_remote.c
4930     +++ b/fs/xfs/libxfs/xfs_attr_remote.c
4931     @@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
4932     struct xfs_buf *bp)
4933     {
4934     struct xfs_mount *mp = bp->b_target->bt_mount;
4935     - struct xfs_buf_log_item *bip = bp->b_fspriv;
4936     + int blksize = mp->m_attr_geo->blksize;
4937     char *ptr;
4938     int len;
4939     xfs_daddr_t bno;
4940     - int blksize = mp->m_attr_geo->blksize;
4941    
4942     /* no verification of non-crc buffers */
4943     if (!xfs_sb_version_hascrc(&mp->m_sb))
4944     @@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
4945     ASSERT(len >= blksize);
4946    
4947     while (len > 0) {
4948     + struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
4949     +
4950     if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
4951     xfs_buf_ioerror(bp, -EFSCORRUPTED);
4952     xfs_verifier_error(bp);
4953     return;
4954     }
4955     - if (bip) {
4956     - struct xfs_attr3_rmt_hdr *rmt;
4957    
4958     - rmt = (struct xfs_attr3_rmt_hdr *)ptr;
4959     - rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
4960     + /*
4961     + * Ensure we aren't writing bogus LSNs to disk. See
4962     + * xfs_attr3_rmt_hdr_set() for the explanation.
4963     + */
4964     + if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
4965     + xfs_buf_ioerror(bp, -EFSCORRUPTED);
4966     + xfs_verifier_error(bp);
4967     + return;
4968     }
4969     xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
4970    
4971     @@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
4972     rmt->rm_owner = cpu_to_be64(ino);
4973     rmt->rm_blkno = cpu_to_be64(bno);
4974    
4975     + /*
4976     + * Remote attribute blocks are written synchronously, so we don't
4977     + * have an LSN that we can stamp in them that makes any sense to log
4978     + * recovery. To ensure that log recovery handles overwrites of these
4979     + * blocks sanely (i.e. once they've been freed and reallocated as some
4980     + * other type of metadata) we need to ensure that the LSN has a value
4981     + * that tells log recovery to ignore the LSN and overwrite the buffer
4982     + * with whatever is in it's log. To do this, we use the magic
4983     + * NULLCOMMITLSN to indicate that the LSN is invalid.
4984     + */
4985     + rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
4986     +
4987     return sizeof(struct xfs_attr3_rmt_hdr);
4988     }
4989    
4990     @@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
4991    
4992     /*
4993     * Allocate a single extent, up to the size of the value.
4994     + *
4995     + * Note that we have to consider this a data allocation as we
4996     + * write the remote attribute without logging the contents.
4997     + * Hence we must ensure that we aren't using blocks that are on
4998     + * the busy list so that we don't overwrite blocks which have
4999     + * recently been freed but their transactions are not yet
5000     + * committed to disk. If we overwrite the contents of a busy
5001     + * extent and then crash then the block may not contain the
5002     + * correct metadata after log recovery occurs.
5003     */
5004     xfs_bmap_init(args->flist, args->firstblock);
5005     nmap = 1;
5006     error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
5007     - blkcnt,
5008     - XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
5009     - args->firstblock, args->total, &map, &nmap,
5010     - args->flist);
5011     + blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
5012     + args->total, &map, &nmap, args->flist);
5013     if (!error) {
5014     error = xfs_bmap_finish(&args->trans, args->flist,
5015     &committed);
5016     diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
5017     index 4f5784f85a5b..a5d03396dda0 100644
5018     --- a/fs/xfs/xfs_log_recover.c
5019     +++ b/fs/xfs/xfs_log_recover.c
5020     @@ -1887,9 +1887,14 @@ xlog_recover_get_buf_lsn(
5021     uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
5022     break;
5023     case XFS_ATTR3_RMT_MAGIC:
5024     - lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
5025     - uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
5026     - break;
5027     + /*
5028     + * Remote attr blocks are written synchronously, rather than
5029     + * being logged. That means they do not contain a valid LSN
5030     + * (i.e. transactionally ordered) in them, and hence any time we
5031     + * see a buffer to replay over the top of a remote attribute
5032     + * block we should simply do so.
5033     + */
5034     + goto recover_immediately;
5035     case XFS_SB_MAGIC:
5036     lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
5037     uuid = &((struct xfs_dsb *)blk)->sb_uuid;
5038     diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
5039     index b6a52a4b457a..51bb6532785c 100644
5040     --- a/include/linux/can/skb.h
5041     +++ b/include/linux/can/skb.h
5042     @@ -27,10 +27,12 @@
5043     /**
5044     * struct can_skb_priv - private additional data inside CAN sk_buffs
5045     * @ifindex: ifindex of the first interface the CAN frame appeared on
5046     + * @skbcnt: atomic counter to have an unique id together with skb pointer
5047     * @cf: align to the following CAN frame at skb->data
5048     */
5049     struct can_skb_priv {
5050     int ifindex;
5051     + int skbcnt;
5052     struct can_frame cf[0];
5053     };
5054    
5055     diff --git a/include/linux/cper.h b/include/linux/cper.h
5056     index 76abba4b238e..dcacb1a72e26 100644
5057     --- a/include/linux/cper.h
5058     +++ b/include/linux/cper.h
5059     @@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
5060     __u64 mm_reg_addr;
5061     };
5062    
5063     -/* Memory Error Section */
5064     +/* Old Memory Error Section UEFI 2.1, 2.2 */
5065     +struct cper_sec_mem_err_old {
5066     + __u64 validation_bits;
5067     + __u64 error_status;
5068     + __u64 physical_addr;
5069     + __u64 physical_addr_mask;
5070     + __u16 node;
5071     + __u16 card;
5072     + __u16 module;
5073     + __u16 bank;
5074     + __u16 device;
5075     + __u16 row;
5076     + __u16 column;
5077     + __u16 bit_pos;
5078     + __u64 requestor_id;
5079     + __u64 responder_id;
5080     + __u64 target_id;
5081     + __u8 error_type;
5082     +};
5083     +
5084     +/* Memory Error Section UEFI >= 2.3 */
5085     struct cper_sec_mem_err {
5086     __u64 validation_bits;
5087     __u64 error_status;
5088     diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
5089     index 1da602982cf9..6cd8c0ee4b6f 100644
5090     --- a/include/linux/ftrace.h
5091     +++ b/include/linux/ftrace.h
5092     @@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
5093     * SAVE_REGS. If another ops with this flag set is already registered
5094     * for any of the functions that this ops will be registered for, then
5095     * this ops will fail to register or set_filter_ip.
5096     + * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
5097     */
5098     enum {
5099     FTRACE_OPS_FL_ENABLED = 1 << 0,
5100     @@ -132,6 +133,7 @@ enum {
5101     FTRACE_OPS_FL_MODIFYING = 1 << 11,
5102     FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
5103     FTRACE_OPS_FL_IPMODIFY = 1 << 13,
5104     + FTRACE_OPS_FL_PID = 1 << 14,
5105     };
5106    
5107     #ifdef CONFIG_DYNAMIC_FTRACE
5108     @@ -159,6 +161,7 @@ struct ftrace_ops {
5109     struct ftrace_ops *next;
5110     unsigned long flags;
5111     void *private;
5112     + ftrace_func_t saved_func;
5113     int __percpu *disabled;
5114     #ifdef CONFIG_DYNAMIC_FTRACE
5115     int nr_trampolines;
5116     diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
5117     index 54e7af301888..73abbc54063d 100644
5118     --- a/include/target/iscsi/iscsi_target_core.h
5119     +++ b/include/target/iscsi/iscsi_target_core.h
5120     @@ -606,6 +606,7 @@ struct iscsi_conn {
5121     int bitmap_id;
5122     int rx_thread_active;
5123     struct task_struct *rx_thread;
5124     + struct completion rx_login_comp;
5125     int tx_thread_active;
5126     struct task_struct *tx_thread;
5127     /* list_head for session connection list */
5128     diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
5129     index 9065107f083e..7a5237a1bce5 100644
5130     --- a/kernel/irq/resend.c
5131     +++ b/kernel/irq/resend.c
5132     @@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
5133     !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
5134     #ifdef CONFIG_HARDIRQS_SW_RESEND
5135     /*
5136     - * If the interrupt has a parent irq and runs
5137     - * in the thread context of the parent irq,
5138     - * retrigger the parent.
5139     + * If the interrupt is running in the thread
5140     + * context of the parent irq we need to be
5141     + * careful, because we cannot trigger it
5142     + * directly.
5143     */
5144     - if (desc->parent_irq &&
5145     - irq_settings_is_nested_thread(desc))
5146     + if (irq_settings_is_nested_thread(desc)) {
5147     + /*
5148     + * If the parent_irq is valid, we
5149     + * retrigger the parent, otherwise we
5150     + * do nothing.
5151     + */
5152     + if (!desc->parent_irq)
5153     + return;
5154     irq = desc->parent_irq;
5155     + }
5156     /* Set it pending and activate the softirq: */
5157     set_bit(irq, irqs_resend);
5158     tasklet_schedule(&resend_tasklet);
5159     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
5160     index 02bece4a99ea..eb11011b5292 100644
5161     --- a/kernel/trace/ftrace.c
5162     +++ b/kernel/trace/ftrace.c
5163     @@ -98,6 +98,13 @@ struct ftrace_pid {
5164     struct pid *pid;
5165     };
5166    
5167     +static bool ftrace_pids_enabled(void)
5168     +{
5169     + return !list_empty(&ftrace_pids);
5170     +}
5171     +
5172     +static void ftrace_update_trampoline(struct ftrace_ops *ops);
5173     +
5174     /*
5175     * ftrace_disabled is set when an anomaly is discovered.
5176     * ftrace_disabled is much stronger than ftrace_enabled.
5177     @@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
5178     static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
5179     static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
5180     ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
5181     -ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
5182     static struct ftrace_ops global_ops;
5183     static struct ftrace_ops control_ops;
5184    
5185     @@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
5186     if (!test_tsk_trace_trace(current))
5187     return;
5188    
5189     - ftrace_pid_function(ip, parent_ip, op, regs);
5190     -}
5191     -
5192     -static void set_ftrace_pid_function(ftrace_func_t func)
5193     -{
5194     - /* do not set ftrace_pid_function to itself! */
5195     - if (func != ftrace_pid_func)
5196     - ftrace_pid_function = func;
5197     + op->saved_func(ip, parent_ip, op, regs);
5198     }
5199    
5200     /**
5201     @@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
5202     void clear_ftrace_function(void)
5203     {
5204     ftrace_trace_function = ftrace_stub;
5205     - ftrace_pid_function = ftrace_stub;
5206     }
5207    
5208     static void control_ops_disable_all(struct ftrace_ops *ops)
5209     @@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
5210     } else
5211     add_ftrace_ops(&ftrace_ops_list, ops);
5212    
5213     + /* Always save the function, and reset at unregistering */
5214     + ops->saved_func = ops->func;
5215     +
5216     + if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
5217     + ops->func = ftrace_pid_func;
5218     +
5219     ftrace_update_trampoline(ops);
5220    
5221     if (ftrace_enabled)
5222     @@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
5223     if (ftrace_enabled)
5224     update_ftrace_function();
5225    
5226     + ops->func = ops->saved_func;
5227     +
5228     return 0;
5229     }
5230    
5231     static void ftrace_update_pid_func(void)
5232     {
5233     + bool enabled = ftrace_pids_enabled();
5234     + struct ftrace_ops *op;
5235     +
5236     /* Only do something if we are tracing something */
5237     if (ftrace_trace_function == ftrace_stub)
5238     return;
5239    
5240     + do_for_each_ftrace_op(op, ftrace_ops_list) {
5241     + if (op->flags & FTRACE_OPS_FL_PID) {
5242     + op->func = enabled ? ftrace_pid_func :
5243     + op->saved_func;
5244     + ftrace_update_trampoline(op);
5245     + }
5246     + } while_for_each_ftrace_op(op);
5247     +
5248     update_ftrace_function();
5249     }
5250    
5251     @@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
5252     .local_hash.filter_hash = EMPTY_HASH,
5253     INIT_OPS_HASH(global_ops)
5254     .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5255     - FTRACE_OPS_FL_INITIALIZED,
5256     + FTRACE_OPS_FL_INITIALIZED |
5257     + FTRACE_OPS_FL_PID,
5258     };
5259    
5260     /*
5261     @@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
5262    
5263     static struct ftrace_ops global_ops = {
5264     .func = ftrace_stub,
5265     - .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
5266     + .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5267     + FTRACE_OPS_FL_INITIALIZED |
5268     + FTRACE_OPS_FL_PID,
5269     };
5270    
5271     static int __init ftrace_nodyn_init(void)
5272     @@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5273     if (WARN_ON(tr->ops->func != ftrace_stub))
5274     printk("ftrace ops had %pS for function\n",
5275     tr->ops->func);
5276     - /* Only the top level instance does pid tracing */
5277     - if (!list_empty(&ftrace_pids)) {
5278     - set_ftrace_pid_function(func);
5279     - func = ftrace_pid_func;
5280     - }
5281     }
5282     tr->ops->func = func;
5283     tr->ops->private = tr;
5284     @@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
5285     {
5286     mutex_lock(&ftrace_lock);
5287    
5288     - if (list_empty(&ftrace_pids) && (!*pos))
5289     + if (!ftrace_pids_enabled() && (!*pos))
5290     return (void *) 1;
5291    
5292     return seq_list_start(&ftrace_pids, *pos);
5293     @@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
5294     .func = ftrace_stub,
5295     .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5296     FTRACE_OPS_FL_INITIALIZED |
5297     + FTRACE_OPS_FL_PID |
5298     FTRACE_OPS_FL_STUB,
5299     #ifdef FTRACE_GRAPH_TRAMP_ADDR
5300     .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
5301     diff --git a/lib/dma-debug.c b/lib/dma-debug.c
5302     index ae4b65e17e64..dace71fe41f7 100644
5303     --- a/lib/dma-debug.c
5304     +++ b/lib/dma-debug.c
5305     @@ -574,6 +574,9 @@ void debug_dma_assert_idle(struct page *page)
5306     unsigned long flags;
5307     phys_addr_t cln;
5308    
5309     + if (dma_debug_disabled())
5310     + return;
5311     +
5312     if (!page)
5313     return;
5314    
5315     diff --git a/net/can/af_can.c b/net/can/af_can.c
5316     index 689c818ed007..62c635f2bcfc 100644
5317     --- a/net/can/af_can.c
5318     +++ b/net/can/af_can.c
5319     @@ -89,6 +89,8 @@ struct timer_list can_stattimer; /* timer for statistics update */
5320     struct s_stats can_stats; /* packet statistics */
5321     struct s_pstats can_pstats; /* receive list statistics */
5322    
5323     +static atomic_t skbcounter = ATOMIC_INIT(0);
5324     +
5325     /*
5326     * af_can socket functions
5327     */
5328     @@ -310,12 +312,8 @@ int can_send(struct sk_buff *skb, int loop)
5329     return err;
5330     }
5331    
5332     - if (newskb) {
5333     - if (!(newskb->tstamp.tv64))
5334     - __net_timestamp(newskb);
5335     -
5336     + if (newskb)
5337     netif_rx_ni(newskb);
5338     - }
5339    
5340     /* update statistics */
5341     can_stats.tx_frames++;
5342     @@ -683,6 +681,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
5343     can_stats.rx_frames++;
5344     can_stats.rx_frames_delta++;
5345    
5346     + /* create non-zero unique skb identifier together with *skb */
5347     + while (!(can_skb_prv(skb)->skbcnt))
5348     + can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
5349     +
5350     rcu_read_lock();
5351    
5352     /* deliver the packet to sockets listening on all devices */
5353     diff --git a/net/can/bcm.c b/net/can/bcm.c
5354     index b523453585be..a1ba6875c2a2 100644
5355     --- a/net/can/bcm.c
5356     +++ b/net/can/bcm.c
5357     @@ -261,6 +261,7 @@ static void bcm_can_tx(struct bcm_op *op)
5358    
5359     can_skb_reserve(skb);
5360     can_skb_prv(skb)->ifindex = dev->ifindex;
5361     + can_skb_prv(skb)->skbcnt = 0;
5362    
5363     memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
5364    
5365     @@ -1217,6 +1218,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
5366     }
5367    
5368     can_skb_prv(skb)->ifindex = dev->ifindex;
5369     + can_skb_prv(skb)->skbcnt = 0;
5370     skb->dev = dev;
5371     can_skb_set_owner(skb, sk);
5372     err = can_send(skb, 1); /* send with loopback */
5373     diff --git a/net/can/raw.c b/net/can/raw.c
5374     index 31b9748cbb4e..2e67b1423cd3 100644
5375     --- a/net/can/raw.c
5376     +++ b/net/can/raw.c
5377     @@ -75,7 +75,7 @@ MODULE_ALIAS("can-proto-1");
5378     */
5379    
5380     struct uniqframe {
5381     - ktime_t tstamp;
5382     + int skbcnt;
5383     const struct sk_buff *skb;
5384     unsigned int join_rx_count;
5385     };
5386     @@ -133,7 +133,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
5387    
5388     /* eliminate multiple filter matches for the same skb */
5389     if (this_cpu_ptr(ro->uniq)->skb == oskb &&
5390     - ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
5391     + this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
5392     if (ro->join_filters) {
5393     this_cpu_inc(ro->uniq->join_rx_count);
5394     /* drop frame until all enabled filters matched */
5395     @@ -144,7 +144,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
5396     }
5397     } else {
5398     this_cpu_ptr(ro->uniq)->skb = oskb;
5399     - this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
5400     + this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
5401     this_cpu_ptr(ro->uniq)->join_rx_count = 1;
5402     /* drop first frame to check all enabled filters? */
5403     if (ro->join_filters && ro->count > 1)
5404     @@ -749,6 +749,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
5405    
5406     can_skb_reserve(skb);
5407     can_skb_prv(skb)->ifindex = dev->ifindex;
5408     + can_skb_prv(skb)->skbcnt = 0;
5409    
5410     err = memcpy_from_msg(skb_put(skb, size), msg, size);
5411     if (err < 0)
5412     diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
5413     index 29236e832e44..c09c0131bfa2 100644
5414     --- a/net/mac80211/debugfs_netdev.c
5415     +++ b/net/mac80211/debugfs_netdev.c
5416     @@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
5417    
5418     debugfs_remove_recursive(sdata->vif.debugfs_dir);
5419     sdata->vif.debugfs_dir = NULL;
5420     + sdata->debugfs.subdir_stations = NULL;
5421     }
5422    
5423     void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
5424     diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
5425     index 273b8bff6ba4..657ba9f5d308 100644
5426     --- a/net/rds/ib_rdma.c
5427     +++ b/net/rds/ib_rdma.c
5428     @@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
5429     }
5430    
5431     ibmr = rds_ib_alloc_fmr(rds_ibdev);
5432     - if (IS_ERR(ibmr))
5433     + if (IS_ERR(ibmr)) {
5434     + rds_ib_dev_put(rds_ibdev);
5435     return ibmr;
5436     + }
5437    
5438     ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
5439     if (ret == 0)
5440     diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
5441     index d126c03361ae..75888dd38a7f 100644
5442     --- a/sound/core/pcm_native.c
5443     +++ b/sound/core/pcm_native.c
5444     @@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
5445     void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
5446     {
5447     if (substream->pcm->nonatomic) {
5448     - down_read(&snd_pcm_link_rwsem);
5449     + down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
5450     mutex_lock(&substream->self_group.mutex);
5451     } else {
5452     read_lock(&snd_pcm_link_rwlock);
5453     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5454     index c403dd10d126..44dfc7b92bc3 100644
5455     --- a/sound/pci/hda/hda_intel.c
5456     +++ b/sound/pci/hda/hda_intel.c
5457     @@ -2056,6 +2056,8 @@ static const struct pci_device_id azx_ids[] = {
5458     /* ATI HDMI */
5459     { PCI_DEVICE(0x1002, 0x1308),
5460     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5461     + { PCI_DEVICE(0x1002, 0x157a),
5462     + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5463     { PCI_DEVICE(0x1002, 0x793b),
5464     .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
5465     { PCI_DEVICE(0x1002, 0x7919),
5466     @@ -2110,8 +2112,14 @@ static const struct pci_device_id azx_ids[] = {
5467     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5468     { PCI_DEVICE(0x1002, 0xaab0),
5469     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5470     + { PCI_DEVICE(0x1002, 0xaac0),
5471     + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5472     { PCI_DEVICE(0x1002, 0xaac8),
5473     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5474     + { PCI_DEVICE(0x1002, 0xaad8),
5475     + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5476     + { PCI_DEVICE(0x1002, 0xaae8),
5477     + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5478     /* VIA VT8251/VT8237A */
5479     { PCI_DEVICE(0x1106, 0x3288),
5480     .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
5481     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
5482     index 5f44f60a6389..225b78b4ef12 100644
5483     --- a/sound/pci/hda/patch_hdmi.c
5484     +++ b/sound/pci/hda/patch_hdmi.c
5485     @@ -3333,6 +3333,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
5486     { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
5487     { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
5488     { .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi },
5489     +{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP", .patch = patch_nvhdmi },
5490     { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
5491     { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
5492     { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
5493     @@ -3396,6 +3397,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
5494     MODULE_ALIAS("snd-hda-codec-id:10de0070");
5495     MODULE_ALIAS("snd-hda-codec-id:10de0071");
5496     MODULE_ALIAS("snd-hda-codec-id:10de0072");
5497     +MODULE_ALIAS("snd-hda-codec-id:10de007d");
5498     MODULE_ALIAS("snd-hda-codec-id:10de8001");
5499     MODULE_ALIAS("snd-hda-codec-id:11069f80");
5500     MODULE_ALIAS("snd-hda-codec-id:11069f81");
5501     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5502     index 0e75998db39f..590bcfb0e82f 100644
5503     --- a/sound/pci/hda/patch_realtek.c
5504     +++ b/sound/pci/hda/patch_realtek.c
5505     @@ -2224,7 +2224,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5506     SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
5507     SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
5508     SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
5509     - SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
5510     + SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
5511    
5512     SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
5513     SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
5514     @@ -5004,7 +5004,7 @@ static const struct hda_fixup alc269_fixups[] = {
5515     { 0x14, 0x90170110 },
5516     { 0x17, 0x40000008 },
5517     { 0x18, 0x411111f0 },
5518     - { 0x19, 0x411111f0 },
5519     + { 0x19, 0x01a1913c },
5520     { 0x1a, 0x411111f0 },
5521     { 0x1b, 0x411111f0 },
5522     { 0x1d, 0x40f89b2d },
5523     @@ -5114,6 +5114,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5524     SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5525     SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5526     SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
5527     + SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
5528     SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5529     SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5530     SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5531     @@ -5382,6 +5383,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5532     {0x1d, 0x40700001},
5533     {0x21, 0x02211030}),
5534     SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5535     + {0x12, 0x40000000},
5536     + {0x14, 0x90170130},
5537     + {0x17, 0x411111f0},
5538     + {0x18, 0x411111f0},
5539     + {0x19, 0x411111f0},
5540     + {0x1a, 0x411111f0},
5541     + {0x1b, 0x01014020},
5542     + {0x1d, 0x4054c029},
5543     + {0x1e, 0x411111f0},
5544     + {0x21, 0x0221103f}),
5545     + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5546     {0x12, 0x90a60160},
5547     {0x14, 0x90170120},
5548     {0x17, 0x90170140},
5549     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
5550     index 6c66d7e16439..25f0f45e6640 100644
5551     --- a/sound/pci/hda/patch_sigmatel.c
5552     +++ b/sound/pci/hda/patch_sigmatel.c
5553     @@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
5554     SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
5555     "HP Mini", STAC_92HD83XXX_HP_LED),
5556     SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
5557     - SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
5558     + /* match both for 0xfa91 and 0xfa93 */
5559     + SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
5560     "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
5561     {} /* terminator */
5562     };
5563     diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
5564     index 8461d6bf992f..204cc074adb9 100644
5565     --- a/sound/usb/line6/pcm.c
5566     +++ b/sound/usb/line6/pcm.c
5567     @@ -186,12 +186,8 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
5568     int ret = 0;
5569    
5570     spin_lock_irqsave(&pstr->lock, flags);
5571     - if (!test_and_set_bit(type, &pstr->running)) {
5572     - if (pstr->active_urbs || pstr->unlink_urbs) {
5573     - ret = -EBUSY;
5574     - goto error;
5575     - }
5576     -
5577     + if (!test_and_set_bit(type, &pstr->running) &&
5578     + !(pstr->active_urbs || pstr->unlink_urbs)) {
5579     pstr->count = 0;
5580     /* Submit all currently available URBs */
5581     if (direction == SNDRV_PCM_STREAM_PLAYBACK)
5582     @@ -199,7 +195,6 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
5583     else
5584     ret = line6_submit_audio_in_all_urbs(line6pcm);
5585     }
5586     - error:
5587     if (ret < 0)
5588     clear_bit(type, &pstr->running);
5589     spin_unlock_irqrestore(&pstr->lock, flags);
5590     diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
5591     index e5000da9e9d7..6a803eff87f7 100644
5592     --- a/sound/usb/mixer_maps.c
5593     +++ b/sound/usb/mixer_maps.c
5594     @@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
5595     { 0 }
5596     };
5597    
5598     +/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
5599     +static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
5600     +static struct usbmix_name_map bose_companion5_map[] = {
5601     + { 3, NULL, .dB = &bose_companion5_dB },
5602     + { 0 } /* terminator */
5603     +};
5604     +
5605     +/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
5606     +static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
5607     +static struct usbmix_name_map dragonfly_1_2_map[] = {
5608     + { 7, NULL, .dB = &dragonfly_1_2_dB },
5609     + { 0 } /* terminator */
5610     +};
5611     +
5612     /*
5613     * Control map entries
5614     */
5615     @@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
5616     .id = USB_ID(0x25c4, 0x0003),
5617     .map = scms_usb3318_map,
5618     },
5619     + {
5620     + /* Bose Companion 5 */
5621     + .id = USB_ID(0x05a7, 0x1020),
5622     + .map = bose_companion5_map,
5623     + },
5624     + {
5625     + /* Dragonfly DAC 1.2 */
5626     + .id = USB_ID(0x21b4, 0x0081),
5627     + .map = dragonfly_1_2_map,
5628     + },
5629     { 0 } /* terminator */
5630     };
5631    
5632     diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
5633     index 2f6d3e9a1bcd..e4756651a52c 100644
5634     --- a/sound/usb/quirks-table.h
5635     +++ b/sound/usb/quirks-table.h
5636     @@ -2512,6 +2512,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
5637     }
5638     },
5639    
5640     +/* Steinberg devices */
5641     +{
5642     + /* Steinberg MI2 */
5643     + USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
5644     + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
5645     + .ifnum = QUIRK_ANY_INTERFACE,
5646     + .type = QUIRK_COMPOSITE,
5647     + .data = & (const struct snd_usb_audio_quirk[]) {
5648     + {
5649     + .ifnum = 0,
5650     + .type = QUIRK_AUDIO_STANDARD_INTERFACE
5651     + },
5652     + {
5653     + .ifnum = 1,
5654     + .type = QUIRK_AUDIO_STANDARD_INTERFACE
5655     + },
5656     + {
5657     + .ifnum = 2,
5658     + .type = QUIRK_AUDIO_STANDARD_INTERFACE
5659     + },
5660     + {
5661     + .ifnum = 3,
5662     + .type = QUIRK_MIDI_FIXED_ENDPOINT,
5663     + .data = &(const struct snd_usb_midi_endpoint_info) {
5664     + .out_cables = 0x0001,
5665     + .in_cables = 0x0001
5666     + }
5667     + },
5668     + {
5669     + .ifnum = -1
5670     + }
5671     + }
5672     + }
5673     +},
5674     +{
5675     + /* Steinberg MI4 */
5676     + USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
5677     + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
5678     + .ifnum = QUIRK_ANY_INTERFACE,
5679     + .type = QUIRK_COMPOSITE,
5680     + .data = & (const struct snd_usb_audio_quirk[]) {
5681     + {
5682     + .ifnum = 0,
5683     + .type = QUIRK_AUDIO_STANDARD_INTERFACE
5684     + },
5685     + {
5686     + .ifnum = 1,
5687     + .type = QUIRK_AUDIO_STANDARD_INTERFACE
5688     + },
5689     + {
5690     + .ifnum = 2,
5691     + .type = QUIRK_AUDIO_STANDARD_INTERFACE
5692     + },
5693     + {
5694     + .ifnum = 3,
5695     + .type = QUIRK_MIDI_FIXED_ENDPOINT,
5696     + .data = &(const struct snd_usb_midi_endpoint_info) {
5697     + .out_cables = 0x0001,
5698     + .in_cables = 0x0001
5699     + }
5700     + },
5701     + {
5702     + .ifnum = -1
5703     + }
5704     + }
5705     + }
5706     +},
5707     +
5708     /* TerraTec devices */
5709     {
5710     USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
5711     diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
5712     index 995b7a8596b1..658b0a89796d 100644
5713     --- a/tools/perf/ui/browsers/hists.c
5714     +++ b/tools/perf/ui/browsers/hists.c
5715     @@ -45,7 +45,7 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd,
5716    
5717     static bool hist_browser__has_filter(struct hist_browser *hb)
5718     {
5719     - return hists__has_filter(hb->hists) || hb->min_pcnt;
5720     + return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter;
5721     }
5722    
5723     static int hist_browser__get_folding(struct hist_browser *browser)
5724     diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
5725     index 201f6c4ca738..99378a5c57a7 100644
5726     --- a/tools/perf/util/symbol.c
5727     +++ b/tools/perf/util/symbol.c
5728     @@ -1893,6 +1893,8 @@ int setup_intlist(struct intlist **list, const char *list_str,
5729     pr_err("problems parsing %s list\n", list_name);
5730     return -1;
5731     }
5732     +
5733     + symbol_conf.has_filter = true;
5734     return 0;
5735     }
5736    
5737     diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
5738     index 09561500164a..be0217989bcc 100644
5739     --- a/tools/perf/util/symbol.h
5740     +++ b/tools/perf/util/symbol.h
5741     @@ -105,7 +105,8 @@ struct symbol_conf {
5742     demangle_kernel,
5743     filter_relative,
5744     show_hist_headers,
5745     - branch_callstack;
5746     + branch_callstack,
5747     + has_filter;
5748     const char *vmlinux_name,
5749     *kallsyms_name,
5750     *source_prefix,