Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.5/0100-4.5.1-all-fixes.patch

Parent Directory | Revision Log


Revision 2782 - (hide annotations) (download)
Fri May 13 07:36:15 2016 UTC (8 years ago) by niro
File size: 314273 byte(s)
-linux-4.5.1
1 niro 2782 diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
2     index ff49cf901148..81eb378210c6 100644
3     --- a/Documentation/cgroup-v2.txt
4     +++ b/Documentation/cgroup-v2.txt
5     @@ -1368,6 +1368,12 @@ system than killing the group. Otherwise, memory.max is there to
6     limit this type of spillover and ultimately contain buggy or even
7     malicious applications.
8    
9     +Setting the original memory.limit_in_bytes below the current usage was
10     +subject to a race condition, where concurrent charges could cause the
11     +limit setting to fail. memory.max on the other hand will first set the
12     +limit to prevent new charges, and then reclaim and OOM kill until the
13     +new limit is met - or the task writing to memory.max is killed.
14     +
15     The combined memory+swap accounting and limiting is replaced by real
16     control over swap space.
17    
18     diff --git a/MAINTAINERS b/MAINTAINERS
19     index 6ee06ea47be4..5a389bc68e0e 100644
20     --- a/MAINTAINERS
21     +++ b/MAINTAINERS
22     @@ -228,13 +228,13 @@ F: kernel/sys_ni.c
23    
24     ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
25     M: Hans de Goede <hdegoede@redhat.com>
26     -L: lm-sensors@lm-sensors.org
27     +L: linux-hwmon@vger.kernel.org
28     S: Maintained
29     F: drivers/hwmon/abituguru.c
30    
31     ABIT UGURU 3 HARDWARE MONITOR DRIVER
32     M: Alistair John Strachan <alistair@devzero.co.uk>
33     -L: lm-sensors@lm-sensors.org
34     +L: linux-hwmon@vger.kernel.org
35     S: Maintained
36     F: drivers/hwmon/abituguru3.c
37    
38     @@ -386,14 +386,14 @@ F: Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
39    
40     ADM1025 HARDWARE MONITOR DRIVER
41     M: Jean Delvare <jdelvare@suse.com>
42     -L: lm-sensors@lm-sensors.org
43     +L: linux-hwmon@vger.kernel.org
44     S: Maintained
45     F: Documentation/hwmon/adm1025
46     F: drivers/hwmon/adm1025.c
47    
48     ADM1029 HARDWARE MONITOR DRIVER
49     M: Corentin Labbe <clabbe.montjoie@gmail.com>
50     -L: lm-sensors@lm-sensors.org
51     +L: linux-hwmon@vger.kernel.org
52     S: Maintained
53     F: drivers/hwmon/adm1029.c
54    
55     @@ -438,7 +438,7 @@ F: drivers/video/backlight/adp8860_bl.c
56    
57     ADS1015 HARDWARE MONITOR DRIVER
58     M: Dirk Eibach <eibach@gdsys.de>
59     -L: lm-sensors@lm-sensors.org
60     +L: linux-hwmon@vger.kernel.org
61     S: Maintained
62     F: Documentation/hwmon/ads1015
63     F: drivers/hwmon/ads1015.c
64     @@ -451,7 +451,7 @@ F: drivers/macintosh/therm_adt746x.c
65    
66     ADT7475 HARDWARE MONITOR DRIVER
67     M: Jean Delvare <jdelvare@suse.com>
68     -L: lm-sensors@lm-sensors.org
69     +L: linux-hwmon@vger.kernel.org
70     S: Maintained
71     F: Documentation/hwmon/adt7475
72     F: drivers/hwmon/adt7475.c
73     @@ -628,7 +628,7 @@ F: include/linux/ccp.h
74    
75     AMD FAM15H PROCESSOR POWER MONITORING DRIVER
76     M: Huang Rui <ray.huang@amd.com>
77     -L: lm-sensors@lm-sensors.org
78     +L: linux-hwmon@vger.kernel.org
79     S: Supported
80     F: Documentation/hwmon/fam15h_power
81     F: drivers/hwmon/fam15h_power.c
82     @@ -786,7 +786,7 @@ F: drivers/input/mouse/bcm5974.c
83    
84     APPLE SMC DRIVER
85     M: Henrik Rydberg <rydberg@bitmath.org>
86     -L: lm-sensors@lm-sensors.org
87     +L: linux-hwmon@vger.kernel.org
88     S: Odd fixes
89     F: drivers/hwmon/applesmc.c
90    
91     @@ -1825,7 +1825,7 @@ F: include/media/i2c/as3645a.h
92    
93     ASC7621 HARDWARE MONITOR DRIVER
94     M: George Joseph <george.joseph@fairview5.com>
95     -L: lm-sensors@lm-sensors.org
96     +L: linux-hwmon@vger.kernel.org
97     S: Maintained
98     F: Documentation/hwmon/asc7621
99     F: drivers/hwmon/asc7621.c
100     @@ -1918,7 +1918,7 @@ F: drivers/net/wireless/ath/carl9170/
101    
102     ATK0110 HWMON DRIVER
103     M: Luca Tettamanti <kronos.it@gmail.com>
104     -L: lm-sensors@lm-sensors.org
105     +L: linux-hwmon@vger.kernel.org
106     S: Maintained
107     F: drivers/hwmon/asus_atk0110.c
108    
109     @@ -3037,7 +3037,7 @@ F: mm/swap_cgroup.c
110    
111     CORETEMP HARDWARE MONITORING DRIVER
112     M: Fenghua Yu <fenghua.yu@intel.com>
113     -L: lm-sensors@lm-sensors.org
114     +L: linux-hwmon@vger.kernel.org
115     S: Maintained
116     F: Documentation/hwmon/coretemp
117     F: drivers/hwmon/coretemp.c
118     @@ -3625,7 +3625,7 @@ T: git git://git.infradead.org/users/vkoul/slave-dma.git
119    
120     DME1737 HARDWARE MONITOR DRIVER
121     M: Juerg Haefliger <juergh@gmail.com>
122     -L: lm-sensors@lm-sensors.org
123     +L: linux-hwmon@vger.kernel.org
124     S: Maintained
125     F: Documentation/hwmon/dme1737
126     F: drivers/hwmon/dme1737.c
127     @@ -4322,7 +4322,7 @@ F: include/video/exynos_mipi*
128    
129     F71805F HARDWARE MONITORING DRIVER
130     M: Jean Delvare <jdelvare@suse.com>
131     -L: lm-sensors@lm-sensors.org
132     +L: linux-hwmon@vger.kernel.org
133     S: Maintained
134     F: Documentation/hwmon/f71805f
135     F: drivers/hwmon/f71805f.c
136     @@ -4401,7 +4401,7 @@ F: fs/*
137    
138     FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
139     M: Riku Voipio <riku.voipio@iki.fi>
140     -L: lm-sensors@lm-sensors.org
141     +L: linux-hwmon@vger.kernel.org
142     S: Maintained
143     F: drivers/hwmon/f75375s.c
144     F: include/linux/f75375s.h
145     @@ -4958,8 +4958,8 @@ F: drivers/media/usb/hackrf/
146     HARDWARE MONITORING
147     M: Jean Delvare <jdelvare@suse.com>
148     M: Guenter Roeck <linux@roeck-us.net>
149     -L: lm-sensors@lm-sensors.org
150     -W: http://www.lm-sensors.org/
151     +L: linux-hwmon@vger.kernel.org
152     +W: http://hwmon.wiki.kernel.org/
153     T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
154     T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
155     S: Maintained
156     @@ -5484,7 +5484,7 @@ F: drivers/usb/atm/ueagle-atm.c
157    
158     INA209 HARDWARE MONITOR DRIVER
159     M: Guenter Roeck <linux@roeck-us.net>
160     -L: lm-sensors@lm-sensors.org
161     +L: linux-hwmon@vger.kernel.org
162     S: Maintained
163     F: Documentation/hwmon/ina209
164     F: Documentation/devicetree/bindings/i2c/ina209.txt
165     @@ -5492,7 +5492,7 @@ F: drivers/hwmon/ina209.c
166    
167     INA2XX HARDWARE MONITOR DRIVER
168     M: Guenter Roeck <linux@roeck-us.net>
169     -L: lm-sensors@lm-sensors.org
170     +L: linux-hwmon@vger.kernel.org
171     S: Maintained
172     F: Documentation/hwmon/ina2xx
173     F: drivers/hwmon/ina2xx.c
174     @@ -5985,7 +5985,7 @@ F: drivers/isdn/hardware/eicon/
175    
176     IT87 HARDWARE MONITORING DRIVER
177     M: Jean Delvare <jdelvare@suse.com>
178     -L: lm-sensors@lm-sensors.org
179     +L: linux-hwmon@vger.kernel.org
180     S: Maintained
181     F: Documentation/hwmon/it87
182     F: drivers/hwmon/it87.c
183     @@ -6021,7 +6021,7 @@ F: drivers/media/dvb-frontends/ix2505v*
184    
185     JC42.4 TEMPERATURE SENSOR DRIVER
186     M: Guenter Roeck <linux@roeck-us.net>
187     -L: lm-sensors@lm-sensors.org
188     +L: linux-hwmon@vger.kernel.org
189     S: Maintained
190     F: drivers/hwmon/jc42.c
191     F: Documentation/hwmon/jc42
192     @@ -6071,14 +6071,14 @@ F: drivers/tty/serial/jsm/
193    
194     K10TEMP HARDWARE MONITORING DRIVER
195     M: Clemens Ladisch <clemens@ladisch.de>
196     -L: lm-sensors@lm-sensors.org
197     +L: linux-hwmon@vger.kernel.org
198     S: Maintained
199     F: Documentation/hwmon/k10temp
200     F: drivers/hwmon/k10temp.c
201    
202     K8TEMP HARDWARE MONITORING DRIVER
203     M: Rudolf Marek <r.marek@assembler.cz>
204     -L: lm-sensors@lm-sensors.org
205     +L: linux-hwmon@vger.kernel.org
206     S: Maintained
207     F: Documentation/hwmon/k8temp
208     F: drivers/hwmon/k8temp.c
209     @@ -6605,27 +6605,27 @@ F: net/llc/
210    
211     LM73 HARDWARE MONITOR DRIVER
212     M: Guillaume Ligneul <guillaume.ligneul@gmail.com>
213     -L: lm-sensors@lm-sensors.org
214     +L: linux-hwmon@vger.kernel.org
215     S: Maintained
216     F: drivers/hwmon/lm73.c
217    
218     LM78 HARDWARE MONITOR DRIVER
219     M: Jean Delvare <jdelvare@suse.com>
220     -L: lm-sensors@lm-sensors.org
221     +L: linux-hwmon@vger.kernel.org
222     S: Maintained
223     F: Documentation/hwmon/lm78
224     F: drivers/hwmon/lm78.c
225    
226     LM83 HARDWARE MONITOR DRIVER
227     M: Jean Delvare <jdelvare@suse.com>
228     -L: lm-sensors@lm-sensors.org
229     +L: linux-hwmon@vger.kernel.org
230     S: Maintained
231     F: Documentation/hwmon/lm83
232     F: drivers/hwmon/lm83.c
233    
234     LM90 HARDWARE MONITOR DRIVER
235     M: Jean Delvare <jdelvare@suse.com>
236     -L: lm-sensors@lm-sensors.org
237     +L: linux-hwmon@vger.kernel.org
238     S: Maintained
239     F: Documentation/hwmon/lm90
240     F: Documentation/devicetree/bindings/hwmon/lm90.txt
241     @@ -6633,7 +6633,7 @@ F: drivers/hwmon/lm90.c
242    
243     LM95234 HARDWARE MONITOR DRIVER
244     M: Guenter Roeck <linux@roeck-us.net>
245     -L: lm-sensors@lm-sensors.org
246     +L: linux-hwmon@vger.kernel.org
247     S: Maintained
248     F: Documentation/hwmon/lm95234
249     F: drivers/hwmon/lm95234.c
250     @@ -6700,7 +6700,7 @@ F: drivers/scsi/sym53c8xx_2/
251    
252     LTC4261 HARDWARE MONITOR DRIVER
253     M: Guenter Roeck <linux@roeck-us.net>
254     -L: lm-sensors@lm-sensors.org
255     +L: linux-hwmon@vger.kernel.org
256     S: Maintained
257     F: Documentation/hwmon/ltc4261
258     F: drivers/hwmon/ltc4261.c
259     @@ -6870,28 +6870,28 @@ F: include/uapi/linux/matroxfb.h
260    
261     MAX16065 HARDWARE MONITOR DRIVER
262     M: Guenter Roeck <linux@roeck-us.net>
263     -L: lm-sensors@lm-sensors.org
264     +L: linux-hwmon@vger.kernel.org
265     S: Maintained
266     F: Documentation/hwmon/max16065
267     F: drivers/hwmon/max16065.c
268    
269     MAX20751 HARDWARE MONITOR DRIVER
270     M: Guenter Roeck <linux@roeck-us.net>
271     -L: lm-sensors@lm-sensors.org
272     +L: linux-hwmon@vger.kernel.org
273     S: Maintained
274     F: Documentation/hwmon/max20751
275     F: drivers/hwmon/max20751.c
276    
277     MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
278     M: "Hans J. Koch" <hjk@hansjkoch.de>
279     -L: lm-sensors@lm-sensors.org
280     +L: linux-hwmon@vger.kernel.org
281     S: Maintained
282     F: Documentation/hwmon/max6650
283     F: drivers/hwmon/max6650.c
284    
285     MAX6697 HARDWARE MONITOR DRIVER
286     M: Guenter Roeck <linux@roeck-us.net>
287     -L: lm-sensors@lm-sensors.org
288     +L: linux-hwmon@vger.kernel.org
289     S: Maintained
290     F: Documentation/hwmon/max6697
291     F: Documentation/devicetree/bindings/i2c/max6697.txt
292     @@ -7455,7 +7455,7 @@ F: drivers/scsi/NCR_D700.*
293    
294     NCT6775 HARDWARE MONITOR DRIVER
295     M: Guenter Roeck <linux@roeck-us.net>
296     -L: lm-sensors@lm-sensors.org
297     +L: linux-hwmon@vger.kernel.org
298     S: Maintained
299     F: Documentation/hwmon/nct6775
300     F: drivers/hwmon/nct6775.c
301     @@ -8235,7 +8235,7 @@ F: drivers/video/logo/logo_parisc*
302    
303     PC87360 HARDWARE MONITORING DRIVER
304     M: Jim Cromie <jim.cromie@gmail.com>
305     -L: lm-sensors@lm-sensors.org
306     +L: linux-hwmon@vger.kernel.org
307     S: Maintained
308     F: Documentation/hwmon/pc87360
309     F: drivers/hwmon/pc87360.c
310     @@ -8247,7 +8247,7 @@ F: drivers/char/pc8736x_gpio.c
311    
312     PC87427 HARDWARE MONITORING DRIVER
313     M: Jean Delvare <jdelvare@suse.com>
314     -L: lm-sensors@lm-sensors.org
315     +L: linux-hwmon@vger.kernel.org
316     S: Maintained
317     F: Documentation/hwmon/pc87427
318     F: drivers/hwmon/pc87427.c
319     @@ -8601,8 +8601,8 @@ F: drivers/rtc/rtc-puv3.c
320    
321     PMBUS HARDWARE MONITORING DRIVERS
322     M: Guenter Roeck <linux@roeck-us.net>
323     -L: lm-sensors@lm-sensors.org
324     -W: http://www.lm-sensors.org/
325     +L: linux-hwmon@vger.kernel.org
326     +W: http://hwmon.wiki.kernel.org/
327     W: http://www.roeck-us.net/linux/drivers/
328     T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
329     S: Maintained
330     @@ -8807,7 +8807,7 @@ F: drivers/media/usb/pwc/*
331    
332     PWM FAN DRIVER
333     M: Kamil Debski <k.debski@samsung.com>
334     -L: lm-sensors@lm-sensors.org
335     +L: linux-hwmon@vger.kernel.org
336     S: Supported
337     F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
338     F: Documentation/hwmon/pwm-fan
339     @@ -10113,28 +10113,28 @@ F: Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
340    
341     SMM665 HARDWARE MONITOR DRIVER
342     M: Guenter Roeck <linux@roeck-us.net>
343     -L: lm-sensors@lm-sensors.org
344     +L: linux-hwmon@vger.kernel.org
345     S: Maintained
346     F: Documentation/hwmon/smm665
347     F: drivers/hwmon/smm665.c
348    
349     SMSC EMC2103 HARDWARE MONITOR DRIVER
350     M: Steve Glendinning <steve.glendinning@shawell.net>
351     -L: lm-sensors@lm-sensors.org
352     +L: linux-hwmon@vger.kernel.org
353     S: Maintained
354     F: Documentation/hwmon/emc2103
355     F: drivers/hwmon/emc2103.c
356    
357     SMSC SCH5627 HARDWARE MONITOR DRIVER
358     M: Hans de Goede <hdegoede@redhat.com>
359     -L: lm-sensors@lm-sensors.org
360     +L: linux-hwmon@vger.kernel.org
361     S: Supported
362     F: Documentation/hwmon/sch5627
363     F: drivers/hwmon/sch5627.c
364    
365     SMSC47B397 HARDWARE MONITOR DRIVER
366     M: Jean Delvare <jdelvare@suse.com>
367     -L: lm-sensors@lm-sensors.org
368     +L: linux-hwmon@vger.kernel.org
369     S: Maintained
370     F: Documentation/hwmon/smsc47b397
371     F: drivers/hwmon/smsc47b397.c
372     @@ -11067,7 +11067,7 @@ F: include/linux/mmc/sh_mobile_sdhi.h
373    
374     TMP401 HARDWARE MONITOR DRIVER
375     M: Guenter Roeck <linux@roeck-us.net>
376     -L: lm-sensors@lm-sensors.org
377     +L: linux-hwmon@vger.kernel.org
378     S: Maintained
379     F: Documentation/hwmon/tmp401
380     F: drivers/hwmon/tmp401.c
381     @@ -11812,14 +11812,14 @@ F: Documentation/networking/vrf.txt
382    
383     VT1211 HARDWARE MONITOR DRIVER
384     M: Juerg Haefliger <juergh@gmail.com>
385     -L: lm-sensors@lm-sensors.org
386     +L: linux-hwmon@vger.kernel.org
387     S: Maintained
388     F: Documentation/hwmon/vt1211
389     F: drivers/hwmon/vt1211.c
390    
391     VT8231 HARDWARE MONITOR DRIVER
392     M: Roger Lucas <vt8231@hiddenengine.co.uk>
393     -L: lm-sensors@lm-sensors.org
394     +L: linux-hwmon@vger.kernel.org
395     S: Maintained
396     F: drivers/hwmon/vt8231.c
397    
398     @@ -11838,21 +11838,21 @@ F: drivers/w1/
399    
400     W83791D HARDWARE MONITORING DRIVER
401     M: Marc Hulsman <m.hulsman@tudelft.nl>
402     -L: lm-sensors@lm-sensors.org
403     +L: linux-hwmon@vger.kernel.org
404     S: Maintained
405     F: Documentation/hwmon/w83791d
406     F: drivers/hwmon/w83791d.c
407    
408     W83793 HARDWARE MONITORING DRIVER
409     M: Rudolf Marek <r.marek@assembler.cz>
410     -L: lm-sensors@lm-sensors.org
411     +L: linux-hwmon@vger.kernel.org
412     S: Maintained
413     F: Documentation/hwmon/w83793
414     F: drivers/hwmon/w83793.c
415    
416     W83795 HARDWARE MONITORING DRIVER
417     M: Jean Delvare <jdelvare@suse.com>
418     -L: lm-sensors@lm-sensors.org
419     +L: linux-hwmon@vger.kernel.org
420     S: Maintained
421     F: drivers/hwmon/w83795.c
422    
423     diff --git a/Makefile b/Makefile
424     index 7b3ecdcdc6c1..c621889b8827 100644
425     --- a/Makefile
426     +++ b/Makefile
427     @@ -1,6 +1,6 @@
428     VERSION = 4
429     PATCHLEVEL = 5
430     -SUBLEVEL = 0
431     +SUBLEVEL = 1
432     EXTRAVERSION =
433     NAME = Blurry Fish Butt
434    
435     diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
436     index 44a578c10732..ab5d5701e11d 100644
437     --- a/arch/arc/boot/dts/axs10x_mb.dtsi
438     +++ b/arch/arc/boot/dts/axs10x_mb.dtsi
439     @@ -47,6 +47,14 @@
440     clocks = <&apbclk>;
441     clock-names = "stmmaceth";
442     max-speed = <100>;
443     + mdio0 {
444     + #address-cells = <1>;
445     + #size-cells = <0>;
446     + compatible = "snps,dwmac-mdio";
447     + phy1: ethernet-phy@1 {
448     + reg = <1>;
449     + };
450     + };
451     };
452    
453     ehci@0x40000 {
454     diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
455     index 57c1f33844d4..0352fb8d21b9 100644
456     --- a/arch/arc/include/asm/bitops.h
457     +++ b/arch/arc/include/asm/bitops.h
458     @@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
459     \
460     m += nr >> 5; \
461     \
462     - /* \
463     - * ARC ISA micro-optimization: \
464     - * \
465     - * Instructions dealing with bitpos only consider lower 5 bits \
466     - * e.g (x << 33) is handled like (x << 1) by ASL instruction \
467     - * (mem pointer still needs adjustment to point to next word) \
468     - * \
469     - * Hence the masking to clamp @nr arg can be elided in general. \
470     - * \
471     - * However if @nr is a constant (above assumed in a register), \
472     - * and greater than 31, gcc can optimize away (x << 33) to 0, \
473     - * as overflow, given the 32-bit ISA. Thus masking needs to be \
474     - * done for const @nr, but no code is generated due to gcc \
475     - * const prop. \
476     - */ \
477     nr &= 0x1f; \
478     \
479     __asm__ __volatile__( \
480     diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
481     index 694ece8a0243..27b17adea50d 100644
482     --- a/arch/arc/include/asm/io.h
483     +++ b/arch/arc/include/asm/io.h
484     @@ -129,15 +129,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
485     #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
486    
487     /*
488     - * Relaxed API for drivers which can handle any ordering themselves
489     + * Relaxed API for drivers which can handle barrier ordering themselves
490     + *
491     + * Also these are defined to perform little endian accesses.
492     + * To provide the typical device register semantics of fixed endian,
493     + * swap the byte order for Big Endian
494     + *
495     + * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
496     */
497     #define readb_relaxed(c) __raw_readb(c)
498     -#define readw_relaxed(c) __raw_readw(c)
499     -#define readl_relaxed(c) __raw_readl(c)
500     +#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
501     + __raw_readw(c)); __r; })
502     +#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
503     + __raw_readl(c)); __r; })
504    
505     #define writeb_relaxed(v,c) __raw_writeb(v,c)
506     -#define writew_relaxed(v,c) __raw_writew(v,c)
507     -#define writel_relaxed(v,c) __raw_writel(v,c)
508     +#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
509     +#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
510    
511     #include <asm-generic/io.h>
512    
513     diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
514     index ff888d21c786..f3e2b96c06a3 100644
515     --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
516     +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
517     @@ -303,6 +303,7 @@
518     regulator-name = "mmc0-card-supply";
519     regulator-min-microvolt = <3300000>;
520     regulator-max-microvolt = <3300000>;
521     + regulator-always-on;
522     };
523    
524     gpio_keys {
525     diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
526     index 569026e8f96c..da84e65b56ef 100644
527     --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
528     +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
529     @@ -268,5 +268,6 @@
530     regulator-min-microvolt = <3300000>;
531     regulator-max-microvolt = <3300000>;
532     vin-supply = <&vcc_3v3_reg>;
533     + regulator-always-on;
534     };
535     };
536     diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
537     index 819aff5d593f..7273210782d5 100644
538     --- a/arch/arm64/include/asm/pgtable.h
539     +++ b/arch/arm64/include/asm/pgtable.h
540     @@ -279,7 +279,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
541     static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
542     pte_t *ptep, pte_t pte)
543     {
544     - if (pte_valid(pte)) {
545     + if (pte_present(pte)) {
546     if (pte_sw_dirty(pte) && pte_write(pte))
547     pte_val(pte) &= ~PTE_RDONLY;
548     else
549     @@ -649,6 +649,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
550     * bits 0-1: present (must be zero)
551     * bits 2-7: swap type
552     * bits 8-57: swap offset
553     + * bit 58: PTE_PROT_NONE (must be zero)
554     */
555     #define __SWP_TYPE_SHIFT 2
556     #define __SWP_TYPE_BITS 6
557     diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
558     index a865d2a04f75..5de673ac9cb1 100644
559     --- a/arch/ia64/include/asm/io.h
560     +++ b/arch/ia64/include/asm/io.h
561     @@ -433,6 +433,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
562     return ioremap(phys_addr, size);
563     }
564     #define ioremap_cache ioremap_cache
565     +#define ioremap_uc ioremap_nocache
566    
567    
568     /*
569     diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
570     index c873e682b67f..2b2ced9dc00a 100644
571     --- a/arch/s390/include/asm/pci.h
572     +++ b/arch/s390/include/asm/pci.h
573     @@ -45,7 +45,7 @@ struct zpci_fmb {
574     u64 rpcit_ops;
575     u64 dma_rbytes;
576     u64 dma_wbytes;
577     -} __packed __aligned(16);
578     +} __packed __aligned(64);
579    
580     enum zpci_state {
581     ZPCI_FN_STATE_RESERVED,
582     diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
583     index cd5a191381b9..c920b81be5bb 100644
584     --- a/arch/s390/kernel/entry.S
585     +++ b/arch/s390/kernel/entry.S
586     @@ -1199,114 +1199,12 @@ cleanup_critical:
587     .quad .Lpsw_idle_lpsw
588    
589     .Lcleanup_save_fpu_regs:
590     - TSTMSK __LC_CPU_FLAGS,_CIF_FPU
591     - bor %r14
592     - clg %r9,BASED(.Lcleanup_save_fpu_regs_done)
593     - jhe 5f
594     - clg %r9,BASED(.Lcleanup_save_fpu_regs_fp)
595     - jhe 4f
596     - clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
597     - jhe 3f
598     - clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
599     - jhe 2f
600     - clg %r9,BASED(.Lcleanup_save_fpu_fpc_end)
601     - jhe 1f
602     - lg %r2,__LC_CURRENT
603     - aghi %r2,__TASK_thread
604     -0: # Store floating-point controls
605     - stfpc __THREAD_FPU_fpc(%r2)
606     -1: # Load register save area and check if VX is active
607     - lg %r3,__THREAD_FPU_regs(%r2)
608     - TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
609     - jz 4f # no VX -> store FP regs
610     -2: # Store vector registers (V0-V15)
611     - VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
612     -3: # Store vector registers (V16-V31)
613     - VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
614     - j 5f # -> done, set CIF_FPU flag
615     -4: # Store floating-point registers
616     - std 0,0(%r3)
617     - std 1,8(%r3)
618     - std 2,16(%r3)
619     - std 3,24(%r3)
620     - std 4,32(%r3)
621     - std 5,40(%r3)
622     - std 6,48(%r3)
623     - std 7,56(%r3)
624     - std 8,64(%r3)
625     - std 9,72(%r3)
626     - std 10,80(%r3)
627     - std 11,88(%r3)
628     - std 12,96(%r3)
629     - std 13,104(%r3)
630     - std 14,112(%r3)
631     - std 15,120(%r3)
632     -5: # Set CIF_FPU flag
633     - oi __LC_CPU_FLAGS+7,_CIF_FPU
634     - lg %r9,48(%r11) # return from save_fpu_regs
635     + larl %r9,save_fpu_regs
636     br %r14
637     -.Lcleanup_save_fpu_fpc_end:
638     - .quad .Lsave_fpu_regs_fpc_end
639     -.Lcleanup_save_fpu_regs_vx_low:
640     - .quad .Lsave_fpu_regs_vx_low
641     -.Lcleanup_save_fpu_regs_vx_high:
642     - .quad .Lsave_fpu_regs_vx_high
643     -.Lcleanup_save_fpu_regs_fp:
644     - .quad .Lsave_fpu_regs_fp
645     -.Lcleanup_save_fpu_regs_done:
646     - .quad .Lsave_fpu_regs_done
647    
648     .Lcleanup_load_fpu_regs:
649     - TSTMSK __LC_CPU_FLAGS,_CIF_FPU
650     - bnor %r14
651     - clg %r9,BASED(.Lcleanup_load_fpu_regs_done)
652     - jhe 1f
653     - clg %r9,BASED(.Lcleanup_load_fpu_regs_fp)
654     - jhe 2f
655     - clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
656     - jhe 3f
657     - clg %r9,BASED(.Lcleanup_load_fpu_regs_vx)
658     - jhe 4f
659     - lg %r4,__LC_CURRENT
660     - aghi %r4,__TASK_thread
661     - lfpc __THREAD_FPU_fpc(%r4)
662     - TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
663     - lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
664     - jz 2f # -> no VX, load FP regs
665     -4: # Load V0 ..V15 registers
666     - VLM %v0,%v15,0,%r4
667     -3: # Load V16..V31 registers
668     - VLM %v16,%v31,256,%r4
669     - j 1f
670     -2: # Load floating-point registers
671     - ld 0,0(%r4)
672     - ld 1,8(%r4)
673     - ld 2,16(%r4)
674     - ld 3,24(%r4)
675     - ld 4,32(%r4)
676     - ld 5,40(%r4)
677     - ld 6,48(%r4)
678     - ld 7,56(%r4)
679     - ld 8,64(%r4)
680     - ld 9,72(%r4)
681     - ld 10,80(%r4)
682     - ld 11,88(%r4)
683     - ld 12,96(%r4)
684     - ld 13,104(%r4)
685     - ld 14,112(%r4)
686     - ld 15,120(%r4)
687     -1: # Clear CIF_FPU bit
688     - ni __LC_CPU_FLAGS+7,255-_CIF_FPU
689     - lg %r9,48(%r11) # return from load_fpu_regs
690     + larl %r9,load_fpu_regs
691     br %r14
692     -.Lcleanup_load_fpu_regs_vx:
693     - .quad .Lload_fpu_regs_vx
694     -.Lcleanup_load_fpu_regs_vx_high:
695     - .quad .Lload_fpu_regs_vx_high
696     -.Lcleanup_load_fpu_regs_fp:
697     - .quad .Lload_fpu_regs_fp
698     -.Lcleanup_load_fpu_regs_done:
699     - .quad .Lload_fpu_regs_done
700    
701     /*
702     * Integer constants
703     diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
704     index 9220db5c996a..93fc63ef6e95 100644
705     --- a/arch/s390/kernel/setup.c
706     +++ b/arch/s390/kernel/setup.c
707     @@ -327,6 +327,7 @@ static void __init setup_lowcore(void)
708     + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
709     lc->current_task = (unsigned long) init_thread_union.thread_info.task;
710     lc->thread_info = (unsigned long) &init_thread_union;
711     + lc->lpp = LPP_MAGIC;
712     lc->machine_flags = S390_lowcore.machine_flags;
713     lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
714     memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
715     diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
716     index 8f19c8f9d660..8f75edc998ff 100644
717     --- a/arch/s390/pci/pci.c
718     +++ b/arch/s390/pci/pci.c
719     @@ -864,8 +864,11 @@ static inline int barsize(u8 size)
720    
721     static int zpci_mem_init(void)
722     {
723     + BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
724     + __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
725     +
726     zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
727     - 16, 0, NULL);
728     + __alignof__(struct zpci_fmb), 0, NULL);
729     if (!zdev_fmb_cache)
730     goto error_fmb;
731    
732     diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
733     index ec29e14ec5a8..bf25d7c79a2d 100644
734     --- a/arch/sh/mm/kmap.c
735     +++ b/arch/sh/mm/kmap.c
736     @@ -36,6 +36,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
737    
738     BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
739    
740     + preempt_disable();
741     pagefault_disable();
742    
743     idx = FIX_CMAP_END -
744     @@ -64,4 +65,5 @@ void kunmap_coherent(void *kvaddr)
745     }
746    
747     pagefault_enable();
748     + preempt_enable();
749     }
750     diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
751     index b821b13d343a..8a6b57108ac2 100644
752     --- a/arch/um/drivers/mconsole_kern.c
753     +++ b/arch/um/drivers/mconsole_kern.c
754     @@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
755     ptr += strlen("proc");
756     ptr = skip_spaces(ptr);
757    
758     - file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
759     + file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
760     if (IS_ERR(file)) {
761     mconsole_reply(req, "Failed to open file", 1, 0);
762     printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
763     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
764     index c46662f64c39..3bf45a0cd69e 100644
765     --- a/arch/x86/Kconfig
766     +++ b/arch/x86/Kconfig
767     @@ -1160,22 +1160,23 @@ config MICROCODE
768     bool "CPU microcode loading support"
769     default y
770     depends on CPU_SUP_AMD || CPU_SUP_INTEL
771     - depends on BLK_DEV_INITRD
772     select FW_LOADER
773     ---help---
774     -
775     If you say Y here, you will be able to update the microcode on
776     - certain Intel and AMD processors. The Intel support is for the
777     - IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
778     - Xeon etc. The AMD support is for families 0x10 and later. You will
779     - obviously need the actual microcode binary data itself which is not
780     - shipped with the Linux kernel.
781     -
782     - This option selects the general module only, you need to select
783     - at least one vendor specific module as well.
784     -
785     - To compile this driver as a module, choose M here: the module
786     - will be called microcode.
787     + Intel and AMD processors. The Intel support is for the IA32 family,
788     + e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
789     + AMD support is for families 0x10 and later. You will obviously need
790     + the actual microcode binary data itself which is not shipped with
791     + the Linux kernel.
792     +
793     + The preferred method to load microcode from a detached initrd is described
794     + in Documentation/x86/early-microcode.txt. For that you need to enable
795     + CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
796     + initrd for microcode blobs.
797     +
798     + In addition, you can build-in the microcode into the kernel. For that you
799     + need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
800     + to the CONFIG_EXTRA_FIRMWARE config option.
801    
802     config MICROCODE_INTEL
803     bool "Intel microcode loading support"
804     diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
805     index 03663740c866..1a4477cedc49 100644
806     --- a/arch/x86/entry/common.c
807     +++ b/arch/x86/entry/common.c
808     @@ -268,6 +268,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
809     /* Called with IRQs disabled. */
810     __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
811     {
812     + struct thread_info *ti = pt_regs_to_thread_info(regs);
813     u32 cached_flags;
814    
815     if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
816     @@ -275,12 +276,22 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
817    
818     lockdep_sys_exit();
819    
820     - cached_flags =
821     - READ_ONCE(pt_regs_to_thread_info(regs)->flags);
822     + cached_flags = READ_ONCE(ti->flags);
823    
824     if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
825     exit_to_usermode_loop(regs, cached_flags);
826    
827     +#ifdef CONFIG_COMPAT
828     + /*
829     + * Compat syscalls set TS_COMPAT. Make sure we clear it before
830     + * returning to user mode. We need to clear it *after* signal
831     + * handling, because syscall restart has a fixup for compat
832     + * syscalls. The fixup is exercised by the ptrace_syscall_32
833     + * selftest.
834     + */
835     + ti->status &= ~TS_COMPAT;
836     +#endif
837     +
838     user_enter();
839     }
840    
841     @@ -332,14 +343,6 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
842     if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
843     syscall_slow_exit_work(regs, cached_flags);
844    
845     -#ifdef CONFIG_COMPAT
846     - /*
847     - * Compat syscalls set TS_COMPAT. Make sure we clear it before
848     - * returning to user mode.
849     - */
850     - ti->status &= ~TS_COMPAT;
851     -#endif
852     -
853     local_irq_disable();
854     prepare_exit_to_usermode(regs);
855     }
856     diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
857     index c80f6b6f3da2..e8c4fba52d3d 100644
858     --- a/arch/x86/include/asm/apic.h
859     +++ b/arch/x86/include/asm/apic.h
860     @@ -644,8 +644,8 @@ static inline void entering_irq(void)
861    
862     static inline void entering_ack_irq(void)
863     {
864     - ack_APIC_irq();
865     entering_irq();
866     + ack_APIC_irq();
867     }
868    
869     static inline void ipi_entering_ack_irq(void)
870     diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
871     index 1815b736269d..84b3d194a958 100644
872     --- a/arch/x86/include/asm/hw_irq.h
873     +++ b/arch/x86/include/asm/hw_irq.h
874     @@ -141,6 +141,7 @@ struct irq_alloc_info {
875     struct irq_cfg {
876     unsigned int dest_apicid;
877     u8 vector;
878     + u8 old_vector;
879     };
880    
881     extern struct irq_cfg *irq_cfg(unsigned int irq);
882     diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
883     index 1e1b07a5a738..9d3a96c4da78 100644
884     --- a/arch/x86/include/asm/microcode.h
885     +++ b/arch/x86/include/asm/microcode.h
886     @@ -3,6 +3,7 @@
887    
888     #include <asm/cpu.h>
889     #include <linux/earlycpio.h>
890     +#include <linux/initrd.h>
891    
892     #define native_rdmsr(msr, val1, val2) \
893     do { \
894     @@ -143,4 +144,29 @@ static inline void reload_early_microcode(void) { }
895     static inline bool
896     get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
897     #endif
898     +
899     +static inline unsigned long get_initrd_start(void)
900     +{
901     +#ifdef CONFIG_BLK_DEV_INITRD
902     + return initrd_start;
903     +#else
904     + return 0;
905     +#endif
906     +}
907     +
908     +static inline unsigned long get_initrd_start_addr(void)
909     +{
910     +#ifdef CONFIG_BLK_DEV_INITRD
911     +#ifdef CONFIG_X86_32
912     + unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
913     +
914     + return (unsigned long)__pa_nodebug(*initrd_start_p);
915     +#else
916     + return get_initrd_start();
917     +#endif
918     +#else /* CONFIG_BLK_DEV_INITRD */
919     + return 0;
920     +#endif
921     +}
922     +
923     #endif /* _ASM_X86_MICROCODE_H */
924     diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
925     index 7bcb861a04e5..5a2ed3ed2f26 100644
926     --- a/arch/x86/include/asm/perf_event.h
927     +++ b/arch/x86/include/asm/perf_event.h
928     @@ -165,6 +165,7 @@ struct x86_pmu_capability {
929     #define GLOBAL_STATUS_ASIF BIT_ULL(60)
930     #define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
931     #define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
932     +#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
933    
934     /*
935     * IBS cpuid feature detection
936     diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
937     index 8b2d4bea9962..39171b3646bb 100644
938     --- a/arch/x86/include/asm/xen/hypervisor.h
939     +++ b/arch/x86/include/asm/xen/hypervisor.h
940     @@ -62,4 +62,6 @@ void xen_arch_register_cpu(int num);
941     void xen_arch_unregister_cpu(int num);
942     #endif
943    
944     +extern void xen_set_iopl_mask(unsigned mask);
945     +
946     #endif /* _ASM_X86_XEN_HYPERVISOR_H */
947     diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
948     index 3b670df4ba7b..ad59d70bcb1a 100644
949     --- a/arch/x86/kernel/apic/vector.c
950     +++ b/arch/x86/kernel/apic/vector.c
951     @@ -213,6 +213,7 @@ update:
952     */
953     cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
954     d->move_in_progress = !cpumask_empty(d->old_domain);
955     + d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
956     d->cfg.vector = vector;
957     cpumask_copy(d->domain, vector_cpumask);
958     success:
959     @@ -655,46 +656,97 @@ void irq_complete_move(struct irq_cfg *cfg)
960     }
961    
962     /*
963     - * Called with @desc->lock held and interrupts disabled.
964     + * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
965     */
966     void irq_force_complete_move(struct irq_desc *desc)
967     {
968     struct irq_data *irqdata = irq_desc_get_irq_data(desc);
969     struct apic_chip_data *data = apic_chip_data(irqdata);
970     struct irq_cfg *cfg = data ? &data->cfg : NULL;
971     + unsigned int cpu;
972    
973     if (!cfg)
974     return;
975    
976     - __irq_complete_move(cfg, cfg->vector);
977     -
978     /*
979     * This is tricky. If the cleanup of @data->old_domain has not been
980     * done yet, then the following setaffinity call will fail with
981     * -EBUSY. This can leave the interrupt in a stale state.
982     *
983     - * The cleanup cannot make progress because we hold @desc->lock. So in
984     - * case @data->old_domain is not yet cleaned up, we need to drop the
985     - * lock and acquire it again. @desc cannot go away, because the
986     - * hotplug code holds the sparse irq lock.
987     + * All CPUs are stuck in stop machine with interrupts disabled so
988     + * calling __irq_complete_move() would be completely pointless.
989     */
990     raw_spin_lock(&vector_lock);
991     - /* Clean out all offline cpus (including ourself) first. */
992     + /*
993     + * Clean out all offline cpus (including the outgoing one) from the
994     + * old_domain mask.
995     + */
996     cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
997     - while (!cpumask_empty(data->old_domain)) {
998     +
999     + /*
1000     + * If move_in_progress is cleared and the old_domain mask is empty,
1001     + * then there is nothing to cleanup. fixup_irqs() will take care of
1002     + * the stale vectors on the outgoing cpu.
1003     + */
1004     + if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
1005     raw_spin_unlock(&vector_lock);
1006     - raw_spin_unlock(&desc->lock);
1007     - cpu_relax();
1008     - raw_spin_lock(&desc->lock);
1009     + return;
1010     + }
1011     +
1012     + /*
1013     + * 1) The interrupt is in move_in_progress state. That means that we
1014     + * have not seen an interrupt since the io_apic was reprogrammed to
1015     + * the new vector.
1016     + *
1017     + * 2) The interrupt has fired on the new vector, but the cleanup IPIs
1018     + * have not been processed yet.
1019     + */
1020     + if (data->move_in_progress) {
1021     /*
1022     - * Reevaluate apic_chip_data. It might have been cleared after
1023     - * we dropped @desc->lock.
1024     + * In theory there is a race:
1025     + *
1026     + * set_ioapic(new_vector) <-- Interrupt is raised before update
1027     + * is effective, i.e. it's raised on
1028     + * the old vector.
1029     + *
1030     + * So if the target cpu cannot handle that interrupt before
1031     + * the old vector is cleaned up, we get a spurious interrupt
1032     + * and in the worst case the ioapic irq line becomes stale.
1033     + *
1034     + * But in case of cpu hotplug this should be a non issue
1035     + * because if the affinity update happens right before all
1036     + * cpus rendevouz in stop machine, there is no way that the
1037     + * interrupt can be blocked on the target cpu because all cpus
1038     + * loops first with interrupts enabled in stop machine, so the
1039     + * old vector is not yet cleaned up when the interrupt fires.
1040     + *
1041     + * So the only way to run into this issue is if the delivery
1042     + * of the interrupt on the apic/system bus would be delayed
1043     + * beyond the point where the target cpu disables interrupts
1044     + * in stop machine. I doubt that it can happen, but at least
1045     + * there is a theroretical chance. Virtualization might be
1046     + * able to expose this, but AFAICT the IOAPIC emulation is not
1047     + * as stupid as the real hardware.
1048     + *
1049     + * Anyway, there is nothing we can do about that at this point
1050     + * w/o refactoring the whole fixup_irq() business completely.
1051     + * We print at least the irq number and the old vector number,
1052     + * so we have the necessary information when a problem in that
1053     + * area arises.
1054     */
1055     - data = apic_chip_data(irqdata);
1056     - if (!data)
1057     - return;
1058     - raw_spin_lock(&vector_lock);
1059     + pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
1060     + irqdata->irq, cfg->old_vector);
1061     }
1062     + /*
1063     + * If old_domain is not empty, then other cpus still have the irq
1064     + * descriptor set in their vector array. Clean it up.
1065     + */
1066     + for_each_cpu(cpu, data->old_domain)
1067     + per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;
1068     +
1069     + /* Cleanup the left overs of the (half finished) move */
1070     + cpumask_clear(data->old_domain);
1071     + data->move_in_progress = 0;
1072     raw_spin_unlock(&vector_lock);
1073     }
1074     #endif
1075     diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
1076     index ee81c544ee0d..4f4735bd8698 100644
1077     --- a/arch/x86/kernel/cpu/microcode/intel.c
1078     +++ b/arch/x86/kernel/cpu/microcode/intel.c
1079     @@ -551,10 +551,14 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
1080     cd.data = NULL;
1081     cd.size = 0;
1082    
1083     - cd = find_cpio_data(p, (void *)start, size, &offset);
1084     - if (!cd.data) {
1085     + /* try built-in microcode if no initrd */
1086     + if (!size) {
1087     if (!load_builtin_intel_microcode(&cd))
1088     return UCODE_ERROR;
1089     + } else {
1090     + cd = find_cpio_data(p, (void *)start, size, &offset);
1091     + if (!cd.data)
1092     + return UCODE_ERROR;
1093     }
1094    
1095     return get_matching_model_microcode(0, start, cd.data, cd.size,
1096     @@ -690,7 +694,7 @@ int __init save_microcode_in_initrd_intel(void)
1097     if (count == 0)
1098     return ret;
1099    
1100     - copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
1101     + copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
1102     ret = save_microcode(&mc_saved_data, mc_saved, count);
1103     if (ret)
1104     pr_err("Cannot save microcode patches from initrd.\n");
1105     @@ -728,16 +732,20 @@ void __init load_ucode_intel_bsp(void)
1106     struct boot_params *p;
1107    
1108     p = (struct boot_params *)__pa_nodebug(&boot_params);
1109     - start = p->hdr.ramdisk_image;
1110     size = p->hdr.ramdisk_size;
1111    
1112     - _load_ucode_intel_bsp(
1113     - (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
1114     - (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
1115     - start, size);
1116     + /*
1117     + * Set start only if we have an initrd image. We cannot use initrd_start
1118     + * because it is not set that early yet.
1119     + */
1120     + start = (size ? p->hdr.ramdisk_image : 0);
1121     +
1122     + _load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
1123     + (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
1124     + start, size);
1125     #else
1126     - start = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
1127     size = boot_params.hdr.ramdisk_size;
1128     + start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
1129    
1130     _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
1131     #endif
1132     @@ -748,20 +756,14 @@ void load_ucode_intel_ap(void)
1133     struct mc_saved_data *mc_saved_data_p;
1134     struct ucode_cpu_info uci;
1135     unsigned long *mc_saved_in_initrd_p;
1136     - unsigned long initrd_start_addr;
1137     enum ucode_state ret;
1138     #ifdef CONFIG_X86_32
1139     - unsigned long *initrd_start_p;
1140    
1141     - mc_saved_in_initrd_p =
1142     - (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
1143     + mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
1144     mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
1145     - initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
1146     - initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
1147     #else
1148     - mc_saved_data_p = &mc_saved_data;
1149     mc_saved_in_initrd_p = mc_saved_in_initrd;
1150     - initrd_start_addr = initrd_start;
1151     + mc_saved_data_p = &mc_saved_data;
1152     #endif
1153    
1154     /*
1155     @@ -773,7 +775,7 @@ void load_ucode_intel_ap(void)
1156    
1157     collect_cpu_info_early(&uci);
1158     ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
1159     - initrd_start_addr, &uci);
1160     + get_initrd_start_addr(), &uci);
1161    
1162     if (ret != UCODE_OK)
1163     return;
1164     diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
1165     index 1b443db2db50..6532f5b40646 100644
1166     --- a/arch/x86/kernel/cpu/perf_event.c
1167     +++ b/arch/x86/kernel/cpu/perf_event.c
1168     @@ -596,6 +596,19 @@ void x86_pmu_disable_all(void)
1169     }
1170     }
1171    
1172     +/*
1173     + * There may be PMI landing after enabled=0. The PMI hitting could be before or
1174     + * after disable_all.
1175     + *
1176     + * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
1177     + * It will not be re-enabled in the NMI handler again, because enabled=0. After
1178     + * handling the NMI, disable_all will be called, which will not change the
1179     + * state either. If PMI hits after disable_all, the PMU is already disabled
1180     + * before entering NMI handler. The NMI handler will not change the state
1181     + * either.
1182     + *
1183     + * So either situation is harmless.
1184     + */
1185     static void x86_pmu_disable(struct pmu *pmu)
1186     {
1187     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1188     diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
1189     index 7bb61e32fb29..98be6d6d32fa 100644
1190     --- a/arch/x86/kernel/cpu/perf_event.h
1191     +++ b/arch/x86/kernel/cpu/perf_event.h
1192     @@ -586,6 +586,7 @@ struct x86_pmu {
1193     pebs_broken :1,
1194     pebs_prec_dist :1;
1195     int pebs_record_size;
1196     + int pebs_buffer_size;
1197     void (*drain_pebs)(struct pt_regs *regs);
1198     struct event_constraint *pebs_constraints;
1199     void (*pebs_aliases)(struct perf_event *event);
1200     @@ -904,6 +905,8 @@ void intel_pmu_lbr_init_skl(void);
1201    
1202     void intel_pmu_lbr_init_knl(void);
1203    
1204     +void intel_pmu_pebs_data_source_nhm(void);
1205     +
1206     int intel_pmu_setup_lbr_filter(struct perf_event *event);
1207    
1208     void intel_pt_interrupt(void);
1209     diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
1210     index fed2ab1f1065..760aec1e8f82 100644
1211     --- a/arch/x86/kernel/cpu/perf_event_intel.c
1212     +++ b/arch/x86/kernel/cpu/perf_event_intel.c
1213     @@ -1502,7 +1502,15 @@ static __initconst const u64 knl_hw_cache_extra_regs
1214     };
1215    
1216     /*
1217     - * Use from PMIs where the LBRs are already disabled.
1218     + * Used from PMIs where the LBRs are already disabled.
1219     + *
1220     + * This function could be called consecutively. It is required to remain in
1221     + * disabled state if called consecutively.
1222     + *
1223     + * During consecutive calls, the same disable value will be written to related
1224     + * registers, so the PMU state remains unchanged. hw.state in
1225     + * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
1226     + * calls.
1227     */
1228     static void __intel_pmu_disable_all(void)
1229     {
1230     @@ -1884,6 +1892,16 @@ again:
1231     if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1232     handled++;
1233     x86_pmu.drain_pebs(regs);
1234     + /*
1235     + * There are cases where, even though, the PEBS ovfl bit is set
1236     + * in GLOBAL_OVF_STATUS, the PEBS events may also have their
1237     + * overflow bits set for their counters. We must clear them
1238     + * here because they have been processed as exact samples in
1239     + * the drain_pebs() routine. They must not be processed again
1240     + * in the for_each_bit_set() loop for regular samples below.
1241     + */
1242     + status &= ~cpuc->pebs_enabled;
1243     + status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
1244     }
1245    
1246     /*
1247     @@ -1929,7 +1947,10 @@ again:
1248     goto again;
1249    
1250     done:
1251     - __intel_pmu_enable_all(0, true);
1252     + /* Only restore PMU state when it's active. See x86_pmu_disable(). */
1253     + if (cpuc->enabled)
1254     + __intel_pmu_enable_all(0, true);
1255     +
1256     /*
1257     * Only unmask the NMI after the overflow counters
1258     * have been reset. This avoids spurious NMIs on
1259     @@ -3396,6 +3417,7 @@ __init int intel_pmu_init(void)
1260     intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
1261     X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
1262    
1263     + intel_pmu_pebs_data_source_nhm();
1264     x86_add_quirk(intel_nehalem_quirk);
1265    
1266     pr_cont("Nehalem events, ");
1267     @@ -3459,6 +3481,7 @@ __init int intel_pmu_init(void)
1268     intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
1269     X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
1270    
1271     + intel_pmu_pebs_data_source_nhm();
1272     pr_cont("Westmere events, ");
1273     break;
1274    
1275     diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
1276     index 10602f0a438f..955140140fd4 100644
1277     --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
1278     +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
1279     @@ -51,7 +51,8 @@ union intel_x86_pebs_dse {
1280     #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
1281     #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
1282    
1283     -static const u64 pebs_data_source[] = {
1284     +/* Version for Sandy Bridge and later */
1285     +static u64 pebs_data_source[] = {
1286     P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
1287     OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */
1288     OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
1289     @@ -70,6 +71,14 @@ static const u64 pebs_data_source[] = {
1290     OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
1291     };
1292    
1293     +/* Patch up minor differences in the bits */
1294     +void __init intel_pmu_pebs_data_source_nhm(void)
1295     +{
1296     + pebs_data_source[0x05] = OP_LH | P(LVL, L3) | P(SNOOP, HIT);
1297     + pebs_data_source[0x06] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
1298     + pebs_data_source[0x07] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
1299     +}
1300     +
1301     static u64 precise_store_data(u64 status)
1302     {
1303     union intel_x86_pebs_dse dse;
1304     @@ -269,7 +278,7 @@ static int alloc_pebs_buffer(int cpu)
1305     if (!x86_pmu.pebs)
1306     return 0;
1307    
1308     - buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
1309     + buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
1310     if (unlikely(!buffer))
1311     return -ENOMEM;
1312    
1313     @@ -286,7 +295,7 @@ static int alloc_pebs_buffer(int cpu)
1314     per_cpu(insn_buffer, cpu) = ibuffer;
1315     }
1316    
1317     - max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
1318     + max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
1319    
1320     ds->pebs_buffer_base = (u64)(unsigned long)buffer;
1321     ds->pebs_index = ds->pebs_buffer_base;
1322     @@ -1319,6 +1328,7 @@ void __init intel_ds_init(void)
1323    
1324     x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
1325     x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
1326     + x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
1327     if (x86_pmu.pebs) {
1328     char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
1329     int format = x86_pmu.intel_cap.pebs_format;
1330     @@ -1327,6 +1337,14 @@ void __init intel_ds_init(void)
1331     case 0:
1332     printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
1333     x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
1334     + /*
1335     + * Using >PAGE_SIZE buffers makes the WRMSR to
1336     + * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
1337     + * mysteriously hang on Core2.
1338     + *
1339     + * As a workaround, we don't do this.
1340     + */
1341     + x86_pmu.pebs_buffer_size = PAGE_SIZE;
1342     x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
1343     break;
1344    
1345     diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
1346     index 33acb884ccf1..4547b2cca71b 100644
1347     --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
1348     +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
1349     @@ -2875,11 +2875,13 @@ static struct intel_uncore_type bdx_uncore_sbox = {
1350     .format_group = &hswep_uncore_sbox_format_group,
1351     };
1352    
1353     +#define BDX_MSR_UNCORE_SBOX 3
1354     +
1355     static struct intel_uncore_type *bdx_msr_uncores[] = {
1356     &bdx_uncore_ubox,
1357     &bdx_uncore_cbox,
1358     - &bdx_uncore_sbox,
1359     &hswep_uncore_pcu,
1360     + &bdx_uncore_sbox,
1361     NULL,
1362     };
1363    
1364     @@ -2888,6 +2890,10 @@ void bdx_uncore_cpu_init(void)
1365     if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1366     bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1367     uncore_msr_uncores = bdx_msr_uncores;
1368     +
1369     + /* BDX-DE doesn't have SBOX */
1370     + if (boot_cpu_data.x86_model == 86)
1371     + uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
1372     }
1373    
1374     static struct intel_uncore_type bdx_uncore_ha = {
1375     diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
1376     index 5b0c232d1ee6..b931095e86d4 100644
1377     --- a/arch/x86/kernel/cpu/perf_event_knc.c
1378     +++ b/arch/x86/kernel/cpu/perf_event_knc.c
1379     @@ -263,7 +263,9 @@ again:
1380     goto again;
1381    
1382     done:
1383     - knc_pmu_enable_all(0);
1384     + /* Only restore PMU state when it's active. See x86_pmu_disable(). */
1385     + if (cpuc->enabled)
1386     + knc_pmu_enable_all(0);
1387    
1388     return handled;
1389     }
1390     diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
1391     index 37dae792dbbe..589b3193f102 100644
1392     --- a/arch/x86/kernel/ioport.c
1393     +++ b/arch/x86/kernel/ioport.c
1394     @@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
1395     SYSCALL_DEFINE1(iopl, unsigned int, level)
1396     {
1397     struct pt_regs *regs = current_pt_regs();
1398     - unsigned int old = (regs->flags >> 12) & 3;
1399     struct thread_struct *t = &current->thread;
1400    
1401     + /*
1402     + * Careful: the IOPL bits in regs->flags are undefined under Xen PV
1403     + * and changing them has no effect.
1404     + */
1405     + unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
1406     +
1407     if (level > 3)
1408     return -EINVAL;
1409     /* Trying to gain more privileges? */
1410     @@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
1411     if (!capable(CAP_SYS_RAWIO))
1412     return -EPERM;
1413     }
1414     - regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
1415     - t->iopl = level << 12;
1416     + regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
1417     + (level << X86_EFLAGS_IOPL_BIT);
1418     + t->iopl = level << X86_EFLAGS_IOPL_BIT;
1419     set_iopl_mask(t->iopl);
1420    
1421     return 0;
1422     diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
1423     index b9d99e0f82c4..9f751876066f 100644
1424     --- a/arch/x86/kernel/process_64.c
1425     +++ b/arch/x86/kernel/process_64.c
1426     @@ -48,6 +48,7 @@
1427     #include <asm/syscalls.h>
1428     #include <asm/debugreg.h>
1429     #include <asm/switch_to.h>
1430     +#include <asm/xen/hypervisor.h>
1431    
1432     asmlinkage extern void ret_from_fork(void);
1433    
1434     @@ -411,6 +412,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
1435     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
1436     __switch_to_xtra(prev_p, next_p, tss);
1437    
1438     +#ifdef CONFIG_XEN
1439     + /*
1440     + * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
1441     + * current_pt_regs()->flags may not match the current task's
1442     + * intended IOPL. We need to switch it manually.
1443     + */
1444     + if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
1445     + prev->iopl != next->iopl))
1446     + xen_set_iopl_mask(next->iopl);
1447     +#endif
1448     +
1449     if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
1450     /*
1451     * AMD CPUs have a misfeature: SYSRET sets the SS selector but
1452     diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
1453     index b0ea42b78ccd..ab5318727579 100644
1454     --- a/arch/x86/kvm/i8254.c
1455     +++ b/arch/x86/kvm/i8254.c
1456     @@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
1457     * PIC is being reset. Handle it gracefully here
1458     */
1459     atomic_inc(&ps->pending);
1460     - else if (value > 0)
1461     + else if (value > 0 && ps->reinject)
1462     /* in this case, we had multiple outstanding pit interrupts
1463     * that we needed to inject. Reinject
1464     */
1465     @@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_work *work)
1466     * last one has been acked.
1467     */
1468     spin_lock(&ps->inject_lock);
1469     - if (ps->irq_ack) {
1470     + if (!ps->reinject)
1471     + inject = 1;
1472     + else if (ps->irq_ack) {
1473     ps->irq_ack = 0;
1474     inject = 1;
1475     }
1476     @@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
1477     struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
1478     struct kvm_pit *pt = ps->kvm->arch.vpit;
1479    
1480     - if (ps->reinject || !atomic_read(&ps->pending)) {
1481     + if (ps->reinject)
1482     atomic_inc(&ps->pending);
1483     - queue_kthread_work(&pt->worker, &pt->expired);
1484     - }
1485     +
1486     + queue_kthread_work(&pt->worker, &pt->expired);
1487    
1488     if (ps->is_periodic) {
1489     hrtimer_add_expires_ns(&ps->timer, ps->period);
1490     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1491     index 9bd8f44baded..539062e24de1 100644
1492     --- a/arch/x86/kvm/vmx.c
1493     +++ b/arch/x86/kvm/vmx.c
1494     @@ -2702,8 +2702,15 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
1495     } else
1496     vmx->nested.nested_vmx_ept_caps = 0;
1497    
1498     + /*
1499     + * Old versions of KVM use the single-context version without
1500     + * checking for support, so declare that it is supported even
1501     + * though it is treated as global context. The alternative is
1502     + * not failing the single-context invvpid, and it is worse.
1503     + */
1504     if (enable_vpid)
1505     vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
1506     + VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
1507     VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1508     else
1509     vmx->nested.nested_vmx_vpid_caps = 0;
1510     @@ -7398,6 +7405,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
1511     if (!(types & (1UL << type))) {
1512     nested_vmx_failValid(vcpu,
1513     VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
1514     + skip_emulated_instruction(vcpu);
1515     return 1;
1516     }
1517    
1518     @@ -7456,6 +7464,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
1519     if (!(types & (1UL << type))) {
1520     nested_vmx_failValid(vcpu,
1521     VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
1522     + skip_emulated_instruction(vcpu);
1523     return 1;
1524     }
1525    
1526     @@ -7472,12 +7481,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
1527     }
1528    
1529     switch (type) {
1530     + case VMX_VPID_EXTENT_SINGLE_CONTEXT:
1531     + /*
1532     + * Old versions of KVM use the single-context version so we
1533     + * have to support it; just treat it the same as all-context.
1534     + */
1535     case VMX_VPID_EXTENT_ALL_CONTEXT:
1536     __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
1537     nested_vmx_succeed(vcpu);
1538     break;
1539     default:
1540     - /* Trap single context invalidation invvpid calls */
1541     + /* Trap individual address invalidation invvpid calls */
1542     BUG_ON(1);
1543     break;
1544     }
1545     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1546     index eaf6ee8c28b8..d47d231e0d4b 100644
1547     --- a/arch/x86/kvm/x86.c
1548     +++ b/arch/x86/kvm/x86.c
1549     @@ -2752,6 +2752,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1550     }
1551    
1552     kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1553     + vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
1554     }
1555    
1556     void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1557     diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
1558     index 8f4cc3dfac32..5fb6adaaa796 100644
1559     --- a/arch/x86/mm/tlb.c
1560     +++ b/arch/x86/mm/tlb.c
1561     @@ -106,8 +106,6 @@ static void flush_tlb_func(void *info)
1562    
1563     if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
1564     return;
1565     - if (!f->flush_end)
1566     - f->flush_end = f->flush_start + PAGE_SIZE;
1567    
1568     count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
1569     if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
1570     @@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
1571     unsigned long end)
1572     {
1573     struct flush_tlb_info info;
1574     +
1575     + if (end == 0)
1576     + end = start + PAGE_SIZE;
1577     info.flush_mm = mm;
1578     info.flush_start = start;
1579     info.flush_end = end;
1580    
1581     count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
1582     - trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
1583     + if (end == TLB_FLUSH_ALL)
1584     + trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
1585     + else
1586     + trace_tlb_flush(TLB_REMOTE_SEND_IPI,
1587     + (end - start) >> PAGE_SHIFT);
1588     +
1589     if (is_uv_system()) {
1590     unsigned int cpu;
1591    
1592     diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
1593     index e58565556703..0ae7e9fa348d 100644
1594     --- a/arch/x86/pci/fixup.c
1595     +++ b/arch/x86/pci/fixup.c
1596     @@ -540,3 +540,10 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
1597     }
1598     }
1599     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
1600     +
1601     +static void pci_bdwep_bar(struct pci_dev *dev)
1602     +{
1603     + dev->non_compliant_bars = 1;
1604     +}
1605     +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
1606     +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
1607     diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
1608     index d09e4c9d7cc5..e3679db17545 100644
1609     --- a/arch/x86/xen/enlighten.c
1610     +++ b/arch/x86/xen/enlighten.c
1611     @@ -961,7 +961,7 @@ static void xen_load_sp0(struct tss_struct *tss,
1612     tss->x86_tss.sp0 = thread->sp0;
1613     }
1614    
1615     -static void xen_set_iopl_mask(unsigned mask)
1616     +void xen_set_iopl_mask(unsigned mask)
1617     {
1618     struct physdev_set_iopl set_iopl;
1619    
1620     diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
1621     index 9ed55649ac8e..05e1df943856 100644
1622     --- a/arch/xtensa/kernel/head.S
1623     +++ b/arch/xtensa/kernel/head.S
1624     @@ -128,7 +128,7 @@ ENTRY(_startup)
1625     wsr a0, icountlevel
1626    
1627     .set _index, 0
1628     - .rept XCHAL_NUM_DBREAK - 1
1629     + .rept XCHAL_NUM_DBREAK
1630     wsr a0, SREG_DBREAKC + _index
1631     .set _index, _index + 1
1632     .endr
1633     diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
1634     index d75aa1476da7..1a804a2f9a5b 100644
1635     --- a/arch/xtensa/mm/cache.c
1636     +++ b/arch/xtensa/mm/cache.c
1637     @@ -97,11 +97,11 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
1638     unsigned long paddr;
1639     void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
1640    
1641     - pagefault_disable();
1642     + preempt_disable();
1643     kmap_invalidate_coherent(page, vaddr);
1644     set_bit(PG_arch_1, &page->flags);
1645     clear_page_alias(kvaddr, paddr);
1646     - pagefault_enable();
1647     + preempt_enable();
1648     }
1649    
1650     void copy_user_highpage(struct page *dst, struct page *src,
1651     @@ -113,11 +113,11 @@ void copy_user_highpage(struct page *dst, struct page *src,
1652     void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
1653     &src_paddr);
1654    
1655     - pagefault_disable();
1656     + preempt_disable();
1657     kmap_invalidate_coherent(dst, vaddr);
1658     set_bit(PG_arch_1, &dst->flags);
1659     copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
1660     - pagefault_enable();
1661     + preempt_enable();
1662     }
1663    
1664     #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
1665     diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
1666     index 70cb408bc20d..92d785fefb6d 100644
1667     --- a/arch/xtensa/platforms/iss/console.c
1668     +++ b/arch/xtensa/platforms/iss/console.c
1669     @@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
1670     {
1671     struct tty_port *port = (struct tty_port *)priv;
1672     int i = 0;
1673     + int rd = 1;
1674     unsigned char c;
1675    
1676     spin_lock(&timer_lock);
1677    
1678     while (simc_poll(0)) {
1679     - simc_read(0, &c, 1);
1680     + rd = simc_read(0, &c, 1);
1681     + if (rd <= 0)
1682     + break;
1683     tty_insert_flip_char(port, c, TTY_NORMAL);
1684     i++;
1685     }
1686    
1687     if (i)
1688     tty_flip_buffer_push(port);
1689     -
1690     -
1691     - mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
1692     + if (rd)
1693     + mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
1694     spin_unlock(&timer_lock);
1695     }
1696    
1697     diff --git a/block/blk-core.c b/block/blk-core.c
1698     index b83d29755b5a..45f4d7efbf34 100644
1699     --- a/block/blk-core.c
1700     +++ b/block/blk-core.c
1701     @@ -2198,7 +2198,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1702     if (q->mq_ops) {
1703     if (blk_queue_io_stat(q))
1704     blk_account_io_start(rq, true);
1705     - blk_mq_insert_request(rq, false, true, true);
1706     + blk_mq_insert_request(rq, false, true, false);
1707     return 0;
1708     }
1709    
1710     diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
1711     index 021d39c0ba75..13c4e5a5fe8c 100644
1712     --- a/crypto/asymmetric_keys/x509_cert_parser.c
1713     +++ b/crypto/asymmetric_keys/x509_cert_parser.c
1714     @@ -494,7 +494,7 @@ int x509_decode_time(time64_t *_t, size_t hdrlen,
1715     unsigned char tag,
1716     const unsigned char *value, size_t vlen)
1717     {
1718     - static const unsigned char month_lengths[] = { 31, 29, 31, 30, 31, 30,
1719     + static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
1720     31, 31, 30, 31, 30, 31 };
1721     const unsigned char *p = value;
1722     unsigned year, mon, day, hour, min, sec, mon_len;
1723     @@ -540,9 +540,9 @@ int x509_decode_time(time64_t *_t, size_t hdrlen,
1724     if (year % 4 == 0) {
1725     mon_len = 29;
1726     if (year % 100 == 0) {
1727     - year /= 100;
1728     - if (year % 4 != 0)
1729     - mon_len = 28;
1730     + mon_len = 28;
1731     + if (year % 400 == 0)
1732     + mon_len = 29;
1733     }
1734     }
1735     }
1736     diff --git a/crypto/keywrap.c b/crypto/keywrap.c
1737     index b1d106ce55f3..72014f963ba7 100644
1738     --- a/crypto/keywrap.c
1739     +++ b/crypto/keywrap.c
1740     @@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
1741     SEMIBSIZE))
1742     ret = -EBADMSG;
1743    
1744     - memzero_explicit(&block, sizeof(struct crypto_kw_block));
1745     + memzero_explicit(block, sizeof(struct crypto_kw_block));
1746    
1747     return ret;
1748     }
1749     @@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
1750     /* establish the IV for the caller to pick up */
1751     memcpy(desc->info, block->A, SEMIBSIZE);
1752    
1753     - memzero_explicit(&block, sizeof(struct crypto_kw_block));
1754     + memzero_explicit(block, sizeof(struct crypto_kw_block));
1755    
1756     return 0;
1757     }
1758     diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
1759     index d02fd53042a5..56241eb341f4 100644
1760     --- a/drivers/acpi/resource.c
1761     +++ b/drivers/acpi/resource.c
1762     @@ -27,8 +27,20 @@
1763    
1764     #ifdef CONFIG_X86
1765     #define valid_IRQ(i) (((i) != 0) && ((i) != 2))
1766     +static inline bool acpi_iospace_resource_valid(struct resource *res)
1767     +{
1768     + /* On X86 IO space is limited to the [0 - 64K] IO port range */
1769     + return res->end < 0x10003;
1770     +}
1771     #else
1772     #define valid_IRQ(i) (true)
1773     +/*
1774     + * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
1775     + * addresses mapping IO space in CPU physical address space, IO space
1776     + * resources can be placed anywhere in the 64-bit physical address space.
1777     + */
1778     +static inline bool
1779     +acpi_iospace_resource_valid(struct resource *res) { return true; }
1780     #endif
1781    
1782     static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
1783     @@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
1784     if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
1785     res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
1786    
1787     - if (res->end >= 0x10003)
1788     + if (!acpi_iospace_resource_valid(res))
1789     res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
1790    
1791     if (io_decode == ACPI_DECODE_16)
1792     diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
1793     index 9cb975200cac..f054cadf30d8 100644
1794     --- a/drivers/acpi/sleep.c
1795     +++ b/drivers/acpi/sleep.c
1796     @@ -714,6 +714,7 @@ static int acpi_hibernation_enter(void)
1797    
1798     static void acpi_hibernation_leave(void)
1799     {
1800     + pm_set_resume_via_firmware();
1801     /*
1802     * If ACPI is not enabled by the BIOS and the boot kernel, we need to
1803     * enable it here.
1804     diff --git a/drivers/block/brd.c b/drivers/block/brd.c
1805     index cb27190e9f39..f7ecc287d733 100644
1806     --- a/drivers/block/brd.c
1807     +++ b/drivers/block/brd.c
1808     @@ -341,7 +341,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
1809    
1810     if (unlikely(bio->bi_rw & REQ_DISCARD)) {
1811     if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
1812     - bio->bi_iter.bi_size & PAGE_MASK)
1813     + bio->bi_iter.bi_size & ~PAGE_MASK)
1814     goto io_error;
1815     discard_from_brd(brd, sector, bio->bi_iter.bi_size);
1816     goto out;
1817     diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
1818     index 9b180dbbd03c..1c330b61f05d 100644
1819     --- a/drivers/block/mtip32xx/mtip32xx.c
1820     +++ b/drivers/block/mtip32xx/mtip32xx.c
1821     @@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
1822     {
1823     struct request *rq;
1824    
1825     + if (mtip_check_surprise_removal(dd->pdev))
1826     + return NULL;
1827     +
1828     rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
1829     + if (IS_ERR(rq))
1830     + return NULL;
1831     +
1832     return blk_mq_rq_to_pdu(rq);
1833     }
1834    
1835     @@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
1836     "Command tag %d failed due to TFE\n", tag);
1837     }
1838    
1839     - /* Unmap the DMA scatter list entries */
1840     - dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
1841     -
1842     rq = mtip_rq_from_tag(dd, tag);
1843    
1844     - if (unlikely(cmd->unaligned))
1845     - up(&port->cmd_slot_unal);
1846     -
1847     - blk_mq_end_request(rq, status ? -EIO : 0);
1848     + blk_mq_complete_request(rq, status);
1849     }
1850    
1851     /*
1852     @@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
1853     dev_warn(&port->dd->pdev->dev,
1854     "Internal command %d completed with TFE\n", tag);
1855    
1856     + command->comp_func = NULL;
1857     + command->comp_data = NULL;
1858     complete(waiting);
1859     }
1860    
1861     @@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
1862    
1863     port = dd->port;
1864    
1865     - set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
1866     -
1867     if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
1868     cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
1869     dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
1870     @@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
1871     cmd->comp_func(port, MTIP_TAG_INTERNAL,
1872     cmd, PORT_IRQ_TF_ERR);
1873     }
1874     - goto handle_tfe_exit;
1875     + return;
1876     }
1877    
1878     /* clear the tag accumulator */
1879     @@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
1880     fail_reason = "thermal shutdown";
1881     }
1882     if (buf[288] == 0xBF) {
1883     - set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
1884     + set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
1885     dev_info(&dd->pdev->dev,
1886     "Drive indicates rebuild has failed. Secure erase required.\n");
1887     fail_all_ncq_cmds = 1;
1888     @@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
1889     }
1890     }
1891     print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
1892     -
1893     -handle_tfe_exit:
1894     - /* clear eh_active */
1895     - clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
1896     - wake_up_interruptible(&port->svc_wait);
1897     }
1898    
1899     /*
1900     @@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
1901     (fis->features == 0x27 || fis->features == 0x72 ||
1902     fis->features == 0x62 || fis->features == 0x26))) {
1903     clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
1904     + clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
1905     /* Com reset after secure erase or lowlevel format */
1906     mtip_restart_port(port);
1907     clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
1908     @@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
1909     *
1910     * @port Pointer to port data structure
1911     * @timeout Max duration to wait (ms)
1912     + * @atomic gfp_t flag to indicate blockable context or not
1913     *
1914     * return value
1915     * 0 Success
1916     * -EBUSY Commands still active
1917     */
1918     -static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
1919     +static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
1920     + gfp_t atomic)
1921     {
1922     unsigned long to;
1923     unsigned int n;
1924     @@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
1925     to = jiffies + msecs_to_jiffies(timeout);
1926     do {
1927     if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
1928     - test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
1929     + test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
1930     + atomic == GFP_KERNEL) {
1931     msleep(20);
1932     continue; /* svc thd is actively issuing commands */
1933     }
1934    
1935     - msleep(100);
1936     + if (atomic == GFP_KERNEL)
1937     + msleep(100);
1938     + else {
1939     + cpu_relax();
1940     + udelay(100);
1941     + }
1942     +
1943     if (mtip_check_surprise_removal(port->dd->pdev))
1944     goto err_fault;
1945     - if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
1946     - goto err_fault;
1947    
1948     /*
1949     * Ignore s_active bit 0 of array element 0.
1950     @@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1951     struct mtip_cmd *int_cmd;
1952     struct driver_data *dd = port->dd;
1953     int rv = 0;
1954     + unsigned long start;
1955    
1956     /* Make sure the buffer is 8 byte aligned. This is asic specific. */
1957     if (buffer & 0x00000007) {
1958     @@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1959     }
1960    
1961     int_cmd = mtip_get_int_command(dd);
1962     + if (!int_cmd) {
1963     + dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
1964     + return -EFAULT;
1965     + }
1966    
1967     set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1968    
1969     @@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1970     if (fis->command != ATA_CMD_STANDBYNOW1) {
1971     /* wait for io to complete if non atomic */
1972     if (mtip_quiesce_io(port,
1973     - MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
1974     + MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
1975     dev_warn(&dd->pdev->dev,
1976     "Failed to quiesce IO\n");
1977     mtip_put_int_command(dd, int_cmd);
1978     @@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1979     /* Populate the command header */
1980     int_cmd->command_header->byte_count = 0;
1981    
1982     + start = jiffies;
1983     +
1984     /* Issue the command to the hardware */
1985     mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
1986    
1987     @@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1988     if ((rv = wait_for_completion_interruptible_timeout(
1989     &wait,
1990     msecs_to_jiffies(timeout))) <= 0) {
1991     +
1992     if (rv == -ERESTARTSYS) { /* interrupted */
1993     dev_err(&dd->pdev->dev,
1994     - "Internal command [%02X] was interrupted after %lu ms\n",
1995     - fis->command, timeout);
1996     + "Internal command [%02X] was interrupted after %u ms\n",
1997     + fis->command,
1998     + jiffies_to_msecs(jiffies - start));
1999     rv = -EINTR;
2000     goto exec_ic_exit;
2001     } else if (rv == 0) /* timeout */
2002     @@ -2890,6 +2902,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
2003     return -EFAULT;
2004     }
2005    
2006     +static void mtip_softirq_done_fn(struct request *rq)
2007     +{
2008     + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
2009     + struct driver_data *dd = rq->q->queuedata;
2010     +
2011     + /* Unmap the DMA scatter list entries */
2012     + dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
2013     + cmd->direction);
2014     +
2015     + if (unlikely(cmd->unaligned))
2016     + up(&dd->port->cmd_slot_unal);
2017     +
2018     + blk_mq_end_request(rq, rq->errors);
2019     +}
2020     +
2021     +static void mtip_abort_cmd(struct request *req, void *data,
2022     + bool reserved)
2023     +{
2024     + struct driver_data *dd = data;
2025     +
2026     + dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
2027     +
2028     + clear_bit(req->tag, dd->port->cmds_to_issue);
2029     + req->errors = -EIO;
2030     + mtip_softirq_done_fn(req);
2031     +}
2032     +
2033     +static void mtip_queue_cmd(struct request *req, void *data,
2034     + bool reserved)
2035     +{
2036     + struct driver_data *dd = data;
2037     +
2038     + set_bit(req->tag, dd->port->cmds_to_issue);
2039     + blk_abort_request(req);
2040     +}
2041     +
2042     /*
2043     * service thread to issue queued commands
2044     *
2045     @@ -2902,7 +2950,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
2046     static int mtip_service_thread(void *data)
2047     {
2048     struct driver_data *dd = (struct driver_data *)data;
2049     - unsigned long slot, slot_start, slot_wrap;
2050     + unsigned long slot, slot_start, slot_wrap, to;
2051     unsigned int num_cmd_slots = dd->slot_groups * 32;
2052     struct mtip_port *port = dd->port;
2053    
2054     @@ -2917,9 +2965,7 @@ static int mtip_service_thread(void *data)
2055     * is in progress nor error handling is active
2056     */
2057     wait_event_interruptible(port->svc_wait, (port->flags) &&
2058     - !(port->flags & MTIP_PF_PAUSE_IO));
2059     -
2060     - set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2061     + (port->flags & MTIP_PF_SVC_THD_WORK));
2062    
2063     if (kthread_should_stop() ||
2064     test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2065     @@ -2929,6 +2975,8 @@ static int mtip_service_thread(void *data)
2066     &dd->dd_flag)))
2067     goto st_out;
2068    
2069     + set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2070     +
2071     restart_eh:
2072     /* Demux bits: start with error handling */
2073     if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
2074     @@ -2939,6 +2987,32 @@ restart_eh:
2075     if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
2076     goto restart_eh;
2077    
2078     + if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
2079     + to = jiffies + msecs_to_jiffies(5000);
2080     +
2081     + do {
2082     + mdelay(100);
2083     + } while (atomic_read(&dd->irq_workers_active) != 0 &&
2084     + time_before(jiffies, to));
2085     +
2086     + if (atomic_read(&dd->irq_workers_active) != 0)
2087     + dev_warn(&dd->pdev->dev,
2088     + "Completion workers still active!");
2089     +
2090     + spin_lock(dd->queue->queue_lock);
2091     + blk_mq_all_tag_busy_iter(*dd->tags.tags,
2092     + mtip_queue_cmd, dd);
2093     + spin_unlock(dd->queue->queue_lock);
2094     +
2095     + set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
2096     +
2097     + if (mtip_device_reset(dd))
2098     + blk_mq_all_tag_busy_iter(*dd->tags.tags,
2099     + mtip_abort_cmd, dd);
2100     +
2101     + clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
2102     + }
2103     +
2104     if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
2105     slot = 1;
2106     /* used to restrict the loop to one iteration */
2107     @@ -2971,10 +3045,8 @@ restart_eh:
2108     }
2109    
2110     if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
2111     - if (mtip_ftl_rebuild_poll(dd) < 0)
2112     - set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
2113     - &dd->dd_flag);
2114     - clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
2115     + if (mtip_ftl_rebuild_poll(dd) == 0)
2116     + clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
2117     }
2118     }
2119    
2120     @@ -3089,7 +3161,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
2121     if (buf[288] == 0xBF) {
2122     dev_info(&dd->pdev->dev,
2123     "Drive indicates rebuild has failed.\n");
2124     - /* TODO */
2125     + set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
2126     }
2127     }
2128    
2129     @@ -3263,20 +3335,25 @@ out1:
2130     return rv;
2131     }
2132    
2133     -static void mtip_standby_drive(struct driver_data *dd)
2134     +static int mtip_standby_drive(struct driver_data *dd)
2135     {
2136     - if (dd->sr)
2137     - return;
2138     + int rv = 0;
2139    
2140     + if (dd->sr || !dd->port)
2141     + return -ENODEV;
2142     /*
2143     * Send standby immediate (E0h) to the drive so that it
2144     * saves its state.
2145     */
2146     if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
2147     - !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
2148     - if (mtip_standby_immediate(dd->port))
2149     + !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
2150     + !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
2151     + rv = mtip_standby_immediate(dd->port);
2152     + if (rv)
2153     dev_warn(&dd->pdev->dev,
2154     "STANDBY IMMEDIATE failed\n");
2155     + }
2156     + return rv;
2157     }
2158    
2159     /*
2160     @@ -3289,10 +3366,6 @@ static void mtip_standby_drive(struct driver_data *dd)
2161     */
2162     static int mtip_hw_exit(struct driver_data *dd)
2163     {
2164     - /*
2165     - * Send standby immediate (E0h) to the drive so that it
2166     - * saves its state.
2167     - */
2168     if (!dd->sr) {
2169     /* de-initialize the port. */
2170     mtip_deinit_port(dd->port);
2171     @@ -3334,8 +3407,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
2172     * Send standby immediate (E0h) to the drive so that it
2173     * saves its state.
2174     */
2175     - if (!dd->sr && dd->port)
2176     - mtip_standby_immediate(dd->port);
2177     + mtip_standby_drive(dd);
2178    
2179     return 0;
2180     }
2181     @@ -3358,7 +3430,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
2182     * Send standby immediate (E0h) to the drive
2183     * so that it saves its state.
2184     */
2185     - if (mtip_standby_immediate(dd->port) != 0) {
2186     + if (mtip_standby_drive(dd) != 0) {
2187     dev_err(&dd->pdev->dev,
2188     "Failed standby-immediate command\n");
2189     return -EFAULT;
2190     @@ -3596,6 +3668,28 @@ static int mtip_block_getgeo(struct block_device *dev,
2191     return 0;
2192     }
2193    
2194     +static int mtip_block_open(struct block_device *dev, fmode_t mode)
2195     +{
2196     + struct driver_data *dd;
2197     +
2198     + if (dev && dev->bd_disk) {
2199     + dd = (struct driver_data *) dev->bd_disk->private_data;
2200     +
2201     + if (dd) {
2202     + if (test_bit(MTIP_DDF_REMOVAL_BIT,
2203     + &dd->dd_flag)) {
2204     + return -ENODEV;
2205     + }
2206     + return 0;
2207     + }
2208     + }
2209     + return -ENODEV;
2210     +}
2211     +
2212     +void mtip_block_release(struct gendisk *disk, fmode_t mode)
2213     +{
2214     +}
2215     +
2216     /*
2217     * Block device operation function.
2218     *
2219     @@ -3603,6 +3697,8 @@ static int mtip_block_getgeo(struct block_device *dev,
2220     * layer.
2221     */
2222     static const struct block_device_operations mtip_block_ops = {
2223     + .open = mtip_block_open,
2224     + .release = mtip_block_release,
2225     .ioctl = mtip_block_ioctl,
2226     #ifdef CONFIG_COMPAT
2227     .compat_ioctl = mtip_block_compat_ioctl,
2228     @@ -3664,10 +3760,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
2229     rq_data_dir(rq))) {
2230     return -ENODATA;
2231     }
2232     - if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
2233     + if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
2234     + test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
2235     return -ENODATA;
2236     - if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
2237     - return -ENXIO;
2238     }
2239    
2240     if (rq->cmd_flags & REQ_DISCARD) {
2241     @@ -3779,11 +3874,33 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
2242     return 0;
2243     }
2244    
2245     +static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
2246     + bool reserved)
2247     +{
2248     + struct driver_data *dd = req->q->queuedata;
2249     + int ret = BLK_EH_RESET_TIMER;
2250     +
2251     + if (reserved)
2252     + goto exit_handler;
2253     +
2254     + if (test_bit(req->tag, dd->port->cmds_to_issue))
2255     + goto exit_handler;
2256     +
2257     + if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
2258     + goto exit_handler;
2259     +
2260     + wake_up_interruptible(&dd->port->svc_wait);
2261     +exit_handler:
2262     + return ret;
2263     +}
2264     +
2265     static struct blk_mq_ops mtip_mq_ops = {
2266     .queue_rq = mtip_queue_rq,
2267     .map_queue = blk_mq_map_queue,
2268     .init_request = mtip_init_cmd,
2269     .exit_request = mtip_free_cmd,
2270     + .complete = mtip_softirq_done_fn,
2271     + .timeout = mtip_cmd_timeout,
2272     };
2273    
2274     /*
2275     @@ -3850,7 +3967,6 @@ static int mtip_block_initialize(struct driver_data *dd)
2276    
2277     mtip_hw_debugfs_init(dd);
2278    
2279     -skip_create_disk:
2280     memset(&dd->tags, 0, sizeof(dd->tags));
2281     dd->tags.ops = &mtip_mq_ops;
2282     dd->tags.nr_hw_queues = 1;
2283     @@ -3860,12 +3976,13 @@ skip_create_disk:
2284     dd->tags.numa_node = dd->numa_node;
2285     dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
2286     dd->tags.driver_data = dd;
2287     + dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
2288    
2289     rv = blk_mq_alloc_tag_set(&dd->tags);
2290     if (rv) {
2291     dev_err(&dd->pdev->dev,
2292     "Unable to allocate request queue\n");
2293     - goto block_queue_alloc_init_error;
2294     + goto block_queue_alloc_tag_error;
2295     }
2296    
2297     /* Allocate the request queue. */
2298     @@ -3880,6 +3997,7 @@ skip_create_disk:
2299     dd->disk->queue = dd->queue;
2300     dd->queue->queuedata = dd;
2301    
2302     +skip_create_disk:
2303     /* Initialize the protocol layer. */
2304     wait_for_rebuild = mtip_hw_get_identify(dd);
2305     if (wait_for_rebuild < 0) {
2306     @@ -3976,8 +4094,9 @@ kthread_run_error:
2307     read_capacity_error:
2308     init_hw_cmds_error:
2309     blk_cleanup_queue(dd->queue);
2310     - blk_mq_free_tag_set(&dd->tags);
2311     block_queue_alloc_init_error:
2312     + blk_mq_free_tag_set(&dd->tags);
2313     +block_queue_alloc_tag_error:
2314     mtip_hw_debugfs_exit(dd);
2315     disk_index_error:
2316     spin_lock(&rssd_index_lock);
2317     @@ -3994,6 +4113,22 @@ protocol_init_error:
2318     return rv;
2319     }
2320    
2321     +static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
2322     +{
2323     + struct driver_data *dd = (struct driver_data *)data;
2324     + struct mtip_cmd *cmd;
2325     +
2326     + if (likely(!reserv))
2327     + blk_mq_complete_request(rq, -ENODEV);
2328     + else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
2329     +
2330     + cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
2331     + if (cmd->comp_func)
2332     + cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
2333     + cmd, -ENODEV);
2334     + }
2335     +}
2336     +
2337     /*
2338     * Block layer deinitialization function.
2339     *
2340     @@ -4025,12 +4160,23 @@ static int mtip_block_remove(struct driver_data *dd)
2341     }
2342     }
2343    
2344     - if (!dd->sr)
2345     - mtip_standby_drive(dd);
2346     + if (!dd->sr) {
2347     + /*
2348     + * Explicitly wait here for IOs to quiesce,
2349     + * as mtip_standby_drive usually won't wait for IOs.
2350     + */
2351     + if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
2352     + GFP_KERNEL))
2353     + mtip_standby_drive(dd);
2354     + }
2355     else
2356     dev_info(&dd->pdev->dev, "device %s surprise removal\n",
2357     dd->disk->disk_name);
2358    
2359     + blk_mq_freeze_queue_start(dd->queue);
2360     + blk_mq_stop_hw_queues(dd->queue);
2361     + blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
2362     +
2363     /*
2364     * Delete our gendisk structure. This also removes the device
2365     * from /dev
2366     @@ -4040,7 +4186,8 @@ static int mtip_block_remove(struct driver_data *dd)
2367     dd->bdev = NULL;
2368     }
2369     if (dd->disk) {
2370     - del_gendisk(dd->disk);
2371     + if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
2372     + del_gendisk(dd->disk);
2373     if (dd->disk->queue) {
2374     blk_cleanup_queue(dd->queue);
2375     blk_mq_free_tag_set(&dd->tags);
2376     @@ -4081,7 +4228,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
2377     dev_info(&dd->pdev->dev,
2378     "Shutting down %s ...\n", dd->disk->disk_name);
2379    
2380     - del_gendisk(dd->disk);
2381     + if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
2382     + del_gendisk(dd->disk);
2383     if (dd->disk->queue) {
2384     blk_cleanup_queue(dd->queue);
2385     blk_mq_free_tag_set(&dd->tags);
2386     @@ -4426,7 +4574,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
2387     struct driver_data *dd = pci_get_drvdata(pdev);
2388     unsigned long flags, to;
2389    
2390     - set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
2391     + set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
2392    
2393     spin_lock_irqsave(&dev_lock, flags);
2394     list_del_init(&dd->online_list);
2395     @@ -4443,12 +4591,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
2396     } while (atomic_read(&dd->irq_workers_active) != 0 &&
2397     time_before(jiffies, to));
2398    
2399     + if (!dd->sr)
2400     + fsync_bdev(dd->bdev);
2401     +
2402     if (atomic_read(&dd->irq_workers_active) != 0) {
2403     dev_warn(&dd->pdev->dev,
2404     "Completion workers still active!\n");
2405     }
2406    
2407     - blk_mq_stop_hw_queues(dd->queue);
2408     + blk_set_queue_dying(dd->queue);
2409     + set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
2410     +
2411     /* Clean up the block layer. */
2412     mtip_block_remove(dd);
2413    
2414     diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
2415     index 3274784008eb..7617888f7944 100644
2416     --- a/drivers/block/mtip32xx/mtip32xx.h
2417     +++ b/drivers/block/mtip32xx/mtip32xx.h
2418     @@ -134,16 +134,24 @@ enum {
2419     MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
2420     MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
2421     MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
2422     + MTIP_PF_TO_ACTIVE_BIT = 9, /* timeout handling */
2423     MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
2424     (1 << MTIP_PF_EH_ACTIVE_BIT) |
2425     (1 << MTIP_PF_SE_ACTIVE_BIT) |
2426     - (1 << MTIP_PF_DM_ACTIVE_BIT)),
2427     + (1 << MTIP_PF_DM_ACTIVE_BIT) |
2428     + (1 << MTIP_PF_TO_ACTIVE_BIT)),
2429    
2430     MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
2431     MTIP_PF_ISSUE_CMDS_BIT = 5,
2432     MTIP_PF_REBUILD_BIT = 6,
2433     MTIP_PF_SVC_THD_STOP_BIT = 8,
2434    
2435     + MTIP_PF_SVC_THD_WORK = ((1 << MTIP_PF_EH_ACTIVE_BIT) |
2436     + (1 << MTIP_PF_ISSUE_CMDS_BIT) |
2437     + (1 << MTIP_PF_REBUILD_BIT) |
2438     + (1 << MTIP_PF_SVC_THD_STOP_BIT) |
2439     + (1 << MTIP_PF_TO_ACTIVE_BIT)),
2440     +
2441     /* below are bit numbers in 'dd_flag' defined in driver_data */
2442     MTIP_DDF_SEC_LOCK_BIT = 0,
2443     MTIP_DDF_REMOVE_PENDING_BIT = 1,
2444     @@ -153,6 +161,7 @@ enum {
2445     MTIP_DDF_RESUME_BIT = 6,
2446     MTIP_DDF_INIT_DONE_BIT = 7,
2447     MTIP_DDF_REBUILD_FAILED_BIT = 8,
2448     + MTIP_DDF_REMOVAL_BIT = 9,
2449    
2450     MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
2451     (1 << MTIP_DDF_SEC_LOCK_BIT) |
2452     diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
2453     index fa893c3ec408..0beaa52df66b 100644
2454     --- a/drivers/bluetooth/ath3k.c
2455     +++ b/drivers/bluetooth/ath3k.c
2456     @@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
2457     { USB_DEVICE(0x0489, 0xe05f) },
2458     { USB_DEVICE(0x0489, 0xe076) },
2459     { USB_DEVICE(0x0489, 0xe078) },
2460     + { USB_DEVICE(0x0489, 0xe095) },
2461     { USB_DEVICE(0x04c5, 0x1330) },
2462     { USB_DEVICE(0x04CA, 0x3004) },
2463     { USB_DEVICE(0x04CA, 0x3005) },
2464     @@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
2465     { USB_DEVICE(0x04CA, 0x300d) },
2466     { USB_DEVICE(0x04CA, 0x300f) },
2467     { USB_DEVICE(0x04CA, 0x3010) },
2468     + { USB_DEVICE(0x04CA, 0x3014) },
2469     { USB_DEVICE(0x0930, 0x0219) },
2470     { USB_DEVICE(0x0930, 0x021c) },
2471     { USB_DEVICE(0x0930, 0x0220) },
2472     @@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
2473     { USB_DEVICE(0x13d3, 0x3362) },
2474     { USB_DEVICE(0x13d3, 0x3375) },
2475     { USB_DEVICE(0x13d3, 0x3393) },
2476     + { USB_DEVICE(0x13d3, 0x3395) },
2477     { USB_DEVICE(0x13d3, 0x3402) },
2478     { USB_DEVICE(0x13d3, 0x3408) },
2479     { USB_DEVICE(0x13d3, 0x3423) },
2480     { USB_DEVICE(0x13d3, 0x3432) },
2481     + { USB_DEVICE(0x13d3, 0x3472) },
2482     { USB_DEVICE(0x13d3, 0x3474) },
2483    
2484     /* Atheros AR5BBU12 with sflash firmware */
2485     @@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
2486     { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
2487     { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
2488     { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
2489     + { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
2490     { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
2491     { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
2492     { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
2493     @@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
2494     { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
2495     { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
2496     { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
2497     + { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
2498     { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
2499     { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
2500     { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
2501     @@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
2502     { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
2503     { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
2504     { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
2505     + { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
2506     { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
2507     { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
2508     { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
2509     { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
2510     + { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
2511     { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
2512    
2513     /* Atheros AR5BBU22 with sflash firmware */
2514     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2515     index a191e318fab8..0d4e372e426d 100644
2516     --- a/drivers/bluetooth/btusb.c
2517     +++ b/drivers/bluetooth/btusb.c
2518     @@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
2519     { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
2520     { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
2521     { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
2522     + { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
2523     { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
2524     { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
2525     { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
2526     @@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
2527     { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
2528     { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
2529     { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
2530     + { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
2531     { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
2532     { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
2533     { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
2534     @@ -227,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
2535     { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
2536     { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
2537     { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
2538     + { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
2539     { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
2540     { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
2541     { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
2542     { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
2543     + { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
2544     { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
2545    
2546     /* Atheros AR5BBU12 with sflash firmware */
2547     diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
2548     index 45cc39aabeee..252142524ff2 100644
2549     --- a/drivers/char/tpm/tpm-chip.c
2550     +++ b/drivers/char/tpm/tpm-chip.c
2551     @@ -136,11 +136,13 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
2552     chip->cdev.owner = chip->pdev->driver->owner;
2553     chip->cdev.kobj.parent = &chip->dev.kobj;
2554    
2555     + devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
2556     +
2557     return chip;
2558     }
2559     EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
2560    
2561     -static int tpm_dev_add_device(struct tpm_chip *chip)
2562     +static int tpm_add_char_device(struct tpm_chip *chip)
2563     {
2564     int rc;
2565    
2566     @@ -151,7 +153,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
2567     chip->devname, MAJOR(chip->dev.devt),
2568     MINOR(chip->dev.devt), rc);
2569    
2570     - device_unregister(&chip->dev);
2571     return rc;
2572     }
2573    
2574     @@ -162,16 +163,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
2575     chip->devname, MAJOR(chip->dev.devt),
2576     MINOR(chip->dev.devt), rc);
2577    
2578     + cdev_del(&chip->cdev);
2579     return rc;
2580     }
2581    
2582     return rc;
2583     }
2584    
2585     -static void tpm_dev_del_device(struct tpm_chip *chip)
2586     +static void tpm_del_char_device(struct tpm_chip *chip)
2587     {
2588     cdev_del(&chip->cdev);
2589     - device_unregister(&chip->dev);
2590     + device_del(&chip->dev);
2591     }
2592    
2593     static int tpm1_chip_register(struct tpm_chip *chip)
2594     @@ -222,7 +224,7 @@ int tpm_chip_register(struct tpm_chip *chip)
2595    
2596     tpm_add_ppi(chip);
2597    
2598     - rc = tpm_dev_add_device(chip);
2599     + rc = tpm_add_char_device(chip);
2600     if (rc)
2601     goto out_err;
2602    
2603     @@ -274,6 +276,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
2604     sysfs_remove_link(&chip->pdev->kobj, "ppi");
2605    
2606     tpm1_chip_unregister(chip);
2607     - tpm_dev_del_device(chip);
2608     + tpm_del_char_device(chip);
2609     }
2610     EXPORT_SYMBOL_GPL(tpm_chip_unregister);
2611     diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
2612     index 8342cf51ffdc..26bab5a2959f 100644
2613     --- a/drivers/char/tpm/tpm_crb.c
2614     +++ b/drivers/char/tpm/tpm_crb.c
2615     @@ -302,11 +302,11 @@ static int crb_acpi_remove(struct acpi_device *device)
2616     struct device *dev = &device->dev;
2617     struct tpm_chip *chip = dev_get_drvdata(dev);
2618    
2619     - tpm_chip_unregister(chip);
2620     -
2621     if (chip->flags & TPM_CHIP_FLAG_TPM2)
2622     tpm2_shutdown(chip, TPM2_SU_CLEAR);
2623    
2624     + tpm_chip_unregister(chip);
2625     +
2626     return 0;
2627     }
2628    
2629     diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
2630     index bd72fb04225e..4e6940acf639 100644
2631     --- a/drivers/char/tpm/tpm_eventlog.c
2632     +++ b/drivers/char/tpm/tpm_eventlog.c
2633     @@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
2634     {
2635     struct tcpa_event *event = v;
2636     struct tcpa_event temp_event;
2637     - char *tempPtr;
2638     + char *temp_ptr;
2639     int i;
2640    
2641     memcpy(&temp_event, event, sizeof(struct tcpa_event));
2642     @@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
2643     temp_event.event_type = do_endian_conversion(event->event_type);
2644     temp_event.event_size = do_endian_conversion(event->event_size);
2645    
2646     - tempPtr = (char *)&temp_event;
2647     + temp_ptr = (char *) &temp_event;
2648    
2649     - for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
2650     - seq_putc(m, tempPtr[i]);
2651     + for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
2652     + seq_putc(m, temp_ptr[i]);
2653     +
2654     + temp_ptr = (char *) v;
2655     +
2656     + for (i = (sizeof(struct tcpa_event) - 1);
2657     + i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
2658     + seq_putc(m, temp_ptr[i]);
2659    
2660     return 0;
2661    
2662     diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
2663     index 015e687ffabe..9f4df8f645f8 100644
2664     --- a/drivers/clk/bcm/clk-bcm2835.c
2665     +++ b/drivers/clk/bcm/clk-bcm2835.c
2666     @@ -1107,13 +1107,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
2667     struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
2668     struct bcm2835_cprman *cprman = divider->cprman;
2669     const struct bcm2835_pll_divider_data *data = divider->data;
2670     - u32 cm;
2671     - int ret;
2672     + u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
2673    
2674     - ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
2675     - if (ret)
2676     - return ret;
2677     + div = DIV_ROUND_UP_ULL(parent_rate, rate);
2678     +
2679     + div = min(div, max_div);
2680     + if (div == max_div)
2681     + div = 0;
2682    
2683     + cprman_write(cprman, data->a2w_reg, div);
2684     cm = cprman_read(cprman, data->cm_reg);
2685     cprman_write(cprman, data->cm_reg, cm | data->load_mask);
2686     cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
2687     diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
2688     index 7f7444cbf6fc..05263571c223 100644
2689     --- a/drivers/clk/rockchip/clk-rk3188.c
2690     +++ b/drivers/clk/rockchip/clk-rk3188.c
2691     @@ -748,6 +748,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
2692     "hclk_peri",
2693     "pclk_cpu",
2694     "pclk_peri",
2695     + "hclk_cpubus"
2696     };
2697    
2698     static void __init rk3188_common_clk_init(struct device_node *np)
2699     diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
2700     index 21f3ea909fab..57acb625c8ff 100644
2701     --- a/drivers/clk/rockchip/clk-rk3368.c
2702     +++ b/drivers/clk/rockchip/clk-rk3368.c
2703     @@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
2704     .core_reg = RK3368_CLKSEL_CON(0),
2705     .div_core_shift = 0,
2706     .div_core_mask = 0x1f,
2707     - .mux_core_shift = 15,
2708     + .mux_core_shift = 7,
2709     };
2710    
2711     static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
2712     @@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
2713     }
2714    
2715     static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
2716     - RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
2717     - RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
2718     - RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
2719     - RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
2720     - RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
2721     - RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
2722     - RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
2723     - RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
2724     - RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
2725     - RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
2726     + RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
2727     + RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
2728     + RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
2729     + RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
2730     + RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
2731     + RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
2732     + RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
2733     + RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
2734     + RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
2735     + RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
2736     };
2737    
2738     static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
2739     - RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
2740     - RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
2741     - RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
2742     - RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
2743     - RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
2744     - RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
2745     - RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
2746     - RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
2747     - RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
2748     - RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
2749     + RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
2750     + RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
2751     + RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
2752     + RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
2753     + RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
2754     + RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
2755     + RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
2756     + RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
2757     + RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
2758     + RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
2759     };
2760    
2761     static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
2762     @@ -384,10 +384,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
2763     * Clock-Architecture Diagram 3
2764     */
2765    
2766     - COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
2767     + COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
2768     RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
2769     RK3368_CLKGATE_CON(4), 6, GFLAGS),
2770     - COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
2771     + COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
2772     RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
2773     RK3368_CLKGATE_CON(4), 7, GFLAGS),
2774    
2775     @@ -442,7 +442,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
2776     GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
2777     RK3368_CLKGATE_CON(4), 13, GFLAGS),
2778     GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
2779     - RK3368_CLKGATE_CON(5), 12, GFLAGS),
2780     + RK3368_CLKGATE_CON(4), 12, GFLAGS),
2781    
2782     COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
2783     RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
2784     diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
2785     index 3eb3f1279fb7..7de007abe46e 100644
2786     --- a/drivers/crypto/atmel-aes.c
2787     +++ b/drivers/crypto/atmel-aes.c
2788     @@ -2085,9 +2085,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
2789     }
2790    
2791     aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
2792     - if (!aes_dd->io_base) {
2793     + if (IS_ERR(aes_dd->io_base)) {
2794     dev_err(dev, "can't ioremap\n");
2795     - err = -ENOMEM;
2796     + err = PTR_ERR(aes_dd->io_base);
2797     goto res_err;
2798     }
2799    
2800     diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
2801     index 8bf9914d4d15..68d47a2da4a1 100644
2802     --- a/drivers/crypto/atmel-sha.c
2803     +++ b/drivers/crypto/atmel-sha.c
2804     @@ -1404,9 +1404,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
2805     }
2806    
2807     sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
2808     - if (!sha_dd->io_base) {
2809     + if (IS_ERR(sha_dd->io_base)) {
2810     dev_err(dev, "can't ioremap\n");
2811     - err = -ENOMEM;
2812     + err = PTR_ERR(sha_dd->io_base);
2813     goto res_err;
2814     }
2815    
2816     diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
2817     index 2c7a628d0375..bf467d7be35c 100644
2818     --- a/drivers/crypto/atmel-tdes.c
2819     +++ b/drivers/crypto/atmel-tdes.c
2820     @@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
2821     }
2822    
2823     tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
2824     - if (!tdes_dd->io_base) {
2825     + if (IS_ERR(tdes_dd->io_base)) {
2826     dev_err(dev, "can't ioremap\n");
2827     - err = -ENOMEM;
2828     + err = PTR_ERR(tdes_dd->io_base);
2829     goto res_err;
2830     }
2831    
2832     diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
2833     index d89f20c04266..3d9acc53d247 100644
2834     --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
2835     +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
2836     @@ -220,6 +220,39 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
2837     return ccp_aes_cmac_finup(req);
2838     }
2839    
2840     +static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
2841     +{
2842     + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
2843     + struct ccp_aes_cmac_exp_ctx state;
2844     +
2845     + state.null_msg = rctx->null_msg;
2846     + memcpy(state.iv, rctx->iv, sizeof(state.iv));
2847     + state.buf_count = rctx->buf_count;
2848     + memcpy(state.buf, rctx->buf, sizeof(state.buf));
2849     +
2850     + /* 'out' may not be aligned so memcpy from local variable */
2851     + memcpy(out, &state, sizeof(state));
2852     +
2853     + return 0;
2854     +}
2855     +
2856     +static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
2857     +{
2858     + struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
2859     + struct ccp_aes_cmac_exp_ctx state;
2860     +
2861     + /* 'in' may not be aligned so memcpy to local variable */
2862     + memcpy(&state, in, sizeof(state));
2863     +
2864     + memset(rctx, 0, sizeof(*rctx));
2865     + rctx->null_msg = state.null_msg;
2866     + memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
2867     + rctx->buf_count = state.buf_count;
2868     + memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
2869     +
2870     + return 0;
2871     +}
2872     +
2873     static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2874     unsigned int key_len)
2875     {
2876     @@ -352,10 +385,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
2877     alg->final = ccp_aes_cmac_final;
2878     alg->finup = ccp_aes_cmac_finup;
2879     alg->digest = ccp_aes_cmac_digest;
2880     + alg->export = ccp_aes_cmac_export;
2881     + alg->import = ccp_aes_cmac_import;
2882     alg->setkey = ccp_aes_cmac_setkey;
2883    
2884     halg = &alg->halg;
2885     halg->digestsize = AES_BLOCK_SIZE;
2886     + halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
2887    
2888     base = &halg->base;
2889     snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
2890     diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
2891     index d14b3f28e010..8ef06fad8b14 100644
2892     --- a/drivers/crypto/ccp/ccp-crypto-sha.c
2893     +++ b/drivers/crypto/ccp/ccp-crypto-sha.c
2894     @@ -207,6 +207,43 @@ static int ccp_sha_digest(struct ahash_request *req)
2895     return ccp_sha_finup(req);
2896     }
2897    
2898     +static int ccp_sha_export(struct ahash_request *req, void *out)
2899     +{
2900     + struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
2901     + struct ccp_sha_exp_ctx state;
2902     +
2903     + state.type = rctx->type;
2904     + state.msg_bits = rctx->msg_bits;
2905     + state.first = rctx->first;
2906     + memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
2907     + state.buf_count = rctx->buf_count;
2908     + memcpy(state.buf, rctx->buf, sizeof(state.buf));
2909     +
2910     + /* 'out' may not be aligned so memcpy from local variable */
2911     + memcpy(out, &state, sizeof(state));
2912     +
2913     + return 0;
2914     +}
2915     +
2916     +static int ccp_sha_import(struct ahash_request *req, const void *in)
2917     +{
2918     + struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
2919     + struct ccp_sha_exp_ctx state;
2920     +
2921     + /* 'in' may not be aligned so memcpy to local variable */
2922     + memcpy(&state, in, sizeof(state));
2923     +
2924     + memset(rctx, 0, sizeof(*rctx));
2925     + rctx->type = state.type;
2926     + rctx->msg_bits = state.msg_bits;
2927     + rctx->first = state.first;
2928     + memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
2929     + rctx->buf_count = state.buf_count;
2930     + memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
2931     +
2932     + return 0;
2933     +}
2934     +
2935     static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
2936     unsigned int key_len)
2937     {
2938     @@ -403,9 +440,12 @@ static int ccp_register_sha_alg(struct list_head *head,
2939     alg->final = ccp_sha_final;
2940     alg->finup = ccp_sha_finup;
2941     alg->digest = ccp_sha_digest;
2942     + alg->export = ccp_sha_export;
2943     + alg->import = ccp_sha_import;
2944    
2945     halg = &alg->halg;
2946     halg->digestsize = def->digest_size;
2947     + halg->statesize = sizeof(struct ccp_sha_exp_ctx);
2948    
2949     base = &halg->base;
2950     snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
2951     diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
2952     index 76a96f0f44c6..a326ec20bfa8 100644
2953     --- a/drivers/crypto/ccp/ccp-crypto.h
2954     +++ b/drivers/crypto/ccp/ccp-crypto.h
2955     @@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
2956     struct ccp_cmd cmd;
2957     };
2958    
2959     +struct ccp_aes_cmac_exp_ctx {
2960     + unsigned int null_msg;
2961     +
2962     + u8 iv[AES_BLOCK_SIZE];
2963     +
2964     + unsigned int buf_count;
2965     + u8 buf[AES_BLOCK_SIZE];
2966     +};
2967     +
2968     /***** SHA related defines *****/
2969     #define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
2970     #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
2971     @@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
2972     struct ccp_cmd cmd;
2973     };
2974    
2975     +struct ccp_sha_exp_ctx {
2976     + enum ccp_sha_type type;
2977     +
2978     + u64 msg_bits;
2979     +
2980     + unsigned int first;
2981     +
2982     + u8 ctx[MAX_SHA_CONTEXT_SIZE];
2983     +
2984     + unsigned int buf_count;
2985     + u8 buf[MAX_SHA_BLOCK_SIZE];
2986     +};
2987     +
2988     /***** Common Context Structure *****/
2989     struct ccp_ctx {
2990     int (*complete)(struct crypto_async_request *req, int ret);
2991     diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
2992     index c0656e7f37b5..80239ae69527 100644
2993     --- a/drivers/crypto/marvell/cesa.c
2994     +++ b/drivers/crypto/marvell/cesa.c
2995     @@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
2996     res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
2997     cesa->regs = devm_ioremap_resource(dev, res);
2998     if (IS_ERR(cesa->regs))
2999     - return -ENOMEM;
3000     + return PTR_ERR(cesa->regs);
3001    
3002     ret = mv_cesa_dev_dma_init(cesa);
3003     if (ret)
3004     diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
3005     index 4c243c1ffc7f..790f7cadc1ed 100644
3006     --- a/drivers/crypto/ux500/cryp/cryp_core.c
3007     +++ b/drivers/crypto/ux500/cryp/cryp_core.c
3008     @@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
3009    
3010     device_data->phybase = res->start;
3011     device_data->base = devm_ioremap_resource(dev, res);
3012     - if (!device_data->base) {
3013     + if (IS_ERR(device_data->base)) {
3014     dev_err(dev, "[%s]: ioremap failed!", __func__);
3015     - ret = -ENOMEM;
3016     + ret = PTR_ERR(device_data->base);
3017     goto out;
3018     }
3019    
3020     diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
3021     index d6fdc583ce5d..574e87c7f2b8 100644
3022     --- a/drivers/crypto/ux500/hash/hash_core.c
3023     +++ b/drivers/crypto/ux500/hash/hash_core.c
3024     @@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
3025    
3026     device_data->phybase = res->start;
3027     device_data->base = devm_ioremap_resource(dev, res);
3028     - if (!device_data->base) {
3029     + if (IS_ERR(device_data->base)) {
3030     dev_err(dev, "%s: ioremap() failed!\n", __func__);
3031     - ret = -ENOMEM;
3032     + ret = PTR_ERR(device_data->base);
3033     goto out;
3034     }
3035     spin_lock_init(&device_data->ctx_lock);
3036     diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
3037     index 9eee13ef83a5..d87a47547ba5 100644
3038     --- a/drivers/edac/amd64_edac.c
3039     +++ b/drivers/edac/amd64_edac.c
3040     @@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
3041     u64 chan_off;
3042     u64 dram_base = get_dram_base(pvt, range);
3043     u64 hole_off = f10_dhar_offset(pvt);
3044     - u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
3045     + u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
3046    
3047     if (hi_rng) {
3048     /*
3049     diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
3050     index f5c6b97c8958..93f0d4120289 100644
3051     --- a/drivers/edac/sb_edac.c
3052     +++ b/drivers/edac/sb_edac.c
3053     @@ -1839,8 +1839,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
3054     edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
3055     n_tads, gb, (mb*1000)/1024,
3056     ((u64)tmp_mb) << 20L,
3057     - (u32)TAD_SOCK(reg),
3058     - (u32)TAD_CH(reg),
3059     + (u32)(1 << TAD_SOCK(reg)),
3060     + (u32)TAD_CH(reg) + 1,
3061     (u32)TAD_TGT0(reg),
3062     (u32)TAD_TGT1(reg),
3063     (u32)TAD_TGT2(reg),
3064     @@ -2118,7 +2118,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
3065     }
3066    
3067     ch_way = TAD_CH(reg) + 1;
3068     - sck_way = TAD_SOCK(reg) + 1;
3069     + sck_way = 1 << TAD_SOCK(reg);
3070    
3071     if (ch_way == 3)
3072     idx = addr >> 6;
3073     @@ -2175,7 +2175,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
3074     n_tads,
3075     addr,
3076     limit,
3077     - (u32)TAD_SOCK(reg),
3078     + sck_way,
3079     ch_way,
3080     offset,
3081     idx,
3082     @@ -2190,18 +2190,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
3083     offset, addr);
3084     return -EINVAL;
3085     }
3086     - addr -= offset;
3087     - /* Store the low bits [0:6] of the addr */
3088     - ch_addr = addr & 0x7f;
3089     - /* Remove socket wayness and remove 6 bits */
3090     - addr >>= 6;
3091     - addr = div_u64(addr, sck_xch);
3092     -#if 0
3093     - /* Divide by channel way */
3094     - addr = addr / ch_way;
3095     -#endif
3096     - /* Recover the last 6 bits */
3097     - ch_addr |= addr << 6;
3098     +
3099     + ch_addr = addr - offset;
3100     + ch_addr >>= (6 + shiftup);
3101     + ch_addr /= ch_way * sck_way;
3102     + ch_addr <<= (6 + shiftup);
3103     + ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
3104    
3105     /*
3106     * Step 3) Decode rank
3107     diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
3108     index 0c2f0a61b0ea..0b631e5b5b84 100644
3109     --- a/drivers/firmware/broadcom/bcm47xx_nvram.c
3110     +++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
3111     @@ -94,15 +94,14 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
3112    
3113     found:
3114     __ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
3115     - header = (struct nvram_header *)nvram_buf;
3116     - nvram_len = header->len;
3117     + nvram_len = ((struct nvram_header *)(nvram_buf))->len;
3118     if (nvram_len > size) {
3119     pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
3120     nvram_len = size;
3121     }
3122     if (nvram_len >= NVRAM_SPACE) {
3123     pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
3124     - header->len, NVRAM_SPACE - 1);
3125     + nvram_len, NVRAM_SPACE - 1);
3126     nvram_len = NVRAM_SPACE - 1;
3127     }
3128     /* proceed reading data after header */
3129     diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
3130     index 23196c5fc17c..99b375c95998 100644
3131     --- a/drivers/gpio/gpio-pca953x.c
3132     +++ b/drivers/gpio/gpio-pca953x.c
3133     @@ -367,9 +367,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
3134     memcpy(reg_val, chip->reg_output, NBANK(chip));
3135     mutex_lock(&chip->i2c_lock);
3136     for(bank=0; bank<NBANK(chip); bank++) {
3137     - unsigned bankmask = mask[bank/4] >> ((bank % 4) * 8);
3138     + unsigned bankmask = mask[bank / sizeof(*mask)] >>
3139     + ((bank % sizeof(*mask)) * 8);
3140     if(bankmask) {
3141     - unsigned bankval = bits[bank/4] >> ((bank % 4) * 8);
3142     + unsigned bankval = bits[bank / sizeof(*bits)] >>
3143     + ((bank % sizeof(*bits)) * 8);
3144     reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
3145     }
3146     }
3147     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
3148     index 3c895863fcf5..81dc6b65436f 100644
3149     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
3150     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
3151     @@ -63,6 +63,10 @@ bool amdgpu_has_atpx(void) {
3152     return amdgpu_atpx_priv.atpx_detected;
3153     }
3154    
3155     +bool amdgpu_has_atpx_dgpu_power_cntl(void) {
3156     + return amdgpu_atpx_priv.atpx.functions.power_cntl;
3157     +}
3158     +
3159     /**
3160     * amdgpu_atpx_call - call an ATPX method
3161     *
3162     @@ -142,10 +146,6 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
3163     */
3164     static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
3165     {
3166     - /* make sure required functions are enabled */
3167     - /* dGPU power control is required */
3168     - atpx->functions.power_cntl = true;
3169     -
3170     if (atpx->functions.px_params) {
3171     union acpi_object *info;
3172     struct atpx_px_params output;
3173     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3174     index 51bfc114584e..d6c68d00cbb0 100644
3175     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3176     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
3177     @@ -62,6 +62,12 @@ static const char *amdgpu_asic_name[] = {
3178     "LAST",
3179     };
3180    
3181     +#if defined(CONFIG_VGA_SWITCHEROO)
3182     +bool amdgpu_has_atpx_dgpu_power_cntl(void);
3183     +#else
3184     +static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
3185     +#endif
3186     +
3187     bool amdgpu_device_is_px(struct drm_device *dev)
3188     {
3189     struct amdgpu_device *adev = dev->dev_private;
3190     @@ -1511,7 +1517,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
3191    
3192     if (amdgpu_runtime_pm == 1)
3193     runtime = true;
3194     - if (amdgpu_device_is_px(ddev))
3195     + if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
3196     runtime = true;
3197     vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
3198     if (runtime)
3199     diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
3200     index 2cf50180cc51..b1c7a9b3631b 100644
3201     --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
3202     +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
3203     @@ -32,8 +32,8 @@
3204     #include "oss/oss_2_4_d.h"
3205     #include "oss/oss_2_4_sh_mask.h"
3206    
3207     -#include "gmc/gmc_8_1_d.h"
3208     -#include "gmc/gmc_8_1_sh_mask.h"
3209     +#include "gmc/gmc_7_1_d.h"
3210     +#include "gmc/gmc_7_1_sh_mask.h"
3211    
3212     #include "gca/gfx_8_0_d.h"
3213     #include "gca/gfx_8_0_enum.h"
3214     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
3215     index cf01177ca3b5..2ea012e88991 100644
3216     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
3217     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
3218     @@ -241,6 +241,11 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
3219     phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3220     PHM_PlatformCaps_DynamicUVDState);
3221    
3222     + phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3223     + PHM_PlatformCaps_UVDDPM);
3224     + phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3225     + PHM_PlatformCaps_VCEDPM);
3226     +
3227     cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
3228     cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
3229     cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
3230     diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
3231     index 01b20e14a247..6104d7d7449e 100644
3232     --- a/drivers/gpu/drm/radeon/atombios_encoders.c
3233     +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
3234     @@ -892,8 +892,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
3235     else
3236     args.v1.ucLaneNum = 4;
3237    
3238     - if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
3239     - args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
3240     switch (radeon_encoder->encoder_id) {
3241     case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3242     args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
3243     @@ -910,6 +908,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
3244     args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
3245     else
3246     args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
3247     +
3248     + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
3249     + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
3250     +
3251     break;
3252     case 2:
3253     case 3:
3254     diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
3255     index c4b4f298a283..9bc408c9f9f6 100644
3256     --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
3257     +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
3258     @@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
3259     return radeon_atpx_priv.atpx_detected;
3260     }
3261    
3262     +bool radeon_has_atpx_dgpu_power_cntl(void) {
3263     + return radeon_atpx_priv.atpx.functions.power_cntl;
3264     +}
3265     +
3266     /**
3267     * radeon_atpx_call - call an ATPX method
3268     *
3269     @@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
3270     */
3271     static int radeon_atpx_validate(struct radeon_atpx *atpx)
3272     {
3273     - /* make sure required functions are enabled */
3274     - /* dGPU power control is required */
3275     - atpx->functions.power_cntl = true;
3276     -
3277     if (atpx->functions.px_params) {
3278     union acpi_object *info;
3279     struct atpx_px_params output;
3280     diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
3281     index 4197ca1bb1e4..e2396336f9e8 100644
3282     --- a/drivers/gpu/drm/radeon/radeon_device.c
3283     +++ b/drivers/gpu/drm/radeon/radeon_device.c
3284     @@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
3285     "LAST",
3286     };
3287    
3288     +#if defined(CONFIG_VGA_SWITCHEROO)
3289     +bool radeon_has_atpx_dgpu_power_cntl(void);
3290     +#else
3291     +static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
3292     +#endif
3293     +
3294     #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
3295     #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
3296    
3297     @@ -1433,7 +1439,7 @@ int radeon_device_init(struct radeon_device *rdev,
3298     * ignore it */
3299     vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
3300    
3301     - if (rdev->flags & RADEON_IS_PX)
3302     + if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
3303     runtime = true;
3304     vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
3305     if (runtime)
3306     diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
3307     index 2d9196a447fd..bfcef4db8138 100644
3308     --- a/drivers/gpu/drm/radeon/radeon_display.c
3309     +++ b/drivers/gpu/drm/radeon/radeon_display.c
3310     @@ -1683,10 +1683,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
3311     /* setup afmt */
3312     radeon_afmt_init(rdev);
3313    
3314     - if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
3315     - radeon_fbdev_init(rdev);
3316     - drm_kms_helper_poll_init(rdev->ddev);
3317     - }
3318     + radeon_fbdev_init(rdev);
3319     + drm_kms_helper_poll_init(rdev->ddev);
3320    
3321     /* do pm late init */
3322     ret = radeon_pm_late_init(rdev);
3323     diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
3324     index df7a1719c841..9d210bbcab50 100644
3325     --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
3326     +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
3327     @@ -525,17 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
3328     drm_mode_set_crtcinfo(adjusted_mode, 0);
3329     {
3330     struct radeon_connector_atom_dig *dig_connector;
3331     - int ret;
3332     -
3333     dig_connector = mst_enc->connector->con_priv;
3334     - ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
3335     - dig_connector->dpcd, adjusted_mode->clock,
3336     - &dig_connector->dp_lane_count,
3337     - &dig_connector->dp_clock);
3338     - if (ret) {
3339     - dig_connector->dp_lane_count = 0;
3340     - dig_connector->dp_clock = 0;
3341     - }
3342     + dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
3343     + dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
3344     DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
3345     dig_connector->dp_lane_count, dig_connector->dp_clock);
3346     }
3347     diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
3348     index d2e628eea53d..d179596334a7 100644
3349     --- a/drivers/gpu/drm/radeon/radeon_fb.c
3350     +++ b/drivers/gpu/drm/radeon/radeon_fb.c
3351     @@ -292,7 +292,8 @@ out_unref:
3352    
3353     void radeon_fb_output_poll_changed(struct radeon_device *rdev)
3354     {
3355     - drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
3356     + if (rdev->mode_info.rfbdev)
3357     + drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
3358     }
3359    
3360     static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
3361     @@ -325,6 +326,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
3362     int bpp_sel = 32;
3363     int ret;
3364    
3365     + /* don't enable fbdev if no connectors */
3366     + if (list_empty(&rdev->ddev->mode_config.connector_list))
3367     + return 0;
3368     +
3369     /* select 8 bpp console on RN50 or 16MB cards */
3370     if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
3371     bpp_sel = 8;
3372     @@ -377,11 +382,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
3373    
3374     void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
3375     {
3376     - fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
3377     + if (rdev->mode_info.rfbdev)
3378     + fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
3379     }
3380    
3381     bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
3382     {
3383     + if (!rdev->mode_info.rfbdev)
3384     + return false;
3385     +
3386     if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
3387     return true;
3388     return false;
3389     @@ -389,12 +398,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
3390    
3391     void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
3392     {
3393     - drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
3394     + if (rdev->mode_info.rfbdev)
3395     + drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
3396     }
3397    
3398     void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
3399     {
3400     - drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
3401     + if (rdev->mode_info.rfbdev)
3402     + drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
3403     }
3404    
3405     void radeon_fbdev_restore_mode(struct radeon_device *rdev)
3406     diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
3407     index 22278bcfc60e..ac8eafea6361 100644
3408     --- a/drivers/gpu/drm/vc4/vc4_bo.c
3409     +++ b/drivers/gpu/drm/vc4/vc4_bo.c
3410     @@ -499,11 +499,12 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
3411     if (IS_ERR(bo))
3412     return PTR_ERR(bo);
3413    
3414     - ret = copy_from_user(bo->base.vaddr,
3415     + if (copy_from_user(bo->base.vaddr,
3416     (void __user *)(uintptr_t)args->data,
3417     - args->size);
3418     - if (ret != 0)
3419     + args->size)) {
3420     + ret = -EFAULT;
3421     goto fail;
3422     + }
3423     /* Clear the rest of the memory from allocating from the BO
3424     * cache.
3425     */
3426     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
3427     index 7e89288b1537..99446ffd71fb 100644
3428     --- a/drivers/hid/hid-core.c
3429     +++ b/drivers/hid/hid-core.c
3430     @@ -1891,6 +1891,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
3431     { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
3432     { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
3433     { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
3434     + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
3435     { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
3436     { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
3437     { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
3438     @@ -2615,9 +2616,10 @@ int hid_add_device(struct hid_device *hdev)
3439     /*
3440     * Scan generic devices for group information
3441     */
3442     - if (hid_ignore_special_drivers ||
3443     - (!hdev->group &&
3444     - !hid_match_id(hdev, hid_have_special_driver))) {
3445     + if (hid_ignore_special_drivers) {
3446     + hdev->group = HID_GROUP_GENERIC;
3447     + } else if (!hdev->group &&
3448     + !hid_match_id(hdev, hid_have_special_driver)) {
3449     ret = hid_scan_report(hdev);
3450     if (ret)
3451     hid_warn(hdev, "bad device descriptor (%d)\n", ret);
3452     diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
3453     index 296d4991560e..a20fc604ffd8 100644
3454     --- a/drivers/hid/hid-multitouch.c
3455     +++ b/drivers/hid/hid-multitouch.c
3456     @@ -396,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
3457     td->is_buttonpad = true;
3458    
3459     break;
3460     + case 0xff0000c5:
3461     + /* Retrieve the Win8 blob once to enable some devices */
3462     + if (usage->usage_index == 0)
3463     + mt_get_feature(hdev, field->report);
3464     + break;
3465     }
3466     }
3467    
3468     diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
3469     index b9216938a718..bb897497f008 100644
3470     --- a/drivers/hid/i2c-hid/i2c-hid.c
3471     +++ b/drivers/hid/i2c-hid/i2c-hid.c
3472     @@ -283,17 +283,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
3473     u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
3474     u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
3475     u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
3476     + u16 size;
3477     + int args_len;
3478     + int index = 0;
3479     +
3480     + i2c_hid_dbg(ihid, "%s\n", __func__);
3481     +
3482     + if (data_len > ihid->bufsize)
3483     + return -EINVAL;
3484    
3485     - /* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
3486     - u16 size = 2 /* size */ +
3487     + size = 2 /* size */ +
3488     (reportID ? 1 : 0) /* reportID */ +
3489     data_len /* buf */;
3490     - int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
3491     + args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
3492     2 /* dataRegister */ +
3493     size /* args */;
3494     - int index = 0;
3495     -
3496     - i2c_hid_dbg(ihid, "%s\n", __func__);
3497    
3498     if (!use_data && maxOutputLength == 0)
3499     return -ENOSYS;
3500     diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
3501     index cd4510a63375..146eed70bdf4 100644
3502     --- a/drivers/idle/intel_idle.c
3503     +++ b/drivers/idle/intel_idle.c
3504     @@ -65,7 +65,7 @@
3505     #include <asm/mwait.h>
3506     #include <asm/msr.h>
3507    
3508     -#define INTEL_IDLE_VERSION "0.4"
3509     +#define INTEL_IDLE_VERSION "0.4.1"
3510     #define PREFIX "intel_idle: "
3511    
3512     static struct cpuidle_driver intel_idle_driver = {
3513     @@ -994,36 +994,92 @@ static void intel_idle_cpuidle_devices_uninit(void)
3514     }
3515    
3516     /*
3517     - * intel_idle_state_table_update()
3518     - *
3519     - * Update the default state_table for this CPU-id
3520     + * ivt_idle_state_table_update(void)
3521     *
3522     - * Currently used to access tuned IVT multi-socket targets
3523     + * Tune IVT multi-socket targets
3524     * Assumption: num_sockets == (max_package_num + 1)
3525     */
3526     -void intel_idle_state_table_update(void)
3527     +static void ivt_idle_state_table_update(void)
3528     {
3529     /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
3530     - if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
3531     - int cpu, package_num, num_sockets = 1;
3532     -
3533     - for_each_online_cpu(cpu) {
3534     - package_num = topology_physical_package_id(cpu);
3535     - if (package_num + 1 > num_sockets) {
3536     - num_sockets = package_num + 1;
3537     -
3538     - if (num_sockets > 4) {
3539     - cpuidle_state_table = ivt_cstates_8s;
3540     - return;
3541     - }
3542     + int cpu, package_num, num_sockets = 1;
3543     +
3544     + for_each_online_cpu(cpu) {
3545     + package_num = topology_physical_package_id(cpu);
3546     + if (package_num + 1 > num_sockets) {
3547     + num_sockets = package_num + 1;
3548     +
3549     + if (num_sockets > 4) {
3550     + cpuidle_state_table = ivt_cstates_8s;
3551     + return;
3552     }
3553     }
3554     + }
3555     +
3556     + if (num_sockets > 2)
3557     + cpuidle_state_table = ivt_cstates_4s;
3558     +
3559     + /* else, 1 and 2 socket systems use default ivt_cstates */
3560     +}
3561     +/*
3562     + * sklh_idle_state_table_update(void)
3563     + *
3564     + * On SKL-H (model 0x5e) disable C8 and C9 if:
3565     + * C10 is enabled and SGX disabled
3566     + */
3567     +static void sklh_idle_state_table_update(void)
3568     +{
3569     + unsigned long long msr;
3570     + unsigned int eax, ebx, ecx, edx;
3571     +
3572     +
3573     + /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
3574     + if (max_cstate <= 7)
3575     + return;
3576     +
3577     + /* if PC10 not present in CPUID.MWAIT.EDX */
3578     + if ((mwait_substates & (0xF << 28)) == 0)
3579     + return;
3580     +
3581     + rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
3582     +
3583     + /* PC10 is not enabled in PKG C-state limit */
3584     + if ((msr & 0xF) != 8)
3585     + return;
3586     +
3587     + ecx = 0;
3588     + cpuid(7, &eax, &ebx, &ecx, &edx);
3589     +
3590     + /* if SGX is present */
3591     + if (ebx & (1 << 2)) {
3592    
3593     - if (num_sockets > 2)
3594     - cpuidle_state_table = ivt_cstates_4s;
3595     - /* else, 1 and 2 socket systems use default ivt_cstates */
3596     + rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
3597     +
3598     + /* if SGX is enabled */
3599     + if (msr & (1 << 18))
3600     + return;
3601     + }
3602     +
3603     + skl_cstates[5].disabled = 1; /* C8-SKL */
3604     + skl_cstates[6].disabled = 1; /* C9-SKL */
3605     +}
3606     +/*
3607     + * intel_idle_state_table_update()
3608     + *
3609     + * Update the default state_table for this CPU-id
3610     + */
3611     +
3612     +static void intel_idle_state_table_update(void)
3613     +{
3614     + switch (boot_cpu_data.x86_model) {
3615     +
3616     + case 0x3e: /* IVT */
3617     + ivt_idle_state_table_update();
3618     + break;
3619     + case 0x5e: /* SKL-H */
3620     + sklh_idle_state_table_update();
3621     + break;
3622     }
3623     - return;
3624     }
3625    
3626     /*
3627     @@ -1063,6 +1119,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
3628     if (num_substates == 0)
3629     continue;
3630    
3631     + /* if state marked as disabled, skip it */
3632     + if (cpuidle_state_table[cstate].disabled != 0) {
3633     + pr_debug(PREFIX "state %s is disabled",
3634     + cpuidle_state_table[cstate].name);
3635     + continue;
3636     + }
3637     +
3638     +
3639     if (((mwait_cstate + 1) > 2) &&
3640     !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
3641     mark_tsc_unstable("TSC halts in idle"
3642     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
3643     index f121e6129339..0e1a802c3618 100644
3644     --- a/drivers/infiniband/ulp/isert/ib_isert.c
3645     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
3646     @@ -65,6 +65,7 @@ isert_rdma_accept(struct isert_conn *isert_conn);
3647     struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
3648    
3649     static void isert_release_work(struct work_struct *work);
3650     +static void isert_wait4flush(struct isert_conn *isert_conn);
3651    
3652     static inline bool
3653     isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
3654     @@ -820,12 +821,31 @@ isert_put_conn(struct isert_conn *isert_conn)
3655     kref_put(&isert_conn->kref, isert_release_kref);
3656     }
3657    
3658     +static void
3659     +isert_handle_unbound_conn(struct isert_conn *isert_conn)
3660     +{
3661     + struct isert_np *isert_np = isert_conn->cm_id->context;
3662     +
3663     + mutex_lock(&isert_np->mutex);
3664     + if (!list_empty(&isert_conn->node)) {
3665     + /*
3666     + * This means iscsi doesn't know this connection
3667     + * so schedule a cleanup ourselves
3668     + */
3669     + list_del_init(&isert_conn->node);
3670     + isert_put_conn(isert_conn);
3671     + complete(&isert_conn->wait);
3672     + queue_work(isert_release_wq, &isert_conn->release_work);
3673     + }
3674     + mutex_unlock(&isert_np->mutex);
3675     +}
3676     +
3677     /**
3678     * isert_conn_terminate() - Initiate connection termination
3679     * @isert_conn: isert connection struct
3680     *
3681     * Notes:
3682     - * In case the connection state is FULL_FEATURE, move state
3683     + * In case the connection state is BOUND, move state
3684     * to TEMINATING and start teardown sequence (rdma_disconnect).
3685     * In case the connection state is UP, complete flush as well.
3686     *
3687     @@ -837,23 +857,19 @@ isert_conn_terminate(struct isert_conn *isert_conn)
3688     {
3689     int err;
3690    
3691     - switch (isert_conn->state) {
3692     - case ISER_CONN_TERMINATING:
3693     - break;
3694     - case ISER_CONN_UP:
3695     - case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
3696     - isert_info("Terminating conn %p state %d\n",
3697     - isert_conn, isert_conn->state);
3698     - isert_conn->state = ISER_CONN_TERMINATING;
3699     - err = rdma_disconnect(isert_conn->cm_id);
3700     - if (err)
3701     - isert_warn("Failed rdma_disconnect isert_conn %p\n",
3702     - isert_conn);
3703     - break;
3704     - default:
3705     - isert_warn("conn %p teminating in state %d\n",
3706     - isert_conn, isert_conn->state);
3707     - }
3708     + if (isert_conn->state >= ISER_CONN_TERMINATING)
3709     + return;
3710     +
3711     + isert_info("Terminating conn %p state %d\n",
3712     + isert_conn, isert_conn->state);
3713     + isert_conn->state = ISER_CONN_TERMINATING;
3714     + err = rdma_disconnect(isert_conn->cm_id);
3715     + if (err)
3716     + isert_warn("Failed rdma_disconnect isert_conn %p\n",
3717     + isert_conn);
3718     +
3719     + isert_info("conn %p completing wait\n", isert_conn);
3720     + complete(&isert_conn->wait);
3721     }
3722    
3723     static int
3724     @@ -887,35 +903,27 @@ static int
3725     isert_disconnected_handler(struct rdma_cm_id *cma_id,
3726     enum rdma_cm_event_type event)
3727     {
3728     - struct isert_np *isert_np = cma_id->context;
3729     - struct isert_conn *isert_conn;
3730     - bool terminating = false;
3731     -
3732     - if (isert_np->cm_id == cma_id)
3733     - return isert_np_cma_handler(cma_id->context, event);
3734     -
3735     - isert_conn = cma_id->qp->qp_context;
3736     + struct isert_conn *isert_conn = cma_id->qp->qp_context;
3737    
3738     mutex_lock(&isert_conn->mutex);
3739     - terminating = (isert_conn->state == ISER_CONN_TERMINATING);
3740     - isert_conn_terminate(isert_conn);
3741     - mutex_unlock(&isert_conn->mutex);
3742     -
3743     - isert_info("conn %p completing wait\n", isert_conn);
3744     - complete(&isert_conn->wait);
3745     -
3746     - if (terminating)
3747     - goto out;
3748     -
3749     - mutex_lock(&isert_np->mutex);
3750     - if (!list_empty(&isert_conn->node)) {
3751     - list_del_init(&isert_conn->node);
3752     - isert_put_conn(isert_conn);
3753     - queue_work(isert_release_wq, &isert_conn->release_work);
3754     + switch (isert_conn->state) {
3755     + case ISER_CONN_TERMINATING:
3756     + break;
3757     + case ISER_CONN_UP:
3758     + isert_conn_terminate(isert_conn);
3759     + isert_wait4flush(isert_conn);
3760     + isert_handle_unbound_conn(isert_conn);
3761     + break;
3762     + case ISER_CONN_BOUND:
3763     + case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
3764     + iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
3765     + break;
3766     + default:
3767     + isert_warn("conn %p teminating in state %d\n",
3768     + isert_conn, isert_conn->state);
3769     }
3770     - mutex_unlock(&isert_np->mutex);
3771     + mutex_unlock(&isert_conn->mutex);
3772    
3773     -out:
3774     return 0;
3775     }
3776    
3777     @@ -934,12 +942,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
3778     static int
3779     isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
3780     {
3781     + struct isert_np *isert_np = cma_id->context;
3782     int ret = 0;
3783    
3784     isert_info("%s (%d): status %d id %p np %p\n",
3785     rdma_event_msg(event->event), event->event,
3786     event->status, cma_id, cma_id->context);
3787    
3788     + if (isert_np->cm_id == cma_id)
3789     + return isert_np_cma_handler(cma_id->context, event->event);
3790     +
3791     switch (event->event) {
3792     case RDMA_CM_EVENT_CONNECT_REQUEST:
3793     ret = isert_connect_request(cma_id, event);
3794     @@ -985,13 +997,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
3795     rx_wr--;
3796     rx_wr->next = NULL; /* mark end of work requests list */
3797    
3798     - isert_conn->post_recv_buf_count += count;
3799     ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
3800     &rx_wr_failed);
3801     - if (ret) {
3802     + if (ret)
3803     isert_err("ib_post_recv() failed with ret: %d\n", ret);
3804     - isert_conn->post_recv_buf_count -= count;
3805     - }
3806    
3807     return ret;
3808     }
3809     @@ -1007,12 +1016,9 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
3810     rx_wr.num_sge = 1;
3811     rx_wr.next = NULL;
3812    
3813     - isert_conn->post_recv_buf_count++;
3814     ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
3815     - if (ret) {
3816     + if (ret)
3817     isert_err("ib_post_recv() failed with ret: %d\n", ret);
3818     - isert_conn->post_recv_buf_count--;
3819     - }
3820    
3821     return ret;
3822     }
3823     @@ -1132,12 +1138,9 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
3824     rx_wr.sg_list = &sge;
3825     rx_wr.num_sge = 1;
3826    
3827     - isert_conn->post_recv_buf_count++;
3828     ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
3829     - if (ret) {
3830     + if (ret)
3831     isert_err("ib_post_recv() failed: %d\n", ret);
3832     - isert_conn->post_recv_buf_count--;
3833     - }
3834    
3835     return ret;
3836     }
3837     @@ -1633,7 +1636,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
3838     ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
3839     DMA_FROM_DEVICE);
3840    
3841     - isert_conn->post_recv_buf_count--;
3842     }
3843    
3844     static int
3845     @@ -2048,7 +2050,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
3846     void *start = isert_conn->rx_descs;
3847     int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
3848    
3849     - if (wr_id >= start && wr_id < start + len)
3850     + if ((wr_id >= start && wr_id < start + len) ||
3851     + (wr_id == isert_conn->login_req_buf))
3852     return false;
3853    
3854     return true;
3855     @@ -2072,10 +2075,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
3856     isert_unmap_tx_desc(desc, ib_dev);
3857     else
3858     isert_completion_put(desc, isert_cmd, ib_dev, true);
3859     - } else {
3860     - isert_conn->post_recv_buf_count--;
3861     - if (!isert_conn->post_recv_buf_count)
3862     - iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
3863     }
3864     }
3865    
3866     @@ -3214,6 +3213,7 @@ accept_wait:
3867    
3868     conn->context = isert_conn;
3869     isert_conn->conn = conn;
3870     + isert_conn->state = ISER_CONN_BOUND;
3871    
3872     isert_set_conn_info(np, conn, isert_conn);
3873    
3874     diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
3875     index 8d50453eef66..1aa019ab9d78 100644
3876     --- a/drivers/infiniband/ulp/isert/ib_isert.h
3877     +++ b/drivers/infiniband/ulp/isert/ib_isert.h
3878     @@ -84,6 +84,7 @@ enum iser_ib_op_code {
3879     enum iser_conn_state {
3880     ISER_CONN_INIT,
3881     ISER_CONN_UP,
3882     + ISER_CONN_BOUND,
3883     ISER_CONN_FULL_FEATURE,
3884     ISER_CONN_TERMINATING,
3885     ISER_CONN_DOWN,
3886     @@ -179,7 +180,6 @@ struct isert_device;
3887    
3888     struct isert_conn {
3889     enum iser_conn_state state;
3890     - int post_recv_buf_count;
3891     u32 responder_resources;
3892     u32 initiator_depth;
3893     bool pi_support;
3894     diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
3895     index 0c37fee363b1..4328679a67a7 100644
3896     --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
3897     +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
3898     @@ -1670,47 +1670,6 @@ send_sense:
3899     return -1;
3900     }
3901    
3902     -/**
3903     - * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
3904     - * @ch: RDMA channel of the task management request.
3905     - * @fn: Task management function to perform.
3906     - * @req_tag: Tag of the SRP task management request.
3907     - * @mgmt_ioctx: I/O context of the task management request.
3908     - *
3909     - * Returns zero if the target core will process the task management
3910     - * request asynchronously.
3911     - *
3912     - * Note: It is assumed that the initiator serializes tag-based task management
3913     - * requests.
3914     - */
3915     -static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
3916     -{
3917     - struct srpt_device *sdev;
3918     - struct srpt_rdma_ch *ch;
3919     - struct srpt_send_ioctx *target;
3920     - int ret, i;
3921     -
3922     - ret = -EINVAL;
3923     - ch = ioctx->ch;
3924     - BUG_ON(!ch);
3925     - BUG_ON(!ch->sport);
3926     - sdev = ch->sport->sdev;
3927     - BUG_ON(!sdev);
3928     - spin_lock_irq(&sdev->spinlock);
3929     - for (i = 0; i < ch->rq_size; ++i) {
3930     - target = ch->ioctx_ring[i];
3931     - if (target->cmd.se_lun == ioctx->cmd.se_lun &&
3932     - target->cmd.tag == tag &&
3933     - srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
3934     - ret = 0;
3935     - /* now let the target core abort &target->cmd; */
3936     - break;
3937     - }
3938     - }
3939     - spin_unlock_irq(&sdev->spinlock);
3940     - return ret;
3941     -}
3942     -
3943     static int srp_tmr_to_tcm(int fn)
3944     {
3945     switch (fn) {
3946     @@ -1745,7 +1704,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
3947     struct se_cmd *cmd;
3948     struct se_session *sess = ch->sess;
3949     uint64_t unpacked_lun;
3950     - uint32_t tag = 0;
3951     int tcm_tmr;
3952     int rc;
3953    
3954     @@ -1761,25 +1719,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
3955     srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
3956     send_ioctx->cmd.tag = srp_tsk->tag;
3957     tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
3958     - if (tcm_tmr < 0) {
3959     - send_ioctx->cmd.se_tmr_req->response =
3960     - TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3961     - goto fail;
3962     - }
3963     unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
3964     sizeof(srp_tsk->lun));
3965     -
3966     - if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
3967     - rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
3968     - if (rc < 0) {
3969     - send_ioctx->cmd.se_tmr_req->response =
3970     - TMR_TASK_DOES_NOT_EXIST;
3971     - goto fail;
3972     - }
3973     - tag = srp_tsk->task_tag;
3974     - }
3975     rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
3976     - srp_tsk, tcm_tmr, GFP_KERNEL, tag,
3977     + srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
3978     TARGET_SCF_ACK_KREF);
3979     if (rc != 0) {
3980     send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
3981     diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
3982     index cfd58e87da26..1c5914cae853 100644
3983     --- a/drivers/input/misc/ati_remote2.c
3984     +++ b/drivers/input/misc/ati_remote2.c
3985     @@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
3986    
3987     ar2->udev = udev;
3988    
3989     + /* Sanity check, first interface must have an endpoint */
3990     + if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
3991     + dev_err(&interface->dev,
3992     + "%s(): interface 0 must have an endpoint\n", __func__);
3993     + r = -ENODEV;
3994     + goto fail1;
3995     + }
3996     ar2->intf[0] = interface;
3997     ar2->ep[0] = &alt->endpoint[0].desc;
3998    
3999     + /* Sanity check, the device must have two interfaces */
4000     ar2->intf[1] = usb_ifnum_to_if(udev, 1);
4001     + if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
4002     + dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
4003     + __func__, udev->actconfig->desc.bNumInterfaces);
4004     + r = -ENODEV;
4005     + goto fail1;
4006     + }
4007     +
4008     r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
4009     if (r)
4010     goto fail1;
4011     +
4012     + /* Sanity check, second interface must have an endpoint */
4013     alt = ar2->intf[1]->cur_altsetting;
4014     + if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
4015     + dev_err(&interface->dev,
4016     + "%s(): interface 1 must have an endpoint\n", __func__);
4017     + r = -ENODEV;
4018     + goto fail2;
4019     + }
4020     ar2->ep[1] = &alt->endpoint[0].desc;
4021    
4022     r = ati_remote2_urb_init(ar2);
4023     if (r)
4024     - goto fail2;
4025     + goto fail3;
4026    
4027     ar2->channel_mask = channel_mask;
4028     ar2->mode_mask = mode_mask;
4029    
4030     r = ati_remote2_setup(ar2, ar2->channel_mask);
4031     if (r)
4032     - goto fail2;
4033     + goto fail3;
4034    
4035     usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
4036     strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
4037     @@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
4038    
4039     r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
4040     if (r)
4041     - goto fail2;
4042     + goto fail3;
4043    
4044     r = ati_remote2_input_init(ar2);
4045     if (r)
4046     - goto fail3;
4047     + goto fail4;
4048    
4049     usb_set_intfdata(interface, ar2);
4050    
4051     @@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
4052    
4053     return 0;
4054    
4055     - fail3:
4056     + fail4:
4057     sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
4058     - fail2:
4059     + fail3:
4060     ati_remote2_urb_cleanup(ar2);
4061     + fail2:
4062     usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
4063     fail1:
4064     kfree(ar2);
4065     diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
4066     index ac1fa5f44580..9c0ea36913b4 100644
4067     --- a/drivers/input/misc/ims-pcu.c
4068     +++ b/drivers/input/misc/ims-pcu.c
4069     @@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
4070    
4071     pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
4072     union_desc->bMasterInterface0);
4073     + if (!pcu->ctrl_intf)
4074     + return -EINVAL;
4075    
4076     alt = pcu->ctrl_intf->cur_altsetting;
4077     pcu->ep_ctrl = &alt->endpoint[0].desc;
4078     @@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
4079    
4080     pcu->data_intf = usb_ifnum_to_if(pcu->udev,
4081     union_desc->bSlaveInterface0);
4082     + if (!pcu->data_intf)
4083     + return -EINVAL;
4084    
4085     alt = pcu->data_intf->cur_altsetting;
4086     if (alt->desc.bNumEndpoints != 2) {
4087     diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
4088     index 63b539d3daba..84909a12ff36 100644
4089     --- a/drivers/input/misc/powermate.c
4090     +++ b/drivers/input/misc/powermate.c
4091     @@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
4092     int error = -ENOMEM;
4093    
4094     interface = intf->cur_altsetting;
4095     + if (interface->desc.bNumEndpoints < 1)
4096     + return -EINVAL;
4097     +
4098     endpoint = &interface->endpoint[0].desc;
4099     if (!usb_endpoint_is_int_in(endpoint))
4100     return -EIO;
4101     diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
4102     index 6025eb430c0a..a41d8328c064 100644
4103     --- a/drivers/input/mouse/synaptics.c
4104     +++ b/drivers/input/mouse/synaptics.c
4105     @@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
4106     if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
4107     return;
4108    
4109     - /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
4110     - if (SYN_ID_FULL(priv->identity) == 0x801 &&
4111     + /* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
4112     + if ((SYN_ID_FULL(priv->identity) == 0x801 ||
4113     + SYN_ID_FULL(priv->identity) == 0x802) &&
4114     !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
4115     return;
4116    
4117     diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
4118     index 8d0ead98eb6e..a296425a7270 100644
4119     --- a/drivers/md/bcache/super.c
4120     +++ b/drivers/md/bcache/super.c
4121     @@ -1015,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
4122     */
4123     atomic_set(&dc->count, 1);
4124    
4125     - if (bch_cached_dev_writeback_start(dc))
4126     + /* Block writeback thread, but spawn it */
4127     + down_write(&dc->writeback_lock);
4128     + if (bch_cached_dev_writeback_start(dc)) {
4129     + up_write(&dc->writeback_lock);
4130     return -ENOMEM;
4131     + }
4132    
4133     if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
4134     bch_sectors_dirty_init(dc);
4135     @@ -1028,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
4136     bch_cached_dev_run(dc);
4137     bcache_device_link(&dc->disk, c, "bdev");
4138    
4139     + /* Allow the writeback thread to proceed */
4140     + up_write(&dc->writeback_lock);
4141     +
4142     pr_info("Caching %s as %s on set %pU",
4143     bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
4144     dc->disk.c->sb.set_uuid);
4145     @@ -1366,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
4146     struct btree *b;
4147     unsigned i;
4148    
4149     + if (!c)
4150     + closure_return(cl);
4151     +
4152     bch_cache_accounting_destroy(&c->accounting);
4153    
4154     kobject_put(&c->internal);
4155     @@ -1828,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
4156     return 0;
4157     }
4158    
4159     -static void register_cache(struct cache_sb *sb, struct page *sb_page,
4160     +static int register_cache(struct cache_sb *sb, struct page *sb_page,
4161     struct block_device *bdev, struct cache *ca)
4162     {
4163     char name[BDEVNAME_SIZE];
4164     - const char *err = "cannot allocate memory";
4165     + const char *err = NULL;
4166     + int ret = 0;
4167    
4168     memcpy(&ca->sb, sb, sizeof(struct cache_sb));
4169     ca->bdev = bdev;
4170     @@ -1847,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
4171     if (blk_queue_discard(bdev_get_queue(ca->bdev)))
4172     ca->discard = CACHE_DISCARD(&ca->sb);
4173    
4174     - if (cache_alloc(sb, ca) != 0)
4175     + ret = cache_alloc(sb, ca);
4176     + if (ret != 0)
4177     goto err;
4178    
4179     - err = "error creating kobject";
4180     - if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
4181     - goto err;
4182     + if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
4183     + err = "error calling kobject_add";
4184     + ret = -ENOMEM;
4185     + goto out;
4186     + }
4187    
4188     mutex_lock(&bch_register_lock);
4189     err = register_cache_set(ca);
4190     mutex_unlock(&bch_register_lock);
4191    
4192     - if (err)
4193     - goto err;
4194     + if (err) {
4195     + ret = -ENODEV;
4196     + goto out;
4197     + }
4198    
4199     pr_info("registered cache device %s", bdevname(bdev, name));
4200     +
4201     out:
4202     kobject_put(&ca->kobj);
4203     - return;
4204     +
4205     err:
4206     - pr_notice("error opening %s: %s", bdevname(bdev, name), err);
4207     - goto out;
4208     + if (err)
4209     + pr_notice("error opening %s: %s", bdevname(bdev, name), err);
4210     +
4211     + return ret;
4212     }
4213    
4214     /* Global interfaces/init */
4215     @@ -1965,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
4216     if (!ca)
4217     goto err_close;
4218    
4219     - register_cache(sb, sb_page, bdev, ca);
4220     + if (register_cache(sb, sb_page, bdev, ca) != 0)
4221     + goto err_close;
4222     }
4223     out:
4224     if (sb_page)
4225     diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
4226     index f6543f3a970f..27f2ef300f8b 100644
4227     --- a/drivers/md/dm-cache-metadata.c
4228     +++ b/drivers/md/dm-cache-metadata.c
4229     @@ -867,19 +867,40 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
4230     return 0;
4231     }
4232    
4233     -#define WRITE_LOCK(cmd) \
4234     - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
4235     +#define WRITE_LOCK(cmd) \
4236     + down_write(&cmd->root_lock); \
4237     + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
4238     + up_write(&cmd->root_lock); \
4239     return -EINVAL; \
4240     - down_write(&cmd->root_lock)
4241     + }
4242    
4243     #define WRITE_LOCK_VOID(cmd) \
4244     - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
4245     + down_write(&cmd->root_lock); \
4246     + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
4247     + up_write(&cmd->root_lock); \
4248     return; \
4249     - down_write(&cmd->root_lock)
4250     + }
4251    
4252     #define WRITE_UNLOCK(cmd) \
4253     up_write(&cmd->root_lock)
4254    
4255     +#define READ_LOCK(cmd) \
4256     + down_read(&cmd->root_lock); \
4257     + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
4258     + up_read(&cmd->root_lock); \
4259     + return -EINVAL; \
4260     + }
4261     +
4262     +#define READ_LOCK_VOID(cmd) \
4263     + down_read(&cmd->root_lock); \
4264     + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
4265     + up_read(&cmd->root_lock); \
4266     + return; \
4267     + }
4268     +
4269     +#define READ_UNLOCK(cmd) \
4270     + up_read(&cmd->root_lock)
4271     +
4272     int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
4273     {
4274     int r;
4275     @@ -1015,22 +1036,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
4276     {
4277     int r;
4278    
4279     - down_read(&cmd->root_lock);
4280     + READ_LOCK(cmd);
4281     r = __load_discards(cmd, fn, context);
4282     - up_read(&cmd->root_lock);
4283     + READ_UNLOCK(cmd);
4284    
4285     return r;
4286     }
4287    
4288     -dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
4289     +int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
4290     {
4291     - dm_cblock_t r;
4292     + READ_LOCK(cmd);
4293     + *result = cmd->cache_blocks;
4294     + READ_UNLOCK(cmd);
4295    
4296     - down_read(&cmd->root_lock);
4297     - r = cmd->cache_blocks;
4298     - up_read(&cmd->root_lock);
4299     -
4300     - return r;
4301     + return 0;
4302     }
4303    
4304     static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
4305     @@ -1188,9 +1207,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
4306     {
4307     int r;
4308    
4309     - down_read(&cmd->root_lock);
4310     + READ_LOCK(cmd);
4311     r = __load_mappings(cmd, policy, fn, context);
4312     - up_read(&cmd->root_lock);
4313     + READ_UNLOCK(cmd);
4314    
4315     return r;
4316     }
4317     @@ -1215,18 +1234,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
4318    
4319     void dm_cache_dump(struct dm_cache_metadata *cmd)
4320     {
4321     - down_read(&cmd->root_lock);
4322     + READ_LOCK_VOID(cmd);
4323     __dump_mappings(cmd);
4324     - up_read(&cmd->root_lock);
4325     + READ_UNLOCK(cmd);
4326     }
4327    
4328     int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
4329     {
4330     int r;
4331    
4332     - down_read(&cmd->root_lock);
4333     + READ_LOCK(cmd);
4334     r = cmd->changed;
4335     - up_read(&cmd->root_lock);
4336     + READ_UNLOCK(cmd);
4337    
4338     return r;
4339     }
4340     @@ -1276,9 +1295,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
4341     void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
4342     struct dm_cache_statistics *stats)
4343     {
4344     - down_read(&cmd->root_lock);
4345     + READ_LOCK_VOID(cmd);
4346     *stats = cmd->stats;
4347     - up_read(&cmd->root_lock);
4348     + READ_UNLOCK(cmd);
4349     }
4350    
4351     void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
4352     @@ -1312,9 +1331,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
4353     {
4354     int r = -EINVAL;
4355    
4356     - down_read(&cmd->root_lock);
4357     + READ_LOCK(cmd);
4358     r = dm_sm_get_nr_free(cmd->metadata_sm, result);
4359     - up_read(&cmd->root_lock);
4360     + READ_UNLOCK(cmd);
4361    
4362     return r;
4363     }
4364     @@ -1324,9 +1343,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
4365     {
4366     int r = -EINVAL;
4367    
4368     - down_read(&cmd->root_lock);
4369     + READ_LOCK(cmd);
4370     r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
4371     - up_read(&cmd->root_lock);
4372     + READ_UNLOCK(cmd);
4373    
4374     return r;
4375     }
4376     @@ -1417,7 +1436,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
4377    
4378     int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
4379     {
4380     - return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
4381     + int r;
4382     +
4383     + READ_LOCK(cmd);
4384     + r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
4385     + READ_UNLOCK(cmd);
4386     +
4387     + return r;
4388     }
4389    
4390     void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
4391     @@ -1440,10 +1465,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
4392     struct dm_block *sblock;
4393     struct cache_disk_superblock *disk_super;
4394    
4395     - /*
4396     - * We ignore fail_io for this function.
4397     - */
4398     - down_write(&cmd->root_lock);
4399     + WRITE_LOCK(cmd);
4400     set_bit(NEEDS_CHECK, &cmd->flags);
4401    
4402     r = superblock_lock(cmd, &sblock);
4403     @@ -1458,19 +1480,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
4404     dm_bm_unlock(sblock);
4405    
4406     out:
4407     - up_write(&cmd->root_lock);
4408     + WRITE_UNLOCK(cmd);
4409     return r;
4410     }
4411    
4412     -bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
4413     +int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
4414     {
4415     - bool needs_check;
4416     + READ_LOCK(cmd);
4417     + *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
4418     + READ_UNLOCK(cmd);
4419    
4420     - down_read(&cmd->root_lock);
4421     - needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
4422     - up_read(&cmd->root_lock);
4423     -
4424     - return needs_check;
4425     + return 0;
4426     }
4427    
4428     int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
4429     diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
4430     index 2ffee21f318d..8528744195e5 100644
4431     --- a/drivers/md/dm-cache-metadata.h
4432     +++ b/drivers/md/dm-cache-metadata.h
4433     @@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
4434     * origin blocks to map to.
4435     */
4436     int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
4437     -dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
4438     +int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
4439    
4440     int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
4441     sector_t discard_block_size,
4442     @@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
4443     */
4444     int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
4445    
4446     -bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
4447     +int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
4448     int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
4449     void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
4450     void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
4451     diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
4452     index 5780accffa30..bb9b92ebbf8e 100644
4453     --- a/drivers/md/dm-cache-target.c
4454     +++ b/drivers/md/dm-cache-target.c
4455     @@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
4456    
4457     static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
4458     {
4459     - bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
4460     + bool needs_check;
4461     enum cache_metadata_mode old_mode = get_cache_mode(cache);
4462    
4463     + if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
4464     + DMERR("unable to read needs_check flag, setting failure mode");
4465     + new_mode = CM_FAIL;
4466     + }
4467     +
4468     if (new_mode == CM_WRITE && needs_check) {
4469     DMERR("%s: unable to switch cache to write mode until repaired.",
4470     cache_device_name(cache));
4471     @@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
4472     char buf[BDEVNAME_SIZE];
4473     struct cache *cache = ti->private;
4474     dm_cblock_t residency;
4475     + bool needs_check;
4476    
4477     switch (type) {
4478     case STATUSTYPE_INFO:
4479     @@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
4480     else
4481     DMEMIT("rw ");
4482    
4483     - if (dm_cache_metadata_needs_check(cache->cmd))
4484     + r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
4485     +
4486     + if (r || needs_check)
4487     DMEMIT("needs_check ");
4488     else
4489     DMEMIT("- ");
4490     diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
4491     index 3766386080a4..e4d1bafe78c1 100644
4492     --- a/drivers/md/dm-snap.c
4493     +++ b/drivers/md/dm-snap.c
4494     @@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
4495     int i;
4496     int r = -EINVAL;
4497     char *origin_path, *cow_path;
4498     + dev_t origin_dev, cow_dev;
4499     unsigned args_used, num_flush_bios = 1;
4500     fmode_t origin_mode = FMODE_READ;
4501    
4502     @@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
4503     ti->error = "Cannot get origin device";
4504     goto bad_origin;
4505     }
4506     + origin_dev = s->origin->bdev->bd_dev;
4507    
4508     cow_path = argv[0];
4509     argv++;
4510     argc--;
4511    
4512     + cow_dev = dm_get_dev_t(cow_path);
4513     + if (cow_dev && cow_dev == origin_dev) {
4514     + ti->error = "COW device cannot be the same as origin device";
4515     + r = -EINVAL;
4516     + goto bad_cow;
4517     + }
4518     +
4519     r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
4520     if (r) {
4521     ti->error = "Cannot get COW device";
4522     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
4523     index 061152a43730..cb5d0daf53bb 100644
4524     --- a/drivers/md/dm-table.c
4525     +++ b/drivers/md/dm-table.c
4526     @@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
4527     }
4528    
4529     /*
4530     + * Convert the path to a device
4531     + */
4532     +dev_t dm_get_dev_t(const char *path)
4533     +{
4534     + dev_t uninitialized_var(dev);
4535     + struct block_device *bdev;
4536     +
4537     + bdev = lookup_bdev(path);
4538     + if (IS_ERR(bdev))
4539     + dev = name_to_dev_t(path);
4540     + else {
4541     + dev = bdev->bd_dev;
4542     + bdput(bdev);
4543     + }
4544     +
4545     + return dev;
4546     +}
4547     +EXPORT_SYMBOL_GPL(dm_get_dev_t);
4548     +
4549     +/*
4550     * Add a device to the list, or just increment the usage count if
4551     * it's already present.
4552     */
4553     @@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
4554     struct dm_dev **result)
4555     {
4556     int r;
4557     - dev_t uninitialized_var(dev);
4558     + dev_t dev;
4559     struct dm_dev_internal *dd;
4560     struct dm_table *t = ti->table;
4561     - struct block_device *bdev;
4562    
4563     BUG_ON(!t);
4564    
4565     - /* convert the path to a device */
4566     - bdev = lookup_bdev(path);
4567     - if (IS_ERR(bdev)) {
4568     - dev = name_to_dev_t(path);
4569     - if (!dev)
4570     - return -ENODEV;
4571     - } else {
4572     - dev = bdev->bd_dev;
4573     - bdput(bdev);
4574     - }
4575     + dev = dm_get_dev_t(path);
4576     + if (!dev)
4577     + return -ENODEV;
4578    
4579     dd = find_device(&t->devices, dev);
4580     if (!dd) {
4581     diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
4582     index f962d6453afd..185010d9cccc 100644
4583     --- a/drivers/md/dm-thin-metadata.c
4584     +++ b/drivers/md/dm-thin-metadata.c
4585     @@ -1981,5 +1981,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
4586    
4587     void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
4588     {
4589     - dm_tm_issue_prefetches(pmd->tm);
4590     + down_read(&pmd->root_lock);
4591     + if (!pmd->fail_io)
4592     + dm_tm_issue_prefetches(pmd->tm);
4593     + up_read(&pmd->root_lock);
4594     }
4595     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
4596     index dd834927bc66..c338aebb4ccd 100644
4597     --- a/drivers/md/dm.c
4598     +++ b/drivers/md/dm.c
4599     @@ -1109,12 +1109,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
4600     * back into ->request_fn() could deadlock attempting to grab the
4601     * queue lock again.
4602     */
4603     - if (run_queue) {
4604     - if (md->queue->mq_ops)
4605     - blk_mq_run_hw_queues(md->queue, true);
4606     - else
4607     - blk_run_queue_async(md->queue);
4608     - }
4609     + if (!md->queue->mq_ops && run_queue)
4610     + blk_run_queue_async(md->queue);
4611    
4612     /*
4613     * dm_put() must be at the end of this function. See the comment above
4614     @@ -1214,9 +1210,9 @@ static void dm_requeue_original_request(struct mapped_device *md,
4615     {
4616     int rw = rq_data_dir(rq);
4617    
4618     + rq_end_stats(md, rq);
4619     dm_unprep_request(rq);
4620    
4621     - rq_end_stats(md, rq);
4622     if (!rq->q->mq_ops)
4623     old_requeue_request(rq);
4624     else {
4625     @@ -1336,7 +1332,10 @@ static void dm_complete_request(struct request *rq, int error)
4626     struct dm_rq_target_io *tio = tio_from_request(rq);
4627    
4628     tio->error = error;
4629     - blk_complete_request(rq);
4630     + if (!rq->q->mq_ops)
4631     + blk_complete_request(rq);
4632     + else
4633     + blk_mq_complete_request(rq, error);
4634     }
4635    
4636     /*
4637     diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
4638     index 0a72ab6e6c20..dd483bb2e111 100644
4639     --- a/drivers/md/multipath.c
4640     +++ b/drivers/md/multipath.c
4641     @@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
4642     }
4643     multipath = conf->multipaths + mp_bh->path;
4644    
4645     - mp_bh->bio = *bio;
4646     + bio_init(&mp_bh->bio);
4647     + __bio_clone_fast(&mp_bh->bio, bio);
4648     +
4649     mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
4650     mp_bh->bio.bi_bdev = multipath->rdev->bdev;
4651     mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
4652     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
4653     index 4e3843f7d245..bb5bce059eb4 100644
4654     --- a/drivers/md/raid1.c
4655     +++ b/drivers/md/raid1.c
4656     @@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
4657     if (fail) {
4658     spin_lock_irq(&conf->device_lock);
4659     list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
4660     + conf->nr_queued++;
4661     spin_unlock_irq(&conf->device_lock);
4662     md_wakeup_thread(conf->mddev->thread);
4663     } else {
4664     @@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
4665     LIST_HEAD(tmp);
4666     spin_lock_irqsave(&conf->device_lock, flags);
4667     if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
4668     - list_add(&tmp, &conf->bio_end_io_list);
4669     - list_del_init(&conf->bio_end_io_list);
4670     + while (!list_empty(&conf->bio_end_io_list)) {
4671     + list_move(conf->bio_end_io_list.prev, &tmp);
4672     + conf->nr_queued--;
4673     + }
4674     }
4675     spin_unlock_irqrestore(&conf->device_lock, flags);
4676     while (!list_empty(&tmp)) {
4677     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
4678     index 1c1447dd3417..e3fd725d5c4d 100644
4679     --- a/drivers/md/raid10.c
4680     +++ b/drivers/md/raid10.c
4681     @@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
4682     if (fail) {
4683     spin_lock_irq(&conf->device_lock);
4684     list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
4685     + conf->nr_queued++;
4686     spin_unlock_irq(&conf->device_lock);
4687     md_wakeup_thread(conf->mddev->thread);
4688     } else {
4689     @@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
4690     LIST_HEAD(tmp);
4691     spin_lock_irqsave(&conf->device_lock, flags);
4692     if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
4693     - list_add(&tmp, &conf->bio_end_io_list);
4694     - list_del_init(&conf->bio_end_io_list);
4695     + while (!list_empty(&conf->bio_end_io_list)) {
4696     + list_move(conf->bio_end_io_list.prev, &tmp);
4697     + conf->nr_queued--;
4698     + }
4699     }
4700     spin_unlock_irqrestore(&conf->device_lock, flags);
4701     while (!list_empty(&tmp)) {
4702     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
4703     index b4f02c9959f2..32d52878f182 100644
4704     --- a/drivers/md/raid5.c
4705     +++ b/drivers/md/raid5.c
4706     @@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
4707     int hash)
4708     {
4709     int size;
4710     - unsigned long do_wakeup = 0;
4711     - int i = 0;
4712     + bool do_wakeup = false;
4713     unsigned long flags;
4714    
4715     if (hash == NR_STRIPE_HASH_LOCKS) {
4716     @@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
4717     !list_empty(list))
4718     atomic_dec(&conf->empty_inactive_list_nr);
4719     list_splice_tail_init(list, conf->inactive_list + hash);
4720     - do_wakeup |= 1 << hash;
4721     + do_wakeup = true;
4722     spin_unlock_irqrestore(conf->hash_locks + hash, flags);
4723     }
4724     size--;
4725     hash--;
4726     }
4727    
4728     - for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
4729     - if (do_wakeup & (1 << i))
4730     - wake_up(&conf->wait_for_stripe[i]);
4731     - }
4732     -
4733     if (do_wakeup) {
4734     + wake_up(&conf->wait_for_stripe);
4735     if (atomic_read(&conf->active_stripes) == 0)
4736     wake_up(&conf->wait_for_quiescent);
4737     if (conf->retry_read_aligned)
4738     @@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
4739     if (!sh) {
4740     set_bit(R5_INACTIVE_BLOCKED,
4741     &conf->cache_state);
4742     - wait_event_exclusive_cmd(
4743     - conf->wait_for_stripe[hash],
4744     + wait_event_lock_irq(
4745     + conf->wait_for_stripe,
4746     !list_empty(conf->inactive_list + hash) &&
4747     (atomic_read(&conf->active_stripes)
4748     < (conf->max_nr_stripes * 3 / 4)
4749     || !test_bit(R5_INACTIVE_BLOCKED,
4750     &conf->cache_state)),
4751     - spin_unlock_irq(conf->hash_locks + hash),
4752     - spin_lock_irq(conf->hash_locks + hash));
4753     + *(conf->hash_locks + hash));
4754     clear_bit(R5_INACTIVE_BLOCKED,
4755     &conf->cache_state);
4756     } else {
4757     @@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
4758     }
4759     } while (sh == NULL);
4760    
4761     - if (!list_empty(conf->inactive_list + hash))
4762     - wake_up(&conf->wait_for_stripe[hash]);
4763     -
4764     spin_unlock_irq(conf->hash_locks + hash);
4765     return sh;
4766     }
4767     @@ -2089,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
4768     unsigned long cpu;
4769     int err = 0;
4770    
4771     + /*
4772     + * Never shrink. And mddev_suspend() could deadlock if this is called
4773     + * from raid5d. In that case, scribble_disks and scribble_sectors
4774     + * should equal to new_disks and new_sectors
4775     + */
4776     + if (conf->scribble_disks >= new_disks &&
4777     + conf->scribble_sectors >= new_sectors)
4778     + return 0;
4779     mddev_suspend(conf->mddev);
4780     get_online_cpus();
4781     for_each_present_cpu(cpu) {
4782     @@ -2110,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
4783     }
4784     put_online_cpus();
4785     mddev_resume(conf->mddev);
4786     + if (!err) {
4787     + conf->scribble_disks = new_disks;
4788     + conf->scribble_sectors = new_sectors;
4789     + }
4790     return err;
4791     }
4792    
4793     @@ -2190,7 +2193,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
4794     cnt = 0;
4795     list_for_each_entry(nsh, &newstripes, lru) {
4796     lock_device_hash_lock(conf, hash);
4797     - wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
4798     + wait_event_cmd(conf->wait_for_stripe,
4799     !list_empty(conf->inactive_list + hash),
4800     unlock_device_hash_lock(conf, hash),
4801     lock_device_hash_lock(conf, hash));
4802     @@ -4236,7 +4239,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
4803     WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
4804     (1 << STRIPE_SYNCING) |
4805     (1 << STRIPE_REPLACED) |
4806     - (1 << STRIPE_PREREAD_ACTIVE) |
4807     (1 << STRIPE_DELAYED) |
4808     (1 << STRIPE_BIT_DELAY) |
4809     (1 << STRIPE_FULL_WRITE) |
4810     @@ -4251,6 +4253,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
4811     (1 << STRIPE_REPLACED)));
4812    
4813     set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
4814     + (1 << STRIPE_PREREAD_ACTIVE) |
4815     (1 << STRIPE_DEGRADED)),
4816     head_sh->state & (1 << STRIPE_INSYNC));
4817    
4818     @@ -6413,6 +6416,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
4819     }
4820     put_online_cpus();
4821    
4822     + if (!err) {
4823     + conf->scribble_disks = max(conf->raid_disks,
4824     + conf->previous_raid_disks);
4825     + conf->scribble_sectors = max(conf->chunk_sectors,
4826     + conf->prev_chunk_sectors);
4827     + }
4828     return err;
4829     }
4830    
4831     @@ -6503,9 +6512,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
4832     seqcount_init(&conf->gen_lock);
4833     mutex_init(&conf->cache_size_mutex);
4834     init_waitqueue_head(&conf->wait_for_quiescent);
4835     - for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
4836     - init_waitqueue_head(&conf->wait_for_stripe[i]);
4837     - }
4838     + init_waitqueue_head(&conf->wait_for_stripe);
4839     init_waitqueue_head(&conf->wait_for_overlap);
4840     INIT_LIST_HEAD(&conf->handle_list);
4841     INIT_LIST_HEAD(&conf->hold_list);
4842     @@ -7014,8 +7021,8 @@ static int raid5_run(struct mddev *mddev)
4843     }
4844    
4845     if (discard_supported &&
4846     - mddev->queue->limits.max_discard_sectors >= stripe &&
4847     - mddev->queue->limits.discard_granularity >= stripe)
4848     + mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
4849     + mddev->queue->limits.discard_granularity >= stripe)
4850     queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
4851     mddev->queue);
4852     else
4853     diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
4854     index a415e1cd39b8..517d4b68a1be 100644
4855     --- a/drivers/md/raid5.h
4856     +++ b/drivers/md/raid5.h
4857     @@ -510,6 +510,8 @@ struct r5conf {
4858     * conversions
4859     */
4860     } __percpu *percpu;
4861     + int scribble_disks;
4862     + int scribble_sectors;
4863     #ifdef CONFIG_HOTPLUG_CPU
4864     struct notifier_block cpu_notify;
4865     #endif
4866     @@ -522,7 +524,7 @@ struct r5conf {
4867     atomic_t empty_inactive_list_nr;
4868     struct llist_head released_stripes;
4869     wait_queue_head_t wait_for_quiescent;
4870     - wait_queue_head_t wait_for_stripe[NR_STRIPE_HASH_LOCKS];
4871     + wait_queue_head_t wait_for_stripe;
4872     wait_queue_head_t wait_for_overlap;
4873     unsigned long cache_state;
4874     #define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
4875     diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
4876     index 471fd23b5c5c..08d2c6bf7341 100644
4877     --- a/drivers/media/i2c/adv7511.c
4878     +++ b/drivers/media/i2c/adv7511.c
4879     @@ -1161,12 +1161,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
4880     }
4881     }
4882    
4883     +static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
4884     +{
4885     + struct adv7511_state *state = get_adv7511_state(sd);
4886     + struct adv7511_edid_detect ed;
4887     +
4888     + /* We failed to read the EDID, so send an event for this. */
4889     + ed.present = false;
4890     + ed.segment = adv7511_rd(sd, 0xc4);
4891     + v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
4892     + v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
4893     +}
4894     +
4895     static void adv7511_edid_handler(struct work_struct *work)
4896     {
4897     struct delayed_work *dwork = to_delayed_work(work);
4898     struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
4899     struct v4l2_subdev *sd = &state->sd;
4900     - struct adv7511_edid_detect ed;
4901    
4902     v4l2_dbg(1, debug, sd, "%s:\n", __func__);
4903    
4904     @@ -1191,9 +1202,7 @@ static void adv7511_edid_handler(struct work_struct *work)
4905     }
4906    
4907     /* We failed to read the EDID, so send an event for this. */
4908     - ed.present = false;
4909     - ed.segment = adv7511_rd(sd, 0xc4);
4910     - v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
4911     + adv7511_notify_no_edid(sd);
4912     v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
4913     }
4914    
4915     @@ -1264,7 +1273,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
4916     /* update read only ctrls */
4917     v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
4918     v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
4919     - v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
4920    
4921     if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
4922     v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
4923     @@ -1294,6 +1302,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
4924     }
4925     adv7511_s_power(sd, false);
4926     memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
4927     + adv7511_notify_no_edid(sd);
4928     }
4929     }
4930    
4931     @@ -1370,6 +1379,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
4932     }
4933     /* one more segment read ok */
4934     state->edid.segments = segment + 1;
4935     + v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
4936     if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
4937     /* Request next EDID segment */
4938     v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
4939     @@ -1389,7 +1399,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
4940     ed.present = true;
4941     ed.segment = 0;
4942     state->edid_detect_counter++;
4943     - v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
4944     v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
4945     return ed.present;
4946     }
4947     diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
4948     index 9400e996087b..bedbd51fb77c 100644
4949     --- a/drivers/media/pci/bt8xx/bttv-driver.c
4950     +++ b/drivers/media/pci/bt8xx/bttv-driver.c
4951     @@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
4952     return 0;
4953     }
4954    
4955     +static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
4956     + unsigned int *width_mask,
4957     + unsigned int *width_bias)
4958     +{
4959     + if (fmt->flags & FORMAT_FLAGS_PLANAR) {
4960     + *width_mask = ~15; /* width must be a multiple of 16 pixels */
4961     + *width_bias = 8; /* nearest */
4962     + } else {
4963     + *width_mask = ~3; /* width must be a multiple of 4 pixels */
4964     + *width_bias = 2; /* nearest */
4965     + }
4966     +}
4967     +
4968     static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
4969     struct v4l2_format *f)
4970     {
4971     @@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
4972     enum v4l2_field field;
4973     __s32 width, height;
4974     __s32 height2;
4975     + unsigned int width_mask, width_bias;
4976     int rc;
4977    
4978     fmt = format_by_fourcc(f->fmt.pix.pixelformat);
4979     @@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
4980     width = f->fmt.pix.width;
4981     height = f->fmt.pix.height;
4982    
4983     + bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
4984     rc = limit_scaled_size_lock(fh, &width, &height, field,
4985     - /* width_mask: 4 pixels */ ~3,
4986     - /* width_bias: nearest */ 2,
4987     + width_mask, width_bias,
4988     /* adjust_size */ 1,
4989     /* adjust_crop */ 0);
4990     if (0 != rc)
4991     @@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
4992     struct bttv_fh *fh = priv;
4993     struct bttv *btv = fh->btv;
4994     __s32 width, height;
4995     + unsigned int width_mask, width_bias;
4996     enum v4l2_field field;
4997    
4998     retval = bttv_switch_type(fh, f->type);
4999     @@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
5000     height = f->fmt.pix.height;
5001     field = f->fmt.pix.field;
5002    
5003     + fmt = format_by_fourcc(f->fmt.pix.pixelformat);
5004     + bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
5005     retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
5006     - /* width_mask: 4 pixels */ ~3,
5007     - /* width_bias: nearest */ 2,
5008     + width_mask, width_bias,
5009     /* adjust_size */ 1,
5010     /* adjust_crop */ 1);
5011     if (0 != retval)
5012     @@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
5013    
5014     f->fmt.pix.field = field;
5015    
5016     - fmt = format_by_fourcc(f->fmt.pix.pixelformat);
5017     -
5018     /* update our state informations */
5019     fh->fmt = fmt;
5020     fh->cap.field = f->fmt.pix.field;
5021     diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
5022     index a63c1366a64e..1293563b7dce 100644
5023     --- a/drivers/media/pci/saa7134/saa7134-video.c
5024     +++ b/drivers/media/pci/saa7134/saa7134-video.c
5025     @@ -1219,10 +1219,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
5026     f->fmt.pix.height = dev->height;
5027     f->fmt.pix.field = dev->field;
5028     f->fmt.pix.pixelformat = dev->fmt->fourcc;
5029     - f->fmt.pix.bytesperline =
5030     - (f->fmt.pix.width * dev->fmt->depth) >> 3;
5031     + if (dev->fmt->planar)
5032     + f->fmt.pix.bytesperline = f->fmt.pix.width;
5033     + else
5034     + f->fmt.pix.bytesperline =
5035     + (f->fmt.pix.width * dev->fmt->depth) / 8;
5036     f->fmt.pix.sizeimage =
5037     - f->fmt.pix.height * f->fmt.pix.bytesperline;
5038     + (f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
5039     f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
5040     return 0;
5041     }
5042     @@ -1298,10 +1301,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
5043     if (f->fmt.pix.height > maxh)
5044     f->fmt.pix.height = maxh;
5045     f->fmt.pix.width &= ~0x03;
5046     - f->fmt.pix.bytesperline =
5047     - (f->fmt.pix.width * fmt->depth) >> 3;
5048     + if (fmt->planar)
5049     + f->fmt.pix.bytesperline = f->fmt.pix.width;
5050     + else
5051     + f->fmt.pix.bytesperline =
5052     + (f->fmt.pix.width * fmt->depth) / 8;
5053     f->fmt.pix.sizeimage =
5054     - f->fmt.pix.height * f->fmt.pix.bytesperline;
5055     + (f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
5056     f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
5057    
5058     return 0;
5059     diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
5060     index 7d28899f89ce..6efe9d002961 100644
5061     --- a/drivers/media/platform/coda/coda-bit.c
5062     +++ b/drivers/media/platform/coda/coda-bit.c
5063     @@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
5064    
5065     /* Calculate bytesused field */
5066     if (dst_buf->sequence == 0) {
5067     - vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
5068     + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
5069     ctx->vpu_header_size[0] +
5070     ctx->vpu_header_size[1] +
5071     ctx->vpu_header_size[2]);
5072     diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
5073     index 086cf1c7bd7d..18aed5dd325e 100644
5074     --- a/drivers/media/usb/pwc/pwc-if.c
5075     +++ b/drivers/media/usb/pwc/pwc-if.c
5076     @@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
5077     { USB_DEVICE(0x0471, 0x0312) },
5078     { USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
5079     { USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
5080     + { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
5081     { USB_DEVICE(0x069A, 0x0001) }, /* Askey */
5082     { USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
5083     { USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
5084     @@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
5085     name = "Philips SPC 900NC webcam";
5086     type_id = 740;
5087     break;
5088     + case 0x032C:
5089     + PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
5090     + name = "Philips SPC 880NC webcam";
5091     + type_id = 740;
5092     + break;
5093     default:
5094     return -ENODEV;
5095     break;
5096     diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5097     index 8fd84a67478a..019644ff627d 100644
5098     --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5099     +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
5100     @@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
5101     get_user(kp->index, &up->index) ||
5102     get_user(kp->type, &up->type) ||
5103     get_user(kp->flags, &up->flags) ||
5104     - get_user(kp->memory, &up->memory))
5105     + get_user(kp->memory, &up->memory) ||
5106     + get_user(kp->length, &up->length))
5107     return -EFAULT;
5108    
5109     if (V4L2_TYPE_IS_OUTPUT(kp->type))
5110     @@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
5111     return -EFAULT;
5112    
5113     if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
5114     - if (get_user(kp->length, &up->length))
5115     - return -EFAULT;
5116     -
5117     num_planes = kp->length;
5118     if (num_planes == 0) {
5119     kp->m.planes = NULL;
5120     @@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
5121     } else {
5122     switch (kp->memory) {
5123     case V4L2_MEMORY_MMAP:
5124     - if (get_user(kp->length, &up->length) ||
5125     - get_user(kp->m.offset, &up->m.offset))
5126     + if (get_user(kp->m.offset, &up->m.offset))
5127     return -EFAULT;
5128     break;
5129     case V4L2_MEMORY_USERPTR:
5130     {
5131     compat_long_t tmp;
5132    
5133     - if (get_user(kp->length, &up->length) ||
5134     - get_user(tmp, &up->m.userptr))
5135     + if (get_user(tmp, &up->m.userptr))
5136     return -EFAULT;
5137    
5138     kp->m.userptr = (unsigned long)compat_ptr(tmp);
5139     @@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
5140     copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
5141     put_user(kp->sequence, &up->sequence) ||
5142     put_user(kp->reserved2, &up->reserved2) ||
5143     - put_user(kp->reserved, &up->reserved))
5144     + put_user(kp->reserved, &up->reserved) ||
5145     + put_user(kp->length, &up->length))
5146     return -EFAULT;
5147    
5148     if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
5149     @@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
5150     } else {
5151     switch (kp->memory) {
5152     case V4L2_MEMORY_MMAP:
5153     - if (put_user(kp->length, &up->length) ||
5154     - put_user(kp->m.offset, &up->m.offset))
5155     + if (put_user(kp->m.offset, &up->m.offset))
5156     return -EFAULT;
5157     break;
5158     case V4L2_MEMORY_USERPTR:
5159     - if (put_user(kp->length, &up->length) ||
5160     - put_user(kp->m.userptr, &up->m.userptr))
5161     + if (put_user(kp->m.userptr, &up->m.userptr))
5162     return -EFAULT;
5163     break;
5164     case V4L2_MEMORY_OVERLAY:
5165     diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
5166     index 0b05aa938799..1a173d0af694 100644
5167     --- a/drivers/misc/mei/bus.c
5168     +++ b/drivers/misc/mei/bus.c
5169     @@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
5170     bus = cl->dev;
5171    
5172     mutex_lock(&bus->device_lock);
5173     + if (bus->dev_state != MEI_DEV_ENABLED) {
5174     + rets = -ENODEV;
5175     + goto out;
5176     + }
5177     +
5178     if (!mei_cl_is_connected(cl)) {
5179     rets = -ENODEV;
5180     goto out;
5181     @@ -109,6 +114,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
5182     bus = cl->dev;
5183    
5184     mutex_lock(&bus->device_lock);
5185     + if (bus->dev_state != MEI_DEV_ENABLED) {
5186     + rets = -ENODEV;
5187     + goto out;
5188     + }
5189    
5190     cb = mei_cl_read_cb(cl, NULL);
5191     if (cb)
5192     diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
5193     index fe207e542032..5fbffdb6b854 100644
5194     --- a/drivers/mmc/card/block.c
5195     +++ b/drivers/mmc/card/block.c
5196     @@ -589,6 +589,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
5197     struct mmc_card *card;
5198     int err = 0, ioc_err = 0;
5199    
5200     + /*
5201     + * The caller must have CAP_SYS_RAWIO, and must be calling this on the
5202     + * whole block device, not on a partition. This prevents overspray
5203     + * between sibling partitions.
5204     + */
5205     + if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
5206     + return -EPERM;
5207     +
5208     idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
5209     if (IS_ERR(idata))
5210     return PTR_ERR(idata);
5211     @@ -631,6 +639,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
5212     int i, err = 0, ioc_err = 0;
5213     __u64 num_of_cmds;
5214    
5215     + /*
5216     + * The caller must have CAP_SYS_RAWIO, and must be calling this on the
5217     + * whole block device, not on a partition. This prevents overspray
5218     + * between sibling partitions.
5219     + */
5220     + if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
5221     + return -EPERM;
5222     +
5223     if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
5224     sizeof(num_of_cmds)))
5225     return -EFAULT;
5226     @@ -688,14 +704,6 @@ cmd_err:
5227     static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
5228     unsigned int cmd, unsigned long arg)
5229     {
5230     - /*
5231     - * The caller must have CAP_SYS_RAWIO, and must be calling this on the
5232     - * whole block device, not on a partition. This prevents overspray
5233     - * between sibling partitions.
5234     - */
5235     - if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
5236     - return -EPERM;
5237     -
5238     switch (cmd) {
5239     case MMC_IOC_CMD:
5240     return mmc_blk_ioctl_cmd(bdev,
5241     diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
5242     index 851ccd9ac868..25c179592125 100644
5243     --- a/drivers/mmc/host/atmel-mci.c
5244     +++ b/drivers/mmc/host/atmel-mci.c
5245     @@ -2443,7 +2443,7 @@ static int atmci_configure_dma(struct atmel_mci *host)
5246     struct mci_platform_data *pdata = host->pdev->dev.platform_data;
5247     dma_cap_mask_t mask;
5248    
5249     - if (!pdata->dma_filter)
5250     + if (!pdata || !pdata->dma_filter)
5251     return -ENODEV;
5252    
5253     dma_cap_zero(mask);
5254     diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
5255     index 3446097a43c0..e77d79c8cd9f 100644
5256     --- a/drivers/mmc/host/mmc_spi.c
5257     +++ b/drivers/mmc/host/mmc_spi.c
5258     @@ -1442,6 +1442,12 @@ static int mmc_spi_probe(struct spi_device *spi)
5259     host->pdata->cd_debounce);
5260     if (status != 0)
5261     goto fail_add_host;
5262     +
5263     + /* The platform has a CD GPIO signal that may support
5264     + * interrupts, so let mmc_gpiod_request_cd_irq() decide
5265     + * if polling is needed or not.
5266     + */
5267     + mmc->caps &= ~MMC_CAP_NEEDS_POLL;
5268     mmc_gpiod_request_cd_irq(mmc);
5269     }
5270    
5271     diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
5272     index f5edf9d3a18a..c7f27fe4805a 100644
5273     --- a/drivers/mmc/host/sdhci-pxav3.c
5274     +++ b/drivers/mmc/host/sdhci-pxav3.c
5275     @@ -137,6 +137,10 @@ static int armada_38x_quirks(struct platform_device *pdev,
5276    
5277     host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
5278     host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
5279     +
5280     + host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
5281     + host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
5282     +
5283     res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5284     "conf-sdio3");
5285     if (res) {
5286     @@ -150,7 +154,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
5287     * Configuration register, if the adjustment is not done,
5288     * remove them from the capabilities.
5289     */
5290     - host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
5291     host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
5292    
5293     dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
5294     @@ -161,7 +164,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
5295     * controller has different capabilities than the ones shown
5296     * in its registers
5297     */
5298     - host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
5299     if (of_property_read_bool(np, "no-1-8-v")) {
5300     host->caps &= ~SDHCI_CAN_VDD_180;
5301     host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
5302     diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
5303     index 83c4bf7bc16c..0004721cd213 100644
5304     --- a/drivers/mmc/host/sdhci-tegra.c
5305     +++ b/drivers/mmc/host/sdhci-tegra.c
5306     @@ -147,10 +147,16 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
5307     /* Advertise UHS modes as supported by host */
5308     if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
5309     misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
5310     + else
5311     + misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
5312     if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
5313     misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
5314     + else
5315     + misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
5316     if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
5317     misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
5318     + else
5319     + misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
5320     sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
5321    
5322     clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
5323     @@ -188,7 +194,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
5324     unsigned long host_clk;
5325    
5326     if (!clock)
5327     - return;
5328     + return sdhci_set_clock(host, clock);
5329    
5330     host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
5331     clk_set_rate(pltfm_host->clk, host_clk);
5332     @@ -335,6 +341,10 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
5333    
5334     static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
5335     .pdata = &sdhci_tegra114_pdata,
5336     +};
5337     +
5338     +static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
5339     + .pdata = &sdhci_tegra114_pdata,
5340     .nvquirks = NVQUIRK_ENABLE_SDR50 |
5341     NVQUIRK_ENABLE_DDR50 |
5342     NVQUIRK_ENABLE_SDR104,
5343     @@ -357,7 +367,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
5344    
5345     static const struct of_device_id sdhci_tegra_dt_match[] = {
5346     { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
5347     - { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
5348     + { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
5349     { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
5350     { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
5351     { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
5352     diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
5353     index add9fdfd1d8f..8059d7248fff 100644
5354     --- a/drivers/mmc/host/sdhci.c
5355     +++ b/drivers/mmc/host/sdhci.c
5356     @@ -465,8 +465,6 @@ static void sdhci_adma_mark_end(void *desc)
5357     static int sdhci_adma_table_pre(struct sdhci_host *host,
5358     struct mmc_data *data)
5359     {
5360     - int direction;
5361     -
5362     void *desc;
5363     void *align;
5364     dma_addr_t addr;
5365     @@ -483,20 +481,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
5366     * We currently guess that it is LE.
5367     */
5368    
5369     - if (data->flags & MMC_DATA_READ)
5370     - direction = DMA_FROM_DEVICE;
5371     - else
5372     - direction = DMA_TO_DEVICE;
5373     -
5374     - host->align_addr = dma_map_single(mmc_dev(host->mmc),
5375     - host->align_buffer, host->align_buffer_sz, direction);
5376     - if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
5377     - goto fail;
5378     - BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
5379     -
5380     host->sg_count = sdhci_pre_dma_transfer(host, data);
5381     if (host->sg_count < 0)
5382     - goto unmap_align;
5383     + return -EINVAL;
5384    
5385     desc = host->adma_table;
5386     align = host->align_buffer;
5387     @@ -570,22 +557,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
5388     /* nop, end, valid */
5389     sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
5390     }
5391     -
5392     - /*
5393     - * Resync align buffer as we might have changed it.
5394     - */
5395     - if (data->flags & MMC_DATA_WRITE) {
5396     - dma_sync_single_for_device(mmc_dev(host->mmc),
5397     - host->align_addr, host->align_buffer_sz, direction);
5398     - }
5399     -
5400     return 0;
5401     -
5402     -unmap_align:
5403     - dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
5404     - host->align_buffer_sz, direction);
5405     -fail:
5406     - return -EINVAL;
5407     }
5408    
5409     static void sdhci_adma_table_post(struct sdhci_host *host,
5410     @@ -605,9 +577,6 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
5411     else
5412     direction = DMA_TO_DEVICE;
5413    
5414     - dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
5415     - host->align_buffer_sz, direction);
5416     -
5417     /* Do a quick scan of the SG list for any unaligned mappings */
5418     has_unaligned = false;
5419     for_each_sg(data->sg, sg, host->sg_count, i)
5420     @@ -666,9 +635,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
5421     if (!data)
5422     target_timeout = cmd->busy_timeout * 1000;
5423     else {
5424     - target_timeout = data->timeout_ns / 1000;
5425     - if (host->clock)
5426     - target_timeout += data->timeout_clks / host->clock;
5427     + target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
5428     + if (host->clock && data->timeout_clks) {
5429     + unsigned long long val;
5430     +
5431     + /*
5432     + * data->timeout_clks is in units of clock cycles.
5433     + * host->clock is in Hz. target_timeout is in us.
5434     + * Hence, us = 1000000 * cycles / Hz. Round up.
5435     + */
5436     + val = 1000000 * data->timeout_clks;
5437     + if (do_div(val, host->clock))
5438     + target_timeout++;
5439     + target_timeout += val;
5440     + }
5441     }
5442    
5443     /*
5444     @@ -1003,6 +983,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
5445    
5446     WARN_ON(host->cmd);
5447    
5448     + /* Initially, a command has no error */
5449     + cmd->error = 0;
5450     +
5451     /* Wait max 10 ms */
5452     timeout = 10;
5453    
5454     @@ -1097,8 +1080,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
5455     }
5456     }
5457    
5458     - host->cmd->error = 0;
5459     -
5460     /* Finished CMD23, now send actual command. */
5461     if (host->cmd == host->mrq->sbc) {
5462     host->cmd = NULL;
5463     @@ -2114,14 +2095,13 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
5464     struct sdhci_host *host = mmc_priv(mmc);
5465     struct mmc_data *data = mrq->data;
5466    
5467     - if (host->flags & SDHCI_REQ_USE_DMA) {
5468     - if (data->host_cookie == COOKIE_GIVEN ||
5469     - data->host_cookie == COOKIE_MAPPED)
5470     - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
5471     - data->flags & MMC_DATA_WRITE ?
5472     - DMA_TO_DEVICE : DMA_FROM_DEVICE);
5473     - data->host_cookie = COOKIE_UNMAPPED;
5474     - }
5475     + if (data->host_cookie == COOKIE_GIVEN ||
5476     + data->host_cookie == COOKIE_MAPPED)
5477     + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
5478     + data->flags & MMC_DATA_WRITE ?
5479     + DMA_TO_DEVICE : DMA_FROM_DEVICE);
5480     +
5481     + data->host_cookie = COOKIE_UNMAPPED;
5482     }
5483    
5484     static int sdhci_pre_dma_transfer(struct sdhci_host *host,
5485     @@ -2238,6 +2218,22 @@ static void sdhci_tasklet_finish(unsigned long param)
5486     mrq = host->mrq;
5487    
5488     /*
5489     + * Always unmap the data buffers if they were mapped by
5490     + * sdhci_prepare_data() whenever we finish with a request.
5491     + * This avoids leaking DMA mappings on error.
5492     + */
5493     + if (host->flags & SDHCI_REQ_USE_DMA) {
5494     + struct mmc_data *data = mrq->data;
5495     +
5496     + if (data && data->host_cookie == COOKIE_MAPPED) {
5497     + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
5498     + (data->flags & MMC_DATA_READ) ?
5499     + DMA_FROM_DEVICE : DMA_TO_DEVICE);
5500     + data->host_cookie = COOKIE_UNMAPPED;
5501     + }
5502     + }
5503     +
5504     + /*
5505     * The controller needs a reset of internal state machines
5506     * upon error conditions.
5507     */
5508     @@ -2322,13 +2318,30 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
5509     return;
5510     }
5511    
5512     - if (intmask & SDHCI_INT_TIMEOUT)
5513     - host->cmd->error = -ETIMEDOUT;
5514     - else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
5515     - SDHCI_INT_INDEX))
5516     - host->cmd->error = -EILSEQ;
5517     + if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
5518     + SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
5519     + if (intmask & SDHCI_INT_TIMEOUT)
5520     + host->cmd->error = -ETIMEDOUT;
5521     + else
5522     + host->cmd->error = -EILSEQ;
5523     +
5524     + /*
5525     + * If this command initiates a data phase and a response
5526     + * CRC error is signalled, the card can start transferring
5527     + * data - the card may have received the command without
5528     + * error. We must not terminate the mmc_request early.
5529     + *
5530     + * If the card did not receive the command or returned an
5531     + * error which prevented it sending data, the data phase
5532     + * will time out.
5533     + */
5534     + if (host->cmd->data &&
5535     + (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
5536     + SDHCI_INT_CRC) {
5537     + host->cmd = NULL;
5538     + return;
5539     + }
5540    
5541     - if (host->cmd->error) {
5542     tasklet_schedule(&host->finish_tasklet);
5543     return;
5544     }
5545     @@ -2967,14 +2980,21 @@ int sdhci_add_host(struct sdhci_host *host)
5546     &host->adma_addr,
5547     GFP_KERNEL);
5548     host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
5549     - host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
5550     + host->align_buffer = dma_alloc_coherent(mmc_dev(mmc),
5551     + host->align_buffer_sz,
5552     + &host->align_addr,
5553     + GFP_KERNEL);
5554     if (!host->adma_table || !host->align_buffer) {
5555     if (host->adma_table)
5556     dma_free_coherent(mmc_dev(mmc),
5557     host->adma_table_sz,
5558     host->adma_table,
5559     host->adma_addr);
5560     - kfree(host->align_buffer);
5561     + if (host->align_buffer)
5562     + dma_free_coherent(mmc_dev(mmc),
5563     + host->align_buffer_sz,
5564     + host->align_buffer,
5565     + host->align_addr);
5566     pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
5567     mmc_hostname(mmc));
5568     host->flags &= ~SDHCI_USE_ADMA;
5569     @@ -2986,10 +3006,14 @@ int sdhci_add_host(struct sdhci_host *host)
5570     host->flags &= ~SDHCI_USE_ADMA;
5571     dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
5572     host->adma_table, host->adma_addr);
5573     - kfree(host->align_buffer);
5574     + dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
5575     + host->align_buffer, host->align_addr);
5576     host->adma_table = NULL;
5577     host->align_buffer = NULL;
5578     }
5579     +
5580     + /* dma_alloc_coherent returns page aligned and sized buffers */
5581     + BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
5582     }
5583    
5584     /*
5585     @@ -3072,14 +3096,14 @@ int sdhci_add_host(struct sdhci_host *host)
5586     if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
5587     host->timeout_clk *= 1000;
5588    
5589     + if (override_timeout_clk)
5590     + host->timeout_clk = override_timeout_clk;
5591     +
5592     mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
5593     host->ops->get_max_timeout_count(host) : 1 << 27;
5594     mmc->max_busy_timeout /= host->timeout_clk;
5595     }
5596    
5597     - if (override_timeout_clk)
5598     - host->timeout_clk = override_timeout_clk;
5599     -
5600     mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
5601     mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
5602    
5603     @@ -3452,7 +3476,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
5604     if (host->adma_table)
5605     dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
5606     host->adma_table, host->adma_addr);
5607     - kfree(host->align_buffer);
5608     + if (host->align_buffer)
5609     + dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
5610     + host->align_buffer, host->align_addr);
5611    
5612     host->adma_table = NULL;
5613     host->align_buffer = NULL;
5614     diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
5615     index 43b3392ffee7..652d01832873 100644
5616     --- a/drivers/mtd/onenand/onenand_base.c
5617     +++ b/drivers/mtd/onenand/onenand_base.c
5618     @@ -2599,6 +2599,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
5619     */
5620     static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
5621     {
5622     + struct onenand_chip *this = mtd->priv;
5623     int ret;
5624    
5625     ret = onenand_block_isbad(mtd, ofs);
5626     @@ -2610,7 +2611,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
5627     }
5628    
5629     onenand_get_device(mtd, FL_WRITING);
5630     - ret = mtd_block_markbad(mtd, ofs);
5631     + ret = this->block_markbad(mtd, ofs);
5632     onenand_release_device(mtd);
5633     return ret;
5634     }
5635     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
5636     index b0ae69f84493..acb1c5b2bad3 100644
5637     --- a/drivers/net/ethernet/marvell/mvneta.c
5638     +++ b/drivers/net/ethernet/marvell/mvneta.c
5639     @@ -3720,7 +3720,7 @@ static int mvneta_probe(struct platform_device *pdev)
5640     dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
5641     dev->hw_features |= dev->features;
5642     dev->vlan_features |= dev->features;
5643     - dev->priv_flags |= IFF_UNICAST_FLT;
5644     + dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
5645     dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
5646    
5647     err = register_netdev(dev);
5648     diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
5649     index 696852eb23c3..7a3f990c1935 100644
5650     --- a/drivers/net/irda/irtty-sir.c
5651     +++ b/drivers/net/irda/irtty-sir.c
5652     @@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
5653    
5654     /* Module stuff handled via irda_ldisc.owner - Jean II */
5655    
5656     - /* First make sure we're not already connected. */
5657     - if (tty->disc_data != NULL) {
5658     - priv = tty->disc_data;
5659     - if (priv && priv->magic == IRTTY_MAGIC) {
5660     - ret = -EEXIST;
5661     - goto out;
5662     - }
5663     - tty->disc_data = NULL; /* ### */
5664     - }
5665     -
5666     /* stop the underlying driver */
5667     irtty_stop_receiver(tty, TRUE);
5668     if (tty->ops->stop)
5669     diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
5670     index 01f08a7751f7..e7034c55e796 100644
5671     --- a/drivers/net/rionet.c
5672     +++ b/drivers/net/rionet.c
5673     @@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
5674     struct net_device *ndev = dev_id;
5675     struct rionet_private *rnet = netdev_priv(ndev);
5676    
5677     - spin_lock(&rnet->lock);
5678     + spin_lock(&rnet->tx_lock);
5679    
5680     if (netif_msg_intr(rnet))
5681     printk(KERN_INFO
5682     @@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
5683     if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
5684     netif_wake_queue(ndev);
5685    
5686     - spin_unlock(&rnet->lock);
5687     + spin_unlock(&rnet->tx_lock);
5688     }
5689    
5690     static int rionet_open(struct net_device *ndev)
5691     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
5692     index 2ca783fa50cf..7e269f9aa607 100644
5693     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
5694     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
5695     @@ -32,7 +32,7 @@
5696     #define BRCMF_FLOWRING_LOW (BRCMF_FLOWRING_HIGH - 256)
5697     #define BRCMF_FLOWRING_INVALID_IFIDX 0xff
5698    
5699     -#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
5700     +#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
5701     #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
5702    
5703     static const u8 brcmf_flowring_prio2fifo[] = {
5704     @@ -68,7 +68,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
5705     u8 prio, u8 ifidx)
5706     {
5707     struct brcmf_flowring_hash *hash;
5708     - u8 hash_idx;
5709     + u16 hash_idx;
5710     u32 i;
5711     bool found;
5712     bool sta;
5713     @@ -88,6 +88,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
5714     }
5715     hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
5716     BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
5717     + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
5718     found = false;
5719     hash = flow->hash;
5720     for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
5721     @@ -98,6 +99,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
5722     break;
5723     }
5724     hash_idx++;
5725     + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
5726     }
5727     if (found)
5728     return hash[hash_idx].flowid;
5729     @@ -111,7 +113,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
5730     {
5731     struct brcmf_flowring_ring *ring;
5732     struct brcmf_flowring_hash *hash;
5733     - u8 hash_idx;
5734     + u16 hash_idx;
5735     u32 i;
5736     bool found;
5737     u8 fifo;
5738     @@ -131,6 +133,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
5739     }
5740     hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
5741     BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
5742     + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
5743     found = false;
5744     hash = flow->hash;
5745     for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
5746     @@ -140,6 +143,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
5747     break;
5748     }
5749     hash_idx++;
5750     + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
5751     }
5752     if (found) {
5753     for (i = 0; i < flow->nrofrings; i++) {
5754     @@ -169,7 +173,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
5755     }
5756    
5757    
5758     -u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
5759     +u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
5760     {
5761     struct brcmf_flowring_ring *ring;
5762    
5763     @@ -179,7 +183,7 @@ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
5764     }
5765    
5766    
5767     -static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
5768     +static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
5769     bool blocked)
5770     {
5771     struct brcmf_flowring_ring *ring;
5772     @@ -228,10 +232,10 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
5773     }
5774    
5775    
5776     -void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
5777     +void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
5778     {
5779     struct brcmf_flowring_ring *ring;
5780     - u8 hash_idx;
5781     + u16 hash_idx;
5782     struct sk_buff *skb;
5783    
5784     ring = flow->rings[flowid];
5785     @@ -253,7 +257,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
5786     }
5787    
5788    
5789     -u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
5790     +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
5791     struct sk_buff *skb)
5792     {
5793     struct brcmf_flowring_ring *ring;
5794     @@ -279,7 +283,7 @@ u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
5795     }
5796    
5797    
5798     -struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
5799     +struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
5800     {
5801     struct brcmf_flowring_ring *ring;
5802     struct sk_buff *skb;
5803     @@ -300,7 +304,7 @@ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
5804     }
5805    
5806    
5807     -void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
5808     +void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
5809     struct sk_buff *skb)
5810     {
5811     struct brcmf_flowring_ring *ring;
5812     @@ -311,7 +315,7 @@ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
5813     }
5814    
5815    
5816     -u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
5817     +u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
5818     {
5819     struct brcmf_flowring_ring *ring;
5820    
5821     @@ -326,7 +330,7 @@ u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
5822     }
5823    
5824    
5825     -void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
5826     +void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
5827     {
5828     struct brcmf_flowring_ring *ring;
5829    
5830     @@ -340,10 +344,10 @@ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
5831     }
5832    
5833    
5834     -u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
5835     +u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
5836     {
5837     struct brcmf_flowring_ring *ring;
5838     - u8 hash_idx;
5839     + u16 hash_idx;
5840    
5841     ring = flow->rings[flowid];
5842     hash_idx = ring->hash_id;
5843     @@ -384,7 +388,7 @@ void brcmf_flowring_detach(struct brcmf_flowring *flow)
5844     struct brcmf_pub *drvr = bus_if->drvr;
5845     struct brcmf_flowring_tdls_entry *search;
5846     struct brcmf_flowring_tdls_entry *remove;
5847     - u8 flowid;
5848     + u16 flowid;
5849    
5850     for (flowid = 0; flowid < flow->nrofrings; flowid++) {
5851     if (flow->rings[flowid])
5852     @@ -408,7 +412,7 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
5853     struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
5854     struct brcmf_pub *drvr = bus_if->drvr;
5855     u32 i;
5856     - u8 flowid;
5857     + u16 flowid;
5858    
5859     if (flow->addr_mode[ifidx] != addr_mode) {
5860     for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
5861     @@ -434,7 +438,7 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
5862     struct brcmf_flowring_tdls_entry *prev;
5863     struct brcmf_flowring_tdls_entry *search;
5864     u32 i;
5865     - u8 flowid;
5866     + u16 flowid;
5867     bool sta;
5868    
5869     sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
5870     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
5871     index 95fd1c9675d1..068e68d94999 100644
5872     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
5873     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
5874     @@ -16,7 +16,7 @@
5875     #define BRCMFMAC_FLOWRING_H
5876    
5877    
5878     -#define BRCMF_FLOWRING_HASHSIZE 256
5879     +#define BRCMF_FLOWRING_HASHSIZE 512 /* has to be 2^x */
5880     #define BRCMF_FLOWRING_INVALID_ID 0xFFFFFFFF
5881    
5882    
5883     @@ -24,7 +24,7 @@ struct brcmf_flowring_hash {
5884     u8 mac[ETH_ALEN];
5885     u8 fifo;
5886     u8 ifidx;
5887     - u8 flowid;
5888     + u16 flowid;
5889     };
5890    
5891     enum ring_status {
5892     @@ -61,16 +61,16 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
5893     u8 prio, u8 ifidx);
5894     u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
5895     u8 prio, u8 ifidx);
5896     -void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
5897     -void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
5898     -u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
5899     -u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
5900     +void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
5901     +void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
5902     +u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
5903     +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
5904     struct sk_buff *skb);
5905     -struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
5906     -void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
5907     +struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
5908     +void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
5909     struct sk_buff *skb);
5910     -u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid);
5911     -u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid);
5912     +u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
5913     +u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
5914     struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings);
5915     void brcmf_flowring_detach(struct brcmf_flowring *flow);
5916     void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
5917     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
5918     index c2bdb91746cf..922966734a7f 100644
5919     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
5920     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
5921     @@ -677,7 +677,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
5922     }
5923    
5924    
5925     -static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
5926     +static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
5927     {
5928     struct brcmf_flowring *flow = msgbuf->flow;
5929     struct brcmf_commonring *commonring;
5930     @@ -1310,7 +1310,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
5931     }
5932    
5933    
5934     -void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
5935     +void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
5936     {
5937     struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
5938     struct msgbuf_tx_flowring_delete_req *delete;
5939     @@ -1415,6 +1415,13 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
5940     u32 count;
5941    
5942     if_msgbuf = drvr->bus_if->msgbuf;
5943     +
5944     + if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
5945     + brcmf_err("driver not configured for this many flowrings %d\n",
5946     + if_msgbuf->nrof_flowrings);
5947     + if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
5948     + }
5949     +
5950     msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
5951     if (!msgbuf)
5952     goto fail;
5953     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
5954     index 3d513e407e3d..ee6906a3c3f6 100644
5955     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
5956     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
5957     @@ -33,7 +33,7 @@
5958    
5959    
5960     int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
5961     -void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
5962     +void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
5963     int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
5964     void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
5965     #else
5966     diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
5967     index bf9afbf46c1b..4b0bb6b4f6f1 100644
5968     --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
5969     +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
5970     @@ -1026,6 +1026,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
5971     { USB_DEVICE(0x0411, 0x01a2) },
5972     { USB_DEVICE(0x0411, 0x01ee) },
5973     { USB_DEVICE(0x0411, 0x01a8) },
5974     + { USB_DEVICE(0x0411, 0x01fd) },
5975     /* Corega */
5976     { USB_DEVICE(0x07aa, 0x002f) },
5977     { USB_DEVICE(0x07aa, 0x003c) },
5978     diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
5979     index 5d28e9405f32..576eb7013792 100644
5980     --- a/drivers/nvdimm/bus.c
5981     +++ b/drivers/nvdimm/bus.c
5982     @@ -513,10 +513,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
5983    
5984     /* fail write commands (when read-only) */
5985     if (read_only)
5986     - switch (ioctl_cmd) {
5987     - case ND_IOCTL_VENDOR:
5988     - case ND_IOCTL_SET_CONFIG_DATA:
5989     - case ND_IOCTL_ARS_START:
5990     + switch (cmd) {
5991     + case ND_CMD_VENDOR:
5992     + case ND_CMD_SET_CONFIG_DATA:
5993     + case ND_CMD_ARS_START:
5994     dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
5995     nvdimm ? nvdimm_cmd_name(cmd)
5996     : nvdimm_bus_cmd_name(cmd));
5997     diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
5998     index 8d0b54670184..544b802a594c 100644
5999     --- a/drivers/nvdimm/pmem.c
6000     +++ b/drivers/nvdimm/pmem.c
6001     @@ -66,22 +66,25 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
6002     unsigned int len, unsigned int off, int rw,
6003     sector_t sector)
6004     {
6005     + int rc = 0;
6006     void *mem = kmap_atomic(page);
6007     phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
6008     void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
6009    
6010     if (rw == READ) {
6011     if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
6012     - return -EIO;
6013     - memcpy_from_pmem(mem + off, pmem_addr, len);
6014     - flush_dcache_page(page);
6015     + rc = -EIO;
6016     + else {
6017     + memcpy_from_pmem(mem + off, pmem_addr, len);
6018     + flush_dcache_page(page);
6019     + }
6020     } else {
6021     flush_dcache_page(page);
6022     memcpy_to_pmem(pmem_addr, mem + off, len);
6023     }
6024    
6025     kunmap_atomic(mem);
6026     - return 0;
6027     + return rc;
6028     }
6029    
6030     static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
6031     diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
6032     index 1a3556a9e9ea..ed01c0172e4a 100644
6033     --- a/drivers/of/of_reserved_mem.c
6034     +++ b/drivers/of/of_reserved_mem.c
6035     @@ -32,11 +32,13 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
6036     phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
6037     phys_addr_t *res_base)
6038     {
6039     + phys_addr_t base;
6040     /*
6041     * We use __memblock_alloc_base() because memblock_alloc_base()
6042     * panic()s on allocation failure.
6043     */
6044     - phys_addr_t base = __memblock_alloc_base(size, align, end);
6045     + end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
6046     + base = __memblock_alloc_base(size, align, end);
6047     if (!base)
6048     return -ENOMEM;
6049    
6050     diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
6051     index 6d7ab9bb0d5a..6b0056e9c33e 100644
6052     --- a/drivers/pci/probe.c
6053     +++ b/drivers/pci/probe.c
6054     @@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
6055     u16 orig_cmd;
6056     struct pci_bus_region region, inverted_region;
6057    
6058     + if (dev->non_compliant_bars)
6059     + return 0;
6060     +
6061     mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
6062    
6063     /* No printks while decoding is disabled! */
6064     @@ -1171,6 +1174,7 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
6065     int pci_setup_device(struct pci_dev *dev)
6066     {
6067     u32 class;
6068     + u16 cmd;
6069     u8 hdr_type;
6070     int pos = 0;
6071     struct pci_bus_region region;
6072     @@ -1214,6 +1218,16 @@ int pci_setup_device(struct pci_dev *dev)
6073     /* device class may be changed after fixup */
6074     class = dev->class >> 8;
6075    
6076     + if (dev->non_compliant_bars) {
6077     + pci_read_config_word(dev, PCI_COMMAND, &cmd);
6078     + if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
6079     + dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
6080     + cmd &= ~PCI_COMMAND_IO;
6081     + cmd &= ~PCI_COMMAND_MEMORY;
6082     + pci_write_config_word(dev, PCI_COMMAND, cmd);
6083     + }
6084     + }
6085     +
6086     switch (dev->hdr_type) { /* header type */
6087     case PCI_HEADER_TYPE_NORMAL: /* standard header */
6088     if (class == PCI_CLASS_BRIDGE_PCI)
6089     diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
6090     index 0f5997ceb494..08b1d93da9fe 100644
6091     --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
6092     +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
6093     @@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
6094     }
6095     if (num_pulls) {
6096     err = of_property_read_u32_index(np, "brcm,pull",
6097     - (num_funcs > 1) ? i : 0, &pull);
6098     + (num_pulls > 1) ? i : 0, &pull);
6099     if (err)
6100     goto out;
6101     err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
6102     diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
6103     index d78ee151c9e4..be3bc2f4edd4 100644
6104     --- a/drivers/platform/x86/ideapad-laptop.c
6105     +++ b/drivers/platform/x86/ideapad-laptop.c
6106     @@ -865,6 +865,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
6107     },
6108     },
6109     {
6110     + .ident = "Lenovo ideapad Y700-15ISK",
6111     + .matches = {
6112     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
6113     + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
6114     + },
6115     + },
6116     + {
6117     + .ident = "Lenovo ideapad Y700 Touch-15ISK",
6118     + .matches = {
6119     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
6120     + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
6121     + },
6122     + },
6123     + {
6124     .ident = "Lenovo ideapad Y700-17ISK",
6125     .matches = {
6126     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
6127     diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
6128     index d72867257346..3eff2a69fe08 100644
6129     --- a/drivers/scsi/NCR5380.c
6130     +++ b/drivers/scsi/NCR5380.c
6131     @@ -760,7 +760,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
6132     struct NCR5380_cmd *ncmd;
6133     struct scsi_cmnd *cmd;
6134    
6135     - if (list_empty(&hostdata->autosense)) {
6136     + if (hostdata->sensing || list_empty(&hostdata->autosense)) {
6137     list_for_each_entry(ncmd, &hostdata->unissued, list) {
6138     cmd = NCR5380_to_scmd(ncmd);
6139     dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
6140     @@ -793,7 +793,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
6141     struct NCR5380_hostdata *hostdata = shost_priv(instance);
6142     struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
6143    
6144     - if (hostdata->sensing) {
6145     + if (hostdata->sensing == cmd) {
6146     scsi_eh_restore_cmnd(cmd, &hostdata->ses);
6147     list_add(&ncmd->list, &hostdata->autosense);
6148     hostdata->sensing = NULL;
6149     @@ -815,15 +815,17 @@ static void NCR5380_main(struct work_struct *work)
6150     struct NCR5380_hostdata *hostdata =
6151     container_of(work, struct NCR5380_hostdata, main_task);
6152     struct Scsi_Host *instance = hostdata->host;
6153     - struct scsi_cmnd *cmd;
6154     int done;
6155    
6156     do {
6157     done = 1;
6158    
6159     spin_lock_irq(&hostdata->lock);
6160     - while (!hostdata->connected &&
6161     - (cmd = dequeue_next_cmd(instance))) {
6162     + while (!hostdata->connected && !hostdata->selecting) {
6163     + struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
6164     +
6165     + if (!cmd)
6166     + break;
6167    
6168     dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
6169    
6170     @@ -840,8 +842,7 @@ static void NCR5380_main(struct work_struct *work)
6171     * entire unit.
6172     */
6173    
6174     - cmd = NCR5380_select(instance, cmd);
6175     - if (!cmd) {
6176     + if (!NCR5380_select(instance, cmd)) {
6177     dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
6178     } else {
6179     dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
6180     @@ -1056,6 +1057,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
6181     /* Reselection interrupt */
6182     goto out;
6183     }
6184     + if (!hostdata->selecting) {
6185     + /* Command was aborted */
6186     + NCR5380_write(MODE_REG, MR_BASE);
6187     + goto out;
6188     + }
6189     if (err < 0) {
6190     NCR5380_write(MODE_REG, MR_BASE);
6191     shost_printk(KERN_ERR, instance,
6192     @@ -1759,9 +1765,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
6193     unsigned char msgout = NOP;
6194     int sink = 0;
6195     int len;
6196     -#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
6197     int transfersize;
6198     -#endif
6199     unsigned char *data;
6200     unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
6201     struct scsi_cmnd *cmd;
6202     @@ -1798,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
6203     do_abort(instance);
6204     cmd->result = DID_ERROR << 16;
6205     complete_cmd(instance, cmd);
6206     + hostdata->connected = NULL;
6207     return;
6208     #endif
6209     case PHASE_DATAIN:
6210     @@ -1847,20 +1852,23 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
6211     sink = 1;
6212     do_abort(instance);
6213     cmd->result = DID_ERROR << 16;
6214     - complete_cmd(instance, cmd);
6215     /* XXX - need to source or sink data here, as appropriate */
6216     } else
6217     cmd->SCp.this_residual -= transfersize - len;
6218     } else
6219     #endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
6220     {
6221     - spin_unlock_irq(&hostdata->lock);
6222     - NCR5380_transfer_pio(instance, &phase,
6223     - (int *)&cmd->SCp.this_residual,
6224     + /* Break up transfer into 3 ms chunks,
6225     + * presuming 6 accesses per handshake.
6226     + */
6227     + transfersize = min((unsigned long)cmd->SCp.this_residual,
6228     + hostdata->accesses_per_ms / 2);
6229     + len = transfersize;
6230     + NCR5380_transfer_pio(instance, &phase, &len,
6231     (unsigned char **)&cmd->SCp.ptr);
6232     - spin_lock_irq(&hostdata->lock);
6233     + cmd->SCp.this_residual -= transfersize - len;
6234     }
6235     - break;
6236     + return;
6237     case PHASE_MSGIN:
6238     len = 1;
6239     data = &tmp;
6240     @@ -2292,14 +2300,17 @@ static bool list_del_cmd(struct list_head *haystack,
6241     * [disconnected -> connected ->]...
6242     * [autosense -> connected ->] done
6243     *
6244     - * If cmd is unissued then just remove it.
6245     - * If cmd is disconnected, try to select the target.
6246     - * If cmd is connected, try to send an abort message.
6247     - * If cmd is waiting for autosense, give it a chance to complete but check
6248     - * that it isn't left connected.
6249     * If cmd was not found at all then presumably it has already been completed,
6250     * in which case return SUCCESS to try to avoid further EH measures.
6251     + *
6252     * If the command has not completed yet, we must not fail to find it.
6253     + * We have no option but to forget the aborted command (even if it still
6254     + * lacks sense data). The mid-layer may re-issue a command that is in error
6255     + * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
6256     + * this driver are such that a command can appear on one queue only.
6257     + *
6258     + * The lock protects driver data structures, but EH handlers also use it
6259     + * to serialize their own execution and prevent their own re-entry.
6260     */
6261    
6262     static int NCR5380_abort(struct scsi_cmnd *cmd)
6263     @@ -2322,6 +2333,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
6264     "abort: removed %p from issue queue\n", cmd);
6265     cmd->result = DID_ABORT << 16;
6266     cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
6267     + goto out;
6268     }
6269    
6270     if (hostdata->selecting == cmd) {
6271     @@ -2336,58 +2348,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
6272     if (list_del_cmd(&hostdata->disconnected, cmd)) {
6273     dsprintk(NDEBUG_ABORT, instance,
6274     "abort: removed %p from disconnected list\n", cmd);
6275     - cmd->result = DID_ERROR << 16;
6276     - if (!hostdata->connected)
6277     - NCR5380_select(instance, cmd);
6278     - if (hostdata->connected != cmd) {
6279     - complete_cmd(instance, cmd);
6280     - result = FAILED;
6281     - goto out;
6282     - }
6283     + /* Can't call NCR5380_select() and send ABORT because that
6284     + * means releasing the lock. Need a bus reset.
6285     + */
6286     + set_host_byte(cmd, DID_ERROR);
6287     + complete_cmd(instance, cmd);
6288     + result = FAILED;
6289     + goto out;
6290     }
6291    
6292     if (hostdata->connected == cmd) {
6293     dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
6294     hostdata->connected = NULL;
6295     - if (do_abort(instance)) {
6296     - set_host_byte(cmd, DID_ERROR);
6297     - complete_cmd(instance, cmd);
6298     - result = FAILED;
6299     - goto out;
6300     - }
6301     - set_host_byte(cmd, DID_ABORT);
6302     #ifdef REAL_DMA
6303     hostdata->dma_len = 0;
6304     #endif
6305     - if (cmd->cmnd[0] == REQUEST_SENSE)
6306     - complete_cmd(instance, cmd);
6307     - else {
6308     - struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
6309     -
6310     - /* Perform autosense for this command */
6311     - list_add(&ncmd->list, &hostdata->autosense);
6312     - }
6313     - }
6314     -
6315     - if (list_find_cmd(&hostdata->autosense, cmd)) {
6316     - dsprintk(NDEBUG_ABORT, instance,
6317     - "abort: found %p on sense queue\n", cmd);
6318     - spin_unlock_irqrestore(&hostdata->lock, flags);
6319     - queue_work(hostdata->work_q, &hostdata->main_task);
6320     - msleep(1000);
6321     - spin_lock_irqsave(&hostdata->lock, flags);
6322     - if (list_del_cmd(&hostdata->autosense, cmd)) {
6323     - dsprintk(NDEBUG_ABORT, instance,
6324     - "abort: removed %p from sense queue\n", cmd);
6325     - set_host_byte(cmd, DID_ABORT);
6326     - complete_cmd(instance, cmd);
6327     - goto out;
6328     - }
6329     - }
6330     -
6331     - if (hostdata->connected == cmd) {
6332     - dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
6333     - hostdata->connected = NULL;
6334     if (do_abort(instance)) {
6335     set_host_byte(cmd, DID_ERROR);
6336     complete_cmd(instance, cmd);
6337     @@ -2395,9 +2370,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
6338     goto out;
6339     }
6340     set_host_byte(cmd, DID_ABORT);
6341     -#ifdef REAL_DMA
6342     - hostdata->dma_len = 0;
6343     -#endif
6344     + complete_cmd(instance, cmd);
6345     + goto out;
6346     + }
6347     +
6348     + if (list_del_cmd(&hostdata->autosense, cmd)) {
6349     + dsprintk(NDEBUG_ABORT, instance,
6350     + "abort: removed %p from sense queue\n", cmd);
6351     + set_host_byte(cmd, DID_ERROR);
6352     complete_cmd(instance, cmd);
6353     }
6354    
6355     @@ -2450,7 +2430,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
6356     * commands!
6357     */
6358    
6359     - hostdata->selecting = NULL;
6360     + if (list_del_cmd(&hostdata->unissued, cmd)) {
6361     + cmd->result = DID_RESET << 16;
6362     + cmd->scsi_done(cmd);
6363     + }
6364     +
6365     + if (hostdata->selecting) {
6366     + hostdata->selecting->result = DID_RESET << 16;
6367     + complete_cmd(instance, hostdata->selecting);
6368     + hostdata->selecting = NULL;
6369     + }
6370    
6371     list_for_each_entry(ncmd, &hostdata->disconnected, list) {
6372     struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
6373     @@ -2458,6 +2447,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
6374     set_host_byte(cmd, DID_RESET);
6375     cmd->scsi_done(cmd);
6376     }
6377     + INIT_LIST_HEAD(&hostdata->disconnected);
6378    
6379     list_for_each_entry(ncmd, &hostdata->autosense, list) {
6380     struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
6381     @@ -2465,6 +2455,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
6382     set_host_byte(cmd, DID_RESET);
6383     cmd->scsi_done(cmd);
6384     }
6385     + INIT_LIST_HEAD(&hostdata->autosense);
6386    
6387     if (hostdata->connected) {
6388     set_host_byte(hostdata->connected, DID_RESET);
6389     @@ -2472,12 +2463,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
6390     hostdata->connected = NULL;
6391     }
6392    
6393     - if (hostdata->sensing) {
6394     - set_host_byte(hostdata->connected, DID_RESET);
6395     - complete_cmd(instance, hostdata->sensing);
6396     - hostdata->sensing = NULL;
6397     - }
6398     -
6399     for (i = 0; i < 8; ++i)
6400     hostdata->busy[i] = 0;
6401     #ifdef REAL_DMA
6402     diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
6403     index 074878b55a0b..d044f3f273be 100644
6404     --- a/drivers/scsi/aacraid/aacraid.h
6405     +++ b/drivers/scsi/aacraid/aacraid.h
6406     @@ -944,6 +944,7 @@ struct fib {
6407     */
6408     struct list_head fiblink;
6409     void *data;
6410     + u32 vector_no;
6411     struct hw_fib *hw_fib_va; /* Actual shared object */
6412     dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
6413     };
6414     @@ -2113,6 +2114,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
6415     int aac_acquire_irq(struct aac_dev *dev);
6416     void aac_free_irq(struct aac_dev *dev);
6417     const char *aac_driverinfo(struct Scsi_Host *);
6418     +void aac_fib_vector_assign(struct aac_dev *dev);
6419     struct fib *aac_fib_alloc(struct aac_dev *dev);
6420     int aac_fib_setup(struct aac_dev *dev);
6421     void aac_fib_map_free(struct aac_dev *dev);
6422     diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
6423     index a1f90fe849c9..4cbf54928640 100644
6424     --- a/drivers/scsi/aacraid/commsup.c
6425     +++ b/drivers/scsi/aacraid/commsup.c
6426     @@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
6427    
6428     void aac_fib_map_free(struct aac_dev *dev)
6429     {
6430     - pci_free_consistent(dev->pdev,
6431     - dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
6432     - dev->hw_fib_va, dev->hw_fib_pa);
6433     + if (dev->hw_fib_va && dev->max_fib_size) {
6434     + pci_free_consistent(dev->pdev,
6435     + (dev->max_fib_size *
6436     + (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
6437     + dev->hw_fib_va, dev->hw_fib_pa);
6438     + }
6439     dev->hw_fib_va = NULL;
6440     dev->hw_fib_pa = 0;
6441     }
6442    
6443     +void aac_fib_vector_assign(struct aac_dev *dev)
6444     +{
6445     + u32 i = 0;
6446     + u32 vector = 1;
6447     + struct fib *fibptr = NULL;
6448     +
6449     + for (i = 0, fibptr = &dev->fibs[i];
6450     + i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
6451     + i++, fibptr++) {
6452     + if ((dev->max_msix == 1) ||
6453     + (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
6454     + - dev->vector_cap))) {
6455     + fibptr->vector_no = 0;
6456     + } else {
6457     + fibptr->vector_no = vector;
6458     + vector++;
6459     + if (vector == dev->max_msix)
6460     + vector = 1;
6461     + }
6462     + }
6463     +}
6464     +
6465     /**
6466     * aac_fib_setup - setup the fibs
6467     * @dev: Adapter to set up
6468     @@ -151,6 +176,12 @@ int aac_fib_setup(struct aac_dev * dev)
6469     hw_fib_pa = hw_fib_pa +
6470     dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
6471     }
6472     +
6473     + /*
6474     + * Assign vector numbers to fibs
6475     + */
6476     + aac_fib_vector_assign(dev);
6477     +
6478     /*
6479     * Add the fib chain to the free list
6480     */
6481     diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
6482     index 76eaa38ffd6e..8a8e84548d64 100644
6483     --- a/drivers/scsi/aacraid/linit.c
6484     +++ b/drivers/scsi/aacraid/linit.c
6485     @@ -1404,8 +1404,18 @@ static int aac_acquire_resources(struct aac_dev *dev)
6486    
6487     aac_adapter_enable_int(dev);
6488    
6489     - if (!dev->sync_mode)
6490     + /*max msix may change after EEH
6491     + * Re-assign vectors to fibs
6492     + */
6493     + aac_fib_vector_assign(dev);
6494     +
6495     + if (!dev->sync_mode) {
6496     + /* After EEH recovery or suspend resume, max_msix count
6497     + * may change, therefore updating in init as well.
6498     + */
6499     aac_adapter_start(dev);
6500     + dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
6501     + }
6502     return 0;
6503    
6504     error_iounmap:
6505     diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
6506     index 2aa34ea8ceb1..bc0203f3d243 100644
6507     --- a/drivers/scsi/aacraid/src.c
6508     +++ b/drivers/scsi/aacraid/src.c
6509     @@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
6510     break;
6511     if (dev->msi_enabled && dev->max_msix > 1)
6512     atomic_dec(&dev->rrq_outstanding[vector_no]);
6513     - aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
6514     dev->host_rrq[index++] = 0;
6515     + aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
6516     if (index == (vector_no + 1) * dev->vector_cap)
6517     index = vector_no * dev->vector_cap;
6518     dev->host_rrq_idx[vector_no] = index;
6519     @@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
6520     #endif
6521    
6522     u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
6523     + u16 vector_no;
6524    
6525     atomic_inc(&q->numpending);
6526    
6527     if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
6528     dev->max_msix > 1) {
6529     - u_int16_t vector_no, first_choice = 0xffff;
6530     -
6531     - vector_no = dev->fibs_pushed_no % dev->max_msix;
6532     - do {
6533     - vector_no += 1;
6534     - if (vector_no == dev->max_msix)
6535     - vector_no = 1;
6536     - if (atomic_read(&dev->rrq_outstanding[vector_no]) <
6537     - dev->vector_cap)
6538     - break;
6539     - if (0xffff == first_choice)
6540     - first_choice = vector_no;
6541     - else if (vector_no == first_choice)
6542     - break;
6543     - } while (1);
6544     - if (vector_no == first_choice)
6545     - vector_no = 0;
6546     - atomic_inc(&dev->rrq_outstanding[vector_no]);
6547     - if (dev->fibs_pushed_no == 0xffffffff)
6548     - dev->fibs_pushed_no = 0;
6549     - else
6550     - dev->fibs_pushed_no++;
6551     + vector_no = fib->vector_no;
6552     fib->hw_fib_va->header.Handle += (vector_no << 16);
6553     + } else {
6554     + vector_no = 0;
6555     }
6556    
6557     + atomic_inc(&dev->rrq_outstanding[vector_no]);
6558     +
6559     if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
6560     /* Calculate the amount to the fibsize bits */
6561     fibsize = (hdr_size + 127) / 128 - 1;
6562     diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
6563     index b846a4683562..fc6a83188c1e 100644
6564     --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
6565     +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
6566     @@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
6567     case AHC_DEV_Q_TAGGED:
6568     scsi_change_queue_depth(sdev,
6569     dev->openings + dev->active);
6570     + break;
6571     default:
6572     /*
6573     * We allow the OS to queue 2 untagged transactions to
6574     diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
6575     index e65478651ca9..389825ba5d96 100644
6576     --- a/drivers/scsi/atari_NCR5380.c
6577     +++ b/drivers/scsi/atari_NCR5380.c
6578     @@ -862,7 +862,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
6579     struct NCR5380_cmd *ncmd;
6580     struct scsi_cmnd *cmd;
6581    
6582     - if (list_empty(&hostdata->autosense)) {
6583     + if (hostdata->sensing || list_empty(&hostdata->autosense)) {
6584     list_for_each_entry(ncmd, &hostdata->unissued, list) {
6585     cmd = NCR5380_to_scmd(ncmd);
6586     dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
6587     @@ -901,7 +901,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
6588     struct NCR5380_hostdata *hostdata = shost_priv(instance);
6589     struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
6590    
6591     - if (hostdata->sensing) {
6592     + if (hostdata->sensing == cmd) {
6593     scsi_eh_restore_cmnd(cmd, &hostdata->ses);
6594     list_add(&ncmd->list, &hostdata->autosense);
6595     hostdata->sensing = NULL;
6596     @@ -923,7 +923,6 @@ static void NCR5380_main(struct work_struct *work)
6597     struct NCR5380_hostdata *hostdata =
6598     container_of(work, struct NCR5380_hostdata, main_task);
6599     struct Scsi_Host *instance = hostdata->host;
6600     - struct scsi_cmnd *cmd;
6601     int done;
6602    
6603     /*
6604     @@ -936,8 +935,11 @@ static void NCR5380_main(struct work_struct *work)
6605     done = 1;
6606    
6607     spin_lock_irq(&hostdata->lock);
6608     - while (!hostdata->connected &&
6609     - (cmd = dequeue_next_cmd(instance))) {
6610     + while (!hostdata->connected && !hostdata->selecting) {
6611     + struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
6612     +
6613     + if (!cmd)
6614     + break;
6615    
6616     dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
6617    
6618     @@ -960,8 +962,7 @@ static void NCR5380_main(struct work_struct *work)
6619     #ifdef SUPPORT_TAGS
6620     cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
6621     #endif
6622     - cmd = NCR5380_select(instance, cmd);
6623     - if (!cmd) {
6624     + if (!NCR5380_select(instance, cmd)) {
6625     dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
6626     maybe_release_dma_irq(instance);
6627     } else {
6628     @@ -1257,6 +1258,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
6629     /* Reselection interrupt */
6630     goto out;
6631     }
6632     + if (!hostdata->selecting) {
6633     + /* Command was aborted */
6634     + NCR5380_write(MODE_REG, MR_BASE);
6635     + goto out;
6636     + }
6637     if (err < 0) {
6638     NCR5380_write(MODE_REG, MR_BASE);
6639     shost_printk(KERN_ERR, instance,
6640     @@ -1838,9 +1844,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
6641     unsigned char msgout = NOP;
6642     int sink = 0;
6643     int len;
6644     -#if defined(REAL_DMA)
6645     int transfersize;
6646     -#endif
6647     unsigned char *data;
6648     unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
6649     struct scsi_cmnd *cmd;
6650     @@ -1909,6 +1913,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
6651     do_abort(instance);
6652     cmd->result = DID_ERROR << 16;
6653     complete_cmd(instance, cmd);
6654     + hostdata->connected = NULL;
6655     return;
6656     #endif
6657     case PHASE_DATAIN:
6658     @@ -1966,7 +1971,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
6659     sink = 1;
6660     do_abort(instance);
6661     cmd->result = DID_ERROR << 16;
6662     - complete_cmd(instance, cmd);
6663     /* XXX - need to source or sink data here, as appropriate */
6664     } else {
6665     #ifdef REAL_DMA
6666     @@ -1983,18 +1987,22 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
6667     } else
6668     #endif /* defined(REAL_DMA) */
6669     {
6670     - spin_unlock_irq(&hostdata->lock);
6671     - NCR5380_transfer_pio(instance, &phase,
6672     - (int *)&cmd->SCp.this_residual,
6673     + /* Break up transfer into 3 ms chunks,
6674     + * presuming 6 accesses per handshake.
6675     + */
6676     + transfersize = min((unsigned long)cmd->SCp.this_residual,
6677     + hostdata->accesses_per_ms / 2);
6678     + len = transfersize;
6679     + NCR5380_transfer_pio(instance, &phase, &len,
6680     (unsigned char **)&cmd->SCp.ptr);
6681     - spin_lock_irq(&hostdata->lock);
6682     + cmd->SCp.this_residual -= transfersize - len;
6683     }
6684     #if defined(CONFIG_SUN3) && defined(REAL_DMA)
6685     /* if we had intended to dma that command clear it */
6686     if (sun3_dma_setup_done == cmd)
6687     sun3_dma_setup_done = NULL;
6688     #endif
6689     - break;
6690     + return;
6691     case PHASE_MSGIN:
6692     len = 1;
6693     data = &tmp;
6694     @@ -2487,14 +2495,17 @@ static bool list_del_cmd(struct list_head *haystack,
6695     * [disconnected -> connected ->]...
6696     * [autosense -> connected ->] done
6697     *
6698     - * If cmd is unissued then just remove it.
6699     - * If cmd is disconnected, try to select the target.
6700     - * If cmd is connected, try to send an abort message.
6701     - * If cmd is waiting for autosense, give it a chance to complete but check
6702     - * that it isn't left connected.
6703     * If cmd was not found at all then presumably it has already been completed,
6704     * in which case return SUCCESS to try to avoid further EH measures.
6705     + *
6706     * If the command has not completed yet, we must not fail to find it.
6707     + * We have no option but to forget the aborted command (even if it still
6708     + * lacks sense data). The mid-layer may re-issue a command that is in error
6709     + * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
6710     + * this driver are such that a command can appear on one queue only.
6711     + *
6712     + * The lock protects driver data structures, but EH handlers also use it
6713     + * to serialize their own execution and prevent their own re-entry.
6714     */
6715    
6716     static int NCR5380_abort(struct scsi_cmnd *cmd)
6717     @@ -2517,6 +2528,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
6718     "abort: removed %p from issue queue\n", cmd);
6719     cmd->result = DID_ABORT << 16;
6720     cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
6721     + goto out;
6722     }
6723    
6724     if (hostdata->selecting == cmd) {
6725     @@ -2531,58 +2543,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
6726     if (list_del_cmd(&hostdata->disconnected, cmd)) {
6727     dsprintk(NDEBUG_ABORT, instance,
6728     "abort: removed %p from disconnected list\n", cmd);
6729     - cmd->result = DID_ERROR << 16;
6730     - if (!hostdata->connected)
6731     - NCR5380_select(instance, cmd);
6732     - if (hostdata->connected != cmd) {
6733     - complete_cmd(instance, cmd);
6734     - result = FAILED;
6735     - goto out;
6736     - }
6737     + /* Can't call NCR5380_select() and send ABORT because that
6738     + * means releasing the lock. Need a bus reset.
6739     + */
6740     + set_host_byte(cmd, DID_ERROR);
6741     + complete_cmd(instance, cmd);
6742     + result = FAILED;
6743     + goto out;
6744     }
6745    
6746     if (hostdata->connected == cmd) {
6747     dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
6748     hostdata->connected = NULL;
6749     - if (do_abort(instance)) {
6750     - set_host_byte(cmd, DID_ERROR);
6751     - complete_cmd(instance, cmd);
6752     - result = FAILED;
6753     - goto out;
6754     - }
6755     - set_host_byte(cmd, DID_ABORT);
6756     #ifdef REAL_DMA
6757     hostdata->dma_len = 0;
6758     #endif
6759     - if (cmd->cmnd[0] == REQUEST_SENSE)
6760     - complete_cmd(instance, cmd);
6761     - else {
6762     - struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
6763     -
6764     - /* Perform autosense for this command */
6765     - list_add(&ncmd->list, &hostdata->autosense);
6766     - }
6767     - }
6768     -
6769     - if (list_find_cmd(&hostdata->autosense, cmd)) {
6770     - dsprintk(NDEBUG_ABORT, instance,
6771     - "abort: found %p on sense queue\n", cmd);
6772     - spin_unlock_irqrestore(&hostdata->lock, flags);
6773     - queue_work(hostdata->work_q, &hostdata->main_task);
6774     - msleep(1000);
6775     - spin_lock_irqsave(&hostdata->lock, flags);
6776     - if (list_del_cmd(&hostdata->autosense, cmd)) {
6777     - dsprintk(NDEBUG_ABORT, instance,
6778     - "abort: removed %p from sense queue\n", cmd);
6779     - set_host_byte(cmd, DID_ABORT);
6780     - complete_cmd(instance, cmd);
6781     - goto out;
6782     - }
6783     - }
6784     -
6785     - if (hostdata->connected == cmd) {
6786     - dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
6787     - hostdata->connected = NULL;
6788     if (do_abort(instance)) {
6789     set_host_byte(cmd, DID_ERROR);
6790     complete_cmd(instance, cmd);
6791     @@ -2590,9 +2565,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
6792     goto out;
6793     }
6794     set_host_byte(cmd, DID_ABORT);
6795     -#ifdef REAL_DMA
6796     - hostdata->dma_len = 0;
6797     -#endif
6798     + complete_cmd(instance, cmd);
6799     + goto out;
6800     + }
6801     +
6802     + if (list_del_cmd(&hostdata->autosense, cmd)) {
6803     + dsprintk(NDEBUG_ABORT, instance,
6804     + "abort: removed %p from sense queue\n", cmd);
6805     + set_host_byte(cmd, DID_ERROR);
6806     complete_cmd(instance, cmd);
6807     }
6808    
6809     @@ -2646,7 +2626,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
6810     * commands!
6811     */
6812    
6813     - hostdata->selecting = NULL;
6814     + if (list_del_cmd(&hostdata->unissued, cmd)) {
6815     + cmd->result = DID_RESET << 16;
6816     + cmd->scsi_done(cmd);
6817     + }
6818     +
6819     + if (hostdata->selecting) {
6820     + hostdata->selecting->result = DID_RESET << 16;
6821     + complete_cmd(instance, hostdata->selecting);
6822     + hostdata->selecting = NULL;
6823     + }
6824    
6825     list_for_each_entry(ncmd, &hostdata->disconnected, list) {
6826     struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
6827     @@ -2654,6 +2643,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
6828     set_host_byte(cmd, DID_RESET);
6829     cmd->scsi_done(cmd);
6830     }
6831     + INIT_LIST_HEAD(&hostdata->disconnected);
6832    
6833     list_for_each_entry(ncmd, &hostdata->autosense, list) {
6834     struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
6835     @@ -2661,6 +2651,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
6836     set_host_byte(cmd, DID_RESET);
6837     cmd->scsi_done(cmd);
6838     }
6839     + INIT_LIST_HEAD(&hostdata->autosense);
6840    
6841     if (hostdata->connected) {
6842     set_host_byte(hostdata->connected, DID_RESET);
6843     @@ -2668,12 +2659,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
6844     hostdata->connected = NULL;
6845     }
6846    
6847     - if (hostdata->sensing) {
6848     - set_host_byte(hostdata->connected, DID_RESET);
6849     - complete_cmd(instance, hostdata->sensing);
6850     - hostdata->sensing = NULL;
6851     - }
6852     -
6853     #ifdef SUPPORT_TAGS
6854     free_all_tags(hostdata);
6855     #endif
6856     diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
6857     index cb9072a841be..069e5c50abd0 100644
6858     --- a/drivers/scsi/be2iscsi/be_main.c
6859     +++ b/drivers/scsi/be2iscsi/be_main.c
6860     @@ -4468,6 +4468,7 @@ put_shost:
6861     scsi_host_put(phba->shost);
6862     free_kset:
6863     iscsi_boot_destroy_kset(phba->boot_kset);
6864     + phba->boot_kset = NULL;
6865     return -ENOMEM;
6866     }
6867    
6868     diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
6869     index c126966130ab..ce79de822e46 100644
6870     --- a/drivers/scsi/scsi_common.c
6871     +++ b/drivers/scsi/scsi_common.c
6872     @@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
6873     ucp[3] = 0;
6874     put_unaligned_be64(info, &ucp[4]);
6875     } else if ((buf[0] & 0x7f) == 0x70) {
6876     - buf[0] |= 0x80;
6877     - put_unaligned_be64(info, &buf[3]);
6878     + /*
6879     + * Only set the 'VALID' bit if we can represent the value
6880     + * correctly; otherwise just fill out the lower bytes and
6881     + * clear the 'VALID' flag.
6882     + */
6883     + if (info <= 0xffffffffUL)
6884     + buf[0] |= 0x80;
6885     + else
6886     + buf[0] &= 0x7f;
6887     + put_unaligned_be32((u32)info, &buf[3]);
6888     }
6889    
6890     return 0;
6891     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
6892     index d749da765df1..5a5457ac9cdb 100644
6893     --- a/drivers/scsi/sd.c
6894     +++ b/drivers/scsi/sd.c
6895     @@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
6896     */
6897     if (sdkp->lbprz) {
6898     q->limits.discard_alignment = 0;
6899     - q->limits.discard_granularity = 1;
6900     + q->limits.discard_granularity = logical_block_size;
6901     } else {
6902     q->limits.discard_alignment = sdkp->unmap_alignment *
6903     logical_block_size;
6904     diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
6905     index 5e820674432c..ae7d9bdf409c 100644
6906     --- a/drivers/scsi/sg.c
6907     +++ b/drivers/scsi/sg.c
6908     @@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
6909     else
6910     hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
6911     hp->dxfer_len = mxsize;
6912     - if (hp->dxfer_direction == SG_DXFER_TO_DEV)
6913     + if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
6914     + (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
6915     hp->dxferp = (char __user *)buf + cmd_size;
6916     else
6917     hp->dxferp = NULL;
6918     diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
6919     index 292c04eec9ad..3ddcabb790a8 100644
6920     --- a/drivers/scsi/storvsc_drv.c
6921     +++ b/drivers/scsi/storvsc_drv.c
6922     @@ -914,8 +914,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
6923     do_work = true;
6924     process_err_fn = storvsc_remove_lun;
6925     break;
6926     - case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
6927     - if ((asc == 0x2a) && (ascq == 0x9)) {
6928     + case SRB_STATUS_ABORTED:
6929     + if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
6930     + (asc == 0x2a) && (ascq == 0x9)) {
6931     do_work = true;
6932     process_err_fn = storvsc_device_scan;
6933     /*
6934     diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
6935     index b8dcf5a26cc4..58d46893e5ff 100644
6936     --- a/drivers/staging/android/ion/ion_test.c
6937     +++ b/drivers/staging/android/ion/ion_test.c
6938     @@ -285,8 +285,8 @@ static int __init ion_test_init(void)
6939     {
6940     ion_test_pdev = platform_device_register_simple("ion-test",
6941     -1, NULL, 0);
6942     - if (!ion_test_pdev)
6943     - return -ENODEV;
6944     + if (IS_ERR(ion_test_pdev))
6945     + return PTR_ERR(ion_test_pdev);
6946    
6947     return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
6948     }
6949     diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
6950     index 5e8130a7d670..0e9f77924e26 100644
6951     --- a/drivers/staging/comedi/drivers/ni_mio_common.c
6952     +++ b/drivers/staging/comedi/drivers/ni_mio_common.c
6953     @@ -246,24 +246,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
6954     {
6955     if (dev->mmio)
6956     writel(data, dev->mmio + reg);
6957     -
6958     - outl(data, dev->iobase + reg);
6959     + else
6960     + outl(data, dev->iobase + reg);
6961     }
6962    
6963     static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
6964     {
6965     if (dev->mmio)
6966     writew(data, dev->mmio + reg);
6967     -
6968     - outw(data, dev->iobase + reg);
6969     + else
6970     + outw(data, dev->iobase + reg);
6971     }
6972    
6973     static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
6974     {
6975     if (dev->mmio)
6976     writeb(data, dev->mmio + reg);
6977     -
6978     - outb(data, dev->iobase + reg);
6979     + else
6980     + outb(data, dev->iobase + reg);
6981     }
6982    
6983     static uint32_t ni_readl(struct comedi_device *dev, int reg)
6984     diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
6985     index 437f723bb34d..823e47910004 100644
6986     --- a/drivers/staging/comedi/drivers/ni_tiocmd.c
6987     +++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
6988     @@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
6989     unsigned long flags;
6990     int ret = 0;
6991    
6992     - if (trig_num != cmd->start_src)
6993     + if (trig_num != cmd->start_arg)
6994     return -EINVAL;
6995    
6996     spin_lock_irqsave(&counter->lock, flags);
6997     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
6998     index 867bc6d0a68a..43d8b42c0f22 100644
6999     --- a/drivers/target/target_core_transport.c
7000     +++ b/drivers/target/target_core_transport.c
7001     @@ -2596,8 +2596,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
7002    
7003     list_for_each_entry_safe(se_cmd, tmp_cmd,
7004     &se_sess->sess_wait_list, se_cmd_list) {
7005     - list_del_init(&se_cmd->se_cmd_list);
7006     -
7007     pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
7008     " %d\n", se_cmd, se_cmd->t_state,
7009     se_cmd->se_tfo->get_cmd_state(se_cmd));
7010     diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
7011     index a0a8fd1235e2..d4b54653ecf8 100644
7012     --- a/drivers/thermal/thermal_core.c
7013     +++ b/drivers/thermal/thermal_core.c
7014     @@ -454,6 +454,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
7015     {
7016     enum thermal_trip_type type;
7017    
7018     + /* Ignore disabled trip points */
7019     + if (test_bit(trip, &tz->trips_disabled))
7020     + return;
7021     +
7022     tz->ops->get_trip_type(tz, trip, &type);
7023    
7024     if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
7025     @@ -1800,6 +1804,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
7026     {
7027     struct thermal_zone_device *tz;
7028     enum thermal_trip_type trip_type;
7029     + int trip_temp;
7030     int result;
7031     int count;
7032     int passive = 0;
7033     @@ -1871,9 +1876,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
7034     goto unregister;
7035    
7036     for (count = 0; count < trips; count++) {
7037     - tz->ops->get_trip_type(tz, count, &trip_type);
7038     + if (tz->ops->get_trip_type(tz, count, &trip_type))
7039     + set_bit(count, &tz->trips_disabled);
7040     if (trip_type == THERMAL_TRIP_PASSIVE)
7041     passive = 1;
7042     + if (tz->ops->get_trip_temp(tz, count, &trip_temp))
7043     + set_bit(count, &tz->trips_disabled);
7044     + /* Check for bogus trip points */
7045     + if (trip_temp == 0)
7046     + set_bit(count, &tz->trips_disabled);
7047     }
7048    
7049     if (!passive) {
7050     diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
7051     index 8d262bce97e4..720b9465b12e 100644
7052     --- a/drivers/tty/serial/8250/8250_port.c
7053     +++ b/drivers/tty/serial/8250/8250_port.c
7054     @@ -731,22 +731,16 @@ static int size_fifo(struct uart_8250_port *up)
7055     */
7056     static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
7057     {
7058     - unsigned char old_dll, old_dlm, old_lcr;
7059     - unsigned int id;
7060     + unsigned char old_lcr;
7061     + unsigned int id, old_dl;
7062    
7063     old_lcr = serial_in(p, UART_LCR);
7064     serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
7065     + old_dl = serial_dl_read(p);
7066     + serial_dl_write(p, 0);
7067     + id = serial_dl_read(p);
7068     + serial_dl_write(p, old_dl);
7069    
7070     - old_dll = serial_in(p, UART_DLL);
7071     - old_dlm = serial_in(p, UART_DLM);
7072     -
7073     - serial_out(p, UART_DLL, 0);
7074     - serial_out(p, UART_DLM, 0);
7075     -
7076     - id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
7077     -
7078     - serial_out(p, UART_DLL, old_dll);
7079     - serial_out(p, UART_DLM, old_dlm);
7080     serial_out(p, UART_LCR, old_lcr);
7081    
7082     return id;
7083     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
7084     index fa4e23930614..d37fdcc3143c 100644
7085     --- a/drivers/usb/class/cdc-acm.c
7086     +++ b/drivers/usb/class/cdc-acm.c
7087     @@ -1114,6 +1114,9 @@ static int acm_probe(struct usb_interface *intf,
7088     if (quirks == NO_UNION_NORMAL) {
7089     data_interface = usb_ifnum_to_if(usb_dev, 1);
7090     control_interface = usb_ifnum_to_if(usb_dev, 0);
7091     + /* we would crash */
7092     + if (!data_interface || !control_interface)
7093     + return -ENODEV;
7094     goto skip_normal_probe;
7095     }
7096    
7097     diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
7098     index 56593a9a8726..2057d91d8336 100644
7099     --- a/drivers/usb/core/driver.c
7100     +++ b/drivers/usb/core/driver.c
7101     @@ -502,11 +502,15 @@ static int usb_unbind_interface(struct device *dev)
7102     int usb_driver_claim_interface(struct usb_driver *driver,
7103     struct usb_interface *iface, void *priv)
7104     {
7105     - struct device *dev = &iface->dev;
7106     + struct device *dev;
7107     struct usb_device *udev;
7108     int retval = 0;
7109     int lpm_disable_error;
7110    
7111     + if (!iface)
7112     + return -ENODEV;
7113     +
7114     + dev = &iface->dev;
7115     if (dev->driver)
7116     return -EBUSY;
7117    
7118     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
7119     index 51b436918f78..84f65743f29a 100644
7120     --- a/drivers/usb/core/hub.c
7121     +++ b/drivers/usb/core/hub.c
7122     @@ -4292,7 +4292,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
7123     {
7124     struct usb_device *hdev = hub->hdev;
7125     struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
7126     - int i, j, retval;
7127     + int retries, operations, retval, i;
7128     unsigned delay = HUB_SHORT_RESET_TIME;
7129     enum usb_device_speed oldspeed = udev->speed;
7130     const char *speed;
7131     @@ -4394,7 +4394,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
7132     * first 8 bytes of the device descriptor to get the ep0 maxpacket
7133     * value.
7134     */
7135     - for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
7136     + for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
7137     bool did_new_scheme = false;
7138    
7139     if (use_new_scheme(udev, retry_counter)) {
7140     @@ -4421,7 +4421,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
7141     * 255 is for WUSB devices, we actually need to use
7142     * 512 (WUSB1.0[4.8.1]).
7143     */
7144     - for (j = 0; j < 3; ++j) {
7145     + for (operations = 0; operations < 3; ++operations) {
7146     buf->bMaxPacketSize0 = 0;
7147     r = usb_control_msg(udev, usb_rcvaddr0pipe(),
7148     USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
7149     @@ -4441,7 +4441,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
7150     r = -EPROTO;
7151     break;
7152     }
7153     - if (r == 0)
7154     + /*
7155     + * Some devices time out if they are powered on
7156     + * when already connected. They need a second
7157     + * reset. But only on the first attempt,
7158     + * lest we get into a time out/reset loop
7159     + */
7160     + if (r == 0 || (r == -ETIMEDOUT && retries == 0))
7161     break;
7162     }
7163     udev->descriptor.bMaxPacketSize0 =
7164     @@ -4473,7 +4479,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
7165     * authorization will assign the final address.
7166     */
7167     if (udev->wusb == 0) {
7168     - for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
7169     + for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
7170     retval = hub_set_address(udev, devnum);
7171     if (retval >= 0)
7172     break;
7173     diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
7174     index c6bfd13f6c92..1950e87b4219 100644
7175     --- a/drivers/usb/misc/iowarrior.c
7176     +++ b/drivers/usb/misc/iowarrior.c
7177     @@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
7178     iface_desc = interface->cur_altsetting;
7179     dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
7180    
7181     + if (iface_desc->desc.bNumEndpoints < 1) {
7182     + dev_err(&interface->dev, "Invalid number of endpoints\n");
7183     + retval = -EINVAL;
7184     + goto error;
7185     + }
7186     +
7187     /* set up the endpoint information */
7188     for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
7189     endpoint = &iface_desc->endpoint[i].desc;
7190     diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
7191     index 73a366de5102..9bc0e090b881 100644
7192     --- a/drivers/usb/serial/cp210x.c
7193     +++ b/drivers/usb/serial/cp210x.c
7194     @@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
7195     { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
7196     { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
7197     { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
7198     + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
7199     { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
7200     { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
7201     { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
7202     diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
7203     index 01bf53392819..244acb1299a9 100644
7204     --- a/drivers/usb/serial/cypress_m8.c
7205     +++ b/drivers/usb/serial/cypress_m8.c
7206     @@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
7207     struct usb_serial *serial = port->serial;
7208     struct cypress_private *priv;
7209    
7210     + if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
7211     + dev_err(&port->dev, "required endpoint is missing\n");
7212     + return -ENODEV;
7213     + }
7214     +
7215     priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
7216     if (!priv)
7217     return -ENOMEM;
7218     @@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
7219     cypress_set_termios(tty, port, &priv->tmp_termios);
7220    
7221     /* setup the port and start reading from the device */
7222     - if (!port->interrupt_in_urb) {
7223     - dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
7224     - __func__);
7225     - return -1;
7226     - }
7227     -
7228     usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
7229     usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
7230     port->interrupt_in_urb->transfer_buffer,
7231     diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
7232     index 12b0e67473ba..3df7b7ec178e 100644
7233     --- a/drivers/usb/serial/digi_acceleport.c
7234     +++ b/drivers/usb/serial/digi_acceleport.c
7235     @@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
7236    
7237     static int digi_startup(struct usb_serial *serial)
7238     {
7239     + struct device *dev = &serial->interface->dev;
7240     struct digi_serial *serial_priv;
7241     int ret;
7242     + int i;
7243     +
7244     + /* check whether the device has the expected number of endpoints */
7245     + if (serial->num_port_pointers < serial->type->num_ports + 1) {
7246     + dev_err(dev, "OOB endpoints missing\n");
7247     + return -ENODEV;
7248     + }
7249     +
7250     + for (i = 0; i < serial->type->num_ports + 1 ; i++) {
7251     + if (!serial->port[i]->read_urb) {
7252     + dev_err(dev, "bulk-in endpoint missing\n");
7253     + return -ENODEV;
7254     + }
7255     + if (!serial->port[i]->write_urb) {
7256     + dev_err(dev, "bulk-out endpoint missing\n");
7257     + return -ENODEV;
7258     + }
7259     + }
7260    
7261     serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
7262     if (!serial_priv)
7263     diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
7264     index 8c660ae401d8..b61f12160d37 100644
7265     --- a/drivers/usb/serial/ftdi_sio.c
7266     +++ b/drivers/usb/serial/ftdi_sio.c
7267     @@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
7268     { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
7269     { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
7270     { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
7271     + /* ICP DAS I-756xU devices */
7272     + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
7273     + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
7274     + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
7275     { } /* Terminating entry */
7276     };
7277    
7278     diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
7279     index a84df2513994..c5d6c1e73e8e 100644
7280     --- a/drivers/usb/serial/ftdi_sio_ids.h
7281     +++ b/drivers/usb/serial/ftdi_sio_ids.h
7282     @@ -872,6 +872,14 @@
7283     #define NOVITUS_BONO_E_PID 0x6010
7284    
7285     /*
7286     + * ICPDAS I-756*U devices
7287     + */
7288     +#define ICPDAS_VID 0x1b5c
7289     +#define ICPDAS_I7560U_PID 0x0103
7290     +#define ICPDAS_I7561U_PID 0x0104
7291     +#define ICPDAS_I7563U_PID 0x0105
7292     +
7293     +/*
7294     * RT Systems programming cables for various ham radios
7295     */
7296     #define RTSYSTEMS_VID 0x2100 /* Vendor ID */
7297     diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
7298     index fd707d6a10e2..89726f702202 100644
7299     --- a/drivers/usb/serial/mct_u232.c
7300     +++ b/drivers/usb/serial/mct_u232.c
7301     @@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
7302    
7303     static int mct_u232_port_probe(struct usb_serial_port *port)
7304     {
7305     + struct usb_serial *serial = port->serial;
7306     struct mct_u232_private *priv;
7307    
7308     + /* check first to simplify error handling */
7309     + if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
7310     + dev_err(&port->dev, "expected endpoint missing\n");
7311     + return -ENODEV;
7312     + }
7313     +
7314     priv = kzalloc(sizeof(*priv), GFP_KERNEL);
7315     if (!priv)
7316     return -ENOMEM;
7317    
7318     /* Use second interrupt-in endpoint for reading. */
7319     - priv->read_urb = port->serial->port[1]->interrupt_in_urb;
7320     + priv->read_urb = serial->port[1]->interrupt_in_urb;
7321     priv->read_urb->context = port;
7322    
7323     spin_lock_init(&priv->lock);
7324     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
7325     index 348e19834b83..c6f497f16526 100644
7326     --- a/drivers/usb/serial/option.c
7327     +++ b/drivers/usb/serial/option.c
7328     @@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
7329     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
7330     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
7331     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
7332     + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
7333     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
7334     { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
7335     { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
7336     { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
7337     diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
7338     index 9ff9404f99d7..c90a7e46cc7b 100644
7339     --- a/drivers/usb/storage/uas.c
7340     +++ b/drivers/usb/storage/uas.c
7341     @@ -812,7 +812,7 @@ static struct scsi_host_template uas_host_template = {
7342     .slave_configure = uas_slave_configure,
7343     .eh_abort_handler = uas_eh_abort_handler,
7344     .eh_bus_reset_handler = uas_eh_bus_reset_handler,
7345     - .can_queue = 65536, /* Is there a limit on the _host_ ? */
7346     + .can_queue = MAX_CMNDS,
7347     .this_id = -1,
7348     .sg_tablesize = SG_NONE,
7349     .skip_settle_delay = 1,
7350     diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
7351     index 71e78ef4b736..3a75f3b53452 100644
7352     --- a/drivers/watchdog/rc32434_wdt.c
7353     +++ b/drivers/watchdog/rc32434_wdt.c
7354     @@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
7355     return -EINVAL;
7356     /* Fall through */
7357     case WDIOC_GETTIMEOUT:
7358     - return copy_to_user(argp, &timeout, sizeof(int));
7359     + return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
7360     default:
7361     return -ENOTTY;
7362     }
7363     diff --git a/fs/coredump.c b/fs/coredump.c
7364     index 9ea87e9fdccf..47c32c3bfa1d 100644
7365     --- a/fs/coredump.c
7366     +++ b/fs/coredump.c
7367     @@ -32,6 +32,9 @@
7368     #include <linux/pipe_fs_i.h>
7369     #include <linux/oom.h>
7370     #include <linux/compat.h>
7371     +#include <linux/sched.h>
7372     +#include <linux/fs.h>
7373     +#include <linux/path.h>
7374     #include <linux/timekeeping.h>
7375    
7376     #include <asm/uaccess.h>
7377     @@ -649,6 +652,8 @@ void do_coredump(const siginfo_t *siginfo)
7378     }
7379     } else {
7380     struct inode *inode;
7381     + int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
7382     + O_LARGEFILE | O_EXCL;
7383    
7384     if (cprm.limit < binfmt->min_coredump)
7385     goto fail_unlock;
7386     @@ -687,10 +692,27 @@ void do_coredump(const siginfo_t *siginfo)
7387     * what matters is that at least one of the two processes
7388     * writes its coredump successfully, not which one.
7389     */
7390     - cprm.file = filp_open(cn.corename,
7391     - O_CREAT | 2 | O_NOFOLLOW |
7392     - O_LARGEFILE | O_EXCL,
7393     - 0600);
7394     + if (need_suid_safe) {
7395     + /*
7396     + * Using user namespaces, normal user tasks can change
7397     + * their current->fs->root to point to arbitrary
7398     + * directories. Since the intention of the "only dump
7399     + * with a fully qualified path" rule is to control where
7400     + * coredumps may be placed using root privileges,
7401     + * current->fs->root must not be used. Instead, use the
7402     + * root directory of init_task.
7403     + */
7404     + struct path root;
7405     +
7406     + task_lock(&init_task);
7407     + get_fs_root(init_task.fs, &root);
7408     + task_unlock(&init_task);
7409     + cprm.file = file_open_root(root.dentry, root.mnt,
7410     + cn.corename, open_flags, 0600);
7411     + path_put(&root);
7412     + } else {
7413     + cprm.file = filp_open(cn.corename, open_flags, 0600);
7414     + }
7415     if (IS_ERR(cprm.file))
7416     goto fail_unlock;
7417    
7418     diff --git a/fs/fhandle.c b/fs/fhandle.c
7419     index d59712dfa3e7..ca3c3dd01789 100644
7420     --- a/fs/fhandle.c
7421     +++ b/fs/fhandle.c
7422     @@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd,
7423     path_put(&path);
7424     return fd;
7425     }
7426     - file = file_open_root(path.dentry, path.mnt, "", open_flag);
7427     + file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
7428     if (IS_ERR(file)) {
7429     put_unused_fd(fd);
7430     retval = PTR_ERR(file);
7431     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
7432     index 5c46ed9f3e14..fee81e8768c9 100644
7433     --- a/fs/fs-writeback.c
7434     +++ b/fs/fs-writeback.c
7435     @@ -281,13 +281,15 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
7436     wb_get(wb);
7437     spin_unlock(&inode->i_lock);
7438     spin_lock(&wb->list_lock);
7439     - wb_put(wb); /* not gonna deref it anymore */
7440    
7441     /* i_wb may have changed inbetween, can't use inode_to_wb() */
7442     - if (likely(wb == inode->i_wb))
7443     - return wb; /* @inode already has ref */
7444     + if (likely(wb == inode->i_wb)) {
7445     + wb_put(wb); /* @inode already has ref */
7446     + return wb;
7447     + }
7448    
7449     spin_unlock(&wb->list_lock);
7450     + wb_put(wb);
7451     cpu_relax();
7452     spin_lock(&inode->i_lock);
7453     }
7454     @@ -1337,10 +1339,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
7455     * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
7456     * and does more profound writeback list handling in writeback_sb_inodes().
7457     */
7458     -static int
7459     -writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
7460     - struct writeback_control *wbc)
7461     +static int writeback_single_inode(struct inode *inode,
7462     + struct writeback_control *wbc)
7463     {
7464     + struct bdi_writeback *wb;
7465     int ret = 0;
7466    
7467     spin_lock(&inode->i_lock);
7468     @@ -1378,7 +1380,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
7469     ret = __writeback_single_inode(inode, wbc);
7470    
7471     wbc_detach_inode(wbc);
7472     - spin_lock(&wb->list_lock);
7473     +
7474     + wb = inode_to_wb_and_lock_list(inode);
7475     spin_lock(&inode->i_lock);
7476     /*
7477     * If inode is clean, remove it from writeback lists. Otherwise don't
7478     @@ -1453,6 +1456,7 @@ static long writeback_sb_inodes(struct super_block *sb,
7479    
7480     while (!list_empty(&wb->b_io)) {
7481     struct inode *inode = wb_inode(wb->b_io.prev);
7482     + struct bdi_writeback *tmp_wb;
7483    
7484     if (inode->i_sb != sb) {
7485     if (work->sb) {
7486     @@ -1543,15 +1547,23 @@ static long writeback_sb_inodes(struct super_block *sb,
7487     cond_resched();
7488     }
7489    
7490     -
7491     - spin_lock(&wb->list_lock);
7492     + /*
7493     + * Requeue @inode if still dirty. Be careful as @inode may
7494     + * have been switched to another wb in the meantime.
7495     + */
7496     + tmp_wb = inode_to_wb_and_lock_list(inode);
7497     spin_lock(&inode->i_lock);
7498     if (!(inode->i_state & I_DIRTY_ALL))
7499     wrote++;
7500     - requeue_inode(inode, wb, &wbc);
7501     + requeue_inode(inode, tmp_wb, &wbc);
7502     inode_sync_complete(inode);
7503     spin_unlock(&inode->i_lock);
7504    
7505     + if (unlikely(tmp_wb != wb)) {
7506     + spin_unlock(&tmp_wb->list_lock);
7507     + spin_lock(&wb->list_lock);
7508     + }
7509     +
7510     /*
7511     * bail out to wb_writeback() often enough to check
7512     * background threshold and other termination conditions.
7513     @@ -2338,7 +2350,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
7514     */
7515     int write_inode_now(struct inode *inode, int sync)
7516     {
7517     - struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
7518     struct writeback_control wbc = {
7519     .nr_to_write = LONG_MAX,
7520     .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
7521     @@ -2350,7 +2361,7 @@ int write_inode_now(struct inode *inode, int sync)
7522     wbc.nr_to_write = 0;
7523    
7524     might_sleep();
7525     - return writeback_single_inode(inode, wb, &wbc);
7526     + return writeback_single_inode(inode, &wbc);
7527     }
7528     EXPORT_SYMBOL(write_inode_now);
7529    
7530     @@ -2367,7 +2378,7 @@ EXPORT_SYMBOL(write_inode_now);
7531     */
7532     int sync_inode(struct inode *inode, struct writeback_control *wbc)
7533     {
7534     - return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
7535     + return writeback_single_inode(inode, wbc);
7536     }
7537     EXPORT_SYMBOL(sync_inode);
7538    
7539     diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
7540     index 8e3ee1936c7e..c5b6b7165489 100644
7541     --- a/fs/fuse/cuse.c
7542     +++ b/fs/fuse/cuse.c
7543     @@ -90,7 +90,7 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
7544    
7545     static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
7546     {
7547     - struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
7548     + struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
7549     loff_t pos = 0;
7550    
7551     return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
7552     @@ -98,7 +98,7 @@ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
7553    
7554     static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
7555     {
7556     - struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
7557     + struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
7558     loff_t pos = 0;
7559     /*
7560     * No locking or generic_write_checks(), the server is
7561     diff --git a/fs/fuse/file.c b/fs/fuse/file.c
7562     index b03d253ece15..416108b42412 100644
7563     --- a/fs/fuse/file.c
7564     +++ b/fs/fuse/file.c
7565     @@ -528,6 +528,11 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
7566     }
7567     }
7568    
7569     +static void fuse_io_release(struct kref *kref)
7570     +{
7571     + kfree(container_of(kref, struct fuse_io_priv, refcnt));
7572     +}
7573     +
7574     static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
7575     {
7576     if (io->err)
7577     @@ -585,8 +590,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
7578     }
7579    
7580     io->iocb->ki_complete(io->iocb, res, 0);
7581     - kfree(io);
7582     }
7583     +
7584     + kref_put(&io->refcnt, fuse_io_release);
7585     }
7586    
7587     static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
7588     @@ -613,6 +619,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
7589     size_t num_bytes, struct fuse_io_priv *io)
7590     {
7591     spin_lock(&io->lock);
7592     + kref_get(&io->refcnt);
7593     io->size += num_bytes;
7594     io->reqs++;
7595     spin_unlock(&io->lock);
7596     @@ -691,7 +698,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
7597    
7598     static int fuse_do_readpage(struct file *file, struct page *page)
7599     {
7600     - struct fuse_io_priv io = { .async = 0, .file = file };
7601     + struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
7602     struct inode *inode = page->mapping->host;
7603     struct fuse_conn *fc = get_fuse_conn(inode);
7604     struct fuse_req *req;
7605     @@ -984,7 +991,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
7606     size_t res;
7607     unsigned offset;
7608     unsigned i;
7609     - struct fuse_io_priv io = { .async = 0, .file = file };
7610     + struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
7611    
7612     for (i = 0; i < req->num_pages; i++)
7613     fuse_wait_on_page_writeback(inode, req->pages[i]->index);
7614     @@ -1398,7 +1405,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
7615    
7616     static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
7617     {
7618     - struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
7619     + struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
7620     return __fuse_direct_read(&io, to, &iocb->ki_pos);
7621     }
7622    
7623     @@ -1406,7 +1413,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
7624     {
7625     struct file *file = iocb->ki_filp;
7626     struct inode *inode = file_inode(file);
7627     - struct fuse_io_priv io = { .async = 0, .file = file };
7628     + struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
7629     ssize_t res;
7630    
7631     if (is_bad_inode(inode))
7632     @@ -2843,6 +2850,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
7633     loff_t i_size;
7634     size_t count = iov_iter_count(iter);
7635     struct fuse_io_priv *io;
7636     + bool is_sync = is_sync_kiocb(iocb);
7637    
7638     pos = offset;
7639     inode = file->f_mapping->host;
7640     @@ -2863,6 +2871,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
7641     if (!io)
7642     return -ENOMEM;
7643     spin_lock_init(&io->lock);
7644     + kref_init(&io->refcnt);
7645     io->reqs = 1;
7646     io->bytes = -1;
7647     io->size = 0;
7648     @@ -2882,12 +2891,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
7649     * to wait on real async I/O requests, so we must submit this request
7650     * synchronously.
7651     */
7652     - if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
7653     + if (!is_sync && (offset + count > i_size) &&
7654     iov_iter_rw(iter) == WRITE)
7655     io->async = false;
7656    
7657     - if (io->async && is_sync_kiocb(iocb))
7658     + if (io->async && is_sync) {
7659     + /*
7660     + * Additional reference to keep io around after
7661     + * calling fuse_aio_complete()
7662     + */
7663     + kref_get(&io->refcnt);
7664     io->done = &wait;
7665     + }
7666    
7667     if (iov_iter_rw(iter) == WRITE) {
7668     ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
7669     @@ -2900,14 +2915,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
7670     fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
7671    
7672     /* we have a non-extending, async request, so return */
7673     - if (!is_sync_kiocb(iocb))
7674     + if (!is_sync)
7675     return -EIOCBQUEUED;
7676    
7677     wait_for_completion(&wait);
7678     ret = fuse_get_res_by_io(io);
7679     }
7680    
7681     - kfree(io);
7682     + kref_put(&io->refcnt, fuse_io_release);
7683    
7684     if (iov_iter_rw(iter) == WRITE) {
7685     if (ret > 0)
7686     diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
7687     index ce394b5fe6b4..eddbe02c4028 100644
7688     --- a/fs/fuse/fuse_i.h
7689     +++ b/fs/fuse/fuse_i.h
7690     @@ -22,6 +22,7 @@
7691     #include <linux/rbtree.h>
7692     #include <linux/poll.h>
7693     #include <linux/workqueue.h>
7694     +#include <linux/kref.h>
7695    
7696     /** Max number of pages that can be used in a single read request */
7697     #define FUSE_MAX_PAGES_PER_REQ 32
7698     @@ -243,6 +244,7 @@ struct fuse_args {
7699    
7700     /** The request IO state (for asynchronous processing) */
7701     struct fuse_io_priv {
7702     + struct kref refcnt;
7703     int async;
7704     spinlock_t lock;
7705     unsigned reqs;
7706     @@ -256,6 +258,13 @@ struct fuse_io_priv {
7707     struct completion *done;
7708     };
7709    
7710     +#define FUSE_IO_PRIV_SYNC(f) \
7711     +{ \
7712     + .refcnt = { ATOMIC_INIT(1) }, \
7713     + .async = 0, \
7714     + .file = f, \
7715     +}
7716     +
7717     /**
7718     * Request flags
7719     *
7720     diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
7721     index 81e622681c82..624a57a9c4aa 100644
7722     --- a/fs/jbd2/journal.c
7723     +++ b/fs/jbd2/journal.c
7724     @@ -1408,11 +1408,12 @@ out:
7725     /**
7726     * jbd2_mark_journal_empty() - Mark on disk journal as empty.
7727     * @journal: The journal to update.
7728     + * @write_op: With which operation should we write the journal sb
7729     *
7730     * Update a journal's dynamic superblock fields to show that journal is empty.
7731     * Write updated superblock to disk waiting for IO to complete.
7732     */
7733     -static void jbd2_mark_journal_empty(journal_t *journal)
7734     +static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
7735     {
7736     journal_superblock_t *sb = journal->j_superblock;
7737    
7738     @@ -1430,7 +1431,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
7739     sb->s_start = cpu_to_be32(0);
7740     read_unlock(&journal->j_state_lock);
7741    
7742     - jbd2_write_superblock(journal, WRITE_FUA);
7743     + jbd2_write_superblock(journal, write_op);
7744    
7745     /* Log is no longer empty */
7746     write_lock(&journal->j_state_lock);
7747     @@ -1716,7 +1717,13 @@ int jbd2_journal_destroy(journal_t *journal)
7748     if (journal->j_sb_buffer) {
7749     if (!is_journal_aborted(journal)) {
7750     mutex_lock(&journal->j_checkpoint_mutex);
7751     - jbd2_mark_journal_empty(journal);
7752     +
7753     + write_lock(&journal->j_state_lock);
7754     + journal->j_tail_sequence =
7755     + ++journal->j_transaction_sequence;
7756     + write_unlock(&journal->j_state_lock);
7757     +
7758     + jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
7759     mutex_unlock(&journal->j_checkpoint_mutex);
7760     } else
7761     err = -EIO;
7762     @@ -1975,7 +1982,7 @@ int jbd2_journal_flush(journal_t *journal)
7763     * the magic code for a fully-recovered superblock. Any future
7764     * commits of data to the journal will restore the current
7765     * s_start value. */
7766     - jbd2_mark_journal_empty(journal);
7767     + jbd2_mark_journal_empty(journal, WRITE_FUA);
7768     mutex_unlock(&journal->j_checkpoint_mutex);
7769     write_lock(&journal->j_state_lock);
7770     J_ASSERT(!journal->j_running_transaction);
7771     @@ -2021,7 +2028,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
7772     if (write) {
7773     /* Lock to make assertions happy... */
7774     mutex_lock(&journal->j_checkpoint_mutex);
7775     - jbd2_mark_journal_empty(journal);
7776     + jbd2_mark_journal_empty(journal, WRITE_FUA);
7777     mutex_unlock(&journal->j_checkpoint_mutex);
7778     }
7779    
7780     diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
7781     index 4cba7865f496..f8082c7cde8b 100644
7782     --- a/fs/nfsd/nfs4proc.c
7783     +++ b/fs/nfsd/nfs4proc.c
7784     @@ -878,6 +878,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7785     &exp, &dentry);
7786     if (err)
7787     return err;
7788     + fh_unlock(&cstate->current_fh);
7789     if (d_really_is_negative(dentry)) {
7790     exp_put(exp);
7791     err = nfserr_noent;
7792     diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
7793     index d6ef0955a979..1600ec470ce7 100644
7794     --- a/fs/nfsd/nfs4xdr.c
7795     +++ b/fs/nfsd/nfs4xdr.c
7796     @@ -1072,8 +1072,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
7797    
7798     READ_BUF(4);
7799     rename->rn_snamelen = be32_to_cpup(p++);
7800     - READ_BUF(rename->rn_snamelen + 4);
7801     + READ_BUF(rename->rn_snamelen);
7802     SAVEMEM(rename->rn_sname, rename->rn_snamelen);
7803     + READ_BUF(4);
7804     rename->rn_tnamelen = be32_to_cpup(p++);
7805     READ_BUF(rename->rn_tnamelen);
7806     SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
7807     @@ -1155,13 +1156,14 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
7808     READ_BUF(8);
7809     setclientid->se_callback_prog = be32_to_cpup(p++);
7810     setclientid->se_callback_netid_len = be32_to_cpup(p++);
7811     -
7812     - READ_BUF(setclientid->se_callback_netid_len + 4);
7813     + READ_BUF(setclientid->se_callback_netid_len);
7814     SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
7815     + READ_BUF(4);
7816     setclientid->se_callback_addr_len = be32_to_cpup(p++);
7817    
7818     - READ_BUF(setclientid->se_callback_addr_len + 4);
7819     + READ_BUF(setclientid->se_callback_addr_len);
7820     SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
7821     + READ_BUF(4);
7822     setclientid->se_callback_ident = be32_to_cpup(p++);
7823    
7824     DECODE_TAIL;
7825     @@ -1835,8 +1837,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
7826    
7827     READ_BUF(4);
7828     argp->taglen = be32_to_cpup(p++);
7829     - READ_BUF(argp->taglen + 8);
7830     + READ_BUF(argp->taglen);
7831     SAVEMEM(argp->tag, argp->taglen);
7832     + READ_BUF(8);
7833     argp->minorversion = be32_to_cpup(p++);
7834     argp->opcnt = be32_to_cpup(p++);
7835     max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
7836     diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
7837     index a76b9ea7722e..a2370e2c7295 100644
7838     --- a/fs/ocfs2/cluster/heartbeat.c
7839     +++ b/fs/ocfs2/cluster/heartbeat.c
7840     @@ -1445,8 +1445,8 @@ static void o2hb_region_release(struct config_item *item)
7841     debugfs_remove(reg->hr_debug_dir);
7842     kfree(reg->hr_db_livenodes);
7843     kfree(reg->hr_db_regnum);
7844     - kfree(reg->hr_debug_elapsed_time);
7845     - kfree(reg->hr_debug_pinned);
7846     + kfree(reg->hr_db_elapsed_time);
7847     + kfree(reg->hr_db_pinned);
7848    
7849     spin_lock(&o2hb_live_lock);
7850     list_del(&reg->hr_all_item);
7851     diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
7852     index e36d63ff1783..f90931335c6b 100644
7853     --- a/fs/ocfs2/dlm/dlmconvert.c
7854     +++ b/fs/ocfs2/dlm/dlmconvert.c
7855     @@ -262,6 +262,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
7856     struct dlm_lock *lock, int flags, int type)
7857     {
7858     enum dlm_status status;
7859     + u8 old_owner = res->owner;
7860    
7861     mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
7862     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
7863     @@ -287,6 +288,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
7864     status = DLM_DENIED;
7865     goto bail;
7866     }
7867     +
7868     + if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
7869     + mlog(0, "last convert request returned DLM_RECOVERING, but "
7870     + "owner has already queued and sent ast to me. res %.*s, "
7871     + "(cookie=%u:%llu, type=%d, conv=%d)\n",
7872     + res->lockname.len, res->lockname.name,
7873     + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
7874     + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
7875     + lock->ml.type, lock->ml.convert_type);
7876     + status = DLM_NORMAL;
7877     + goto bail;
7878     + }
7879     +
7880     res->state |= DLM_LOCK_RES_IN_PROGRESS;
7881     /* move lock to local convert queue */
7882     /* do not alter lock refcount. switching lists. */
7883     @@ -316,11 +330,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
7884     spin_lock(&res->spinlock);
7885     res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
7886     lock->convert_pending = 0;
7887     - /* if it failed, move it back to granted queue */
7888     + /* if it failed, move it back to granted queue.
7889     + * if master returns DLM_NORMAL and then down before sending ast,
7890     + * it may have already been moved to granted queue, reset to
7891     + * DLM_RECOVERING and retry convert */
7892     if (status != DLM_NORMAL) {
7893     if (status != DLM_NOTQUEUED)
7894     dlm_error(status);
7895     dlm_revert_pending_convert(res, lock);
7896     + } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
7897     + (old_owner != res->owner)) {
7898     + mlog(0, "res %.*s is in recovering or has been recovered.\n",
7899     + res->lockname.len, res->lockname.name);
7900     + status = DLM_RECOVERING;
7901     }
7902     bail:
7903     spin_unlock(&res->spinlock);
7904     diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
7905     index b94a425f0175..23d0ab881f6e 100644
7906     --- a/fs/ocfs2/dlm/dlmrecovery.c
7907     +++ b/fs/ocfs2/dlm/dlmrecovery.c
7908     @@ -2071,7 +2071,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
7909     dlm_lock_get(lock);
7910     if (lock->convert_pending) {
7911     /* move converting lock back to granted */
7912     - BUG_ON(i != DLM_CONVERTING_LIST);
7913     mlog(0, "node died with convert pending "
7914     "on %.*s. move back to granted list.\n",
7915     res->lockname.len, res->lockname.name);
7916     diff --git a/fs/open.c b/fs/open.c
7917     index 55bdc75e2172..17cb6b1dab75 100644
7918     --- a/fs/open.c
7919     +++ b/fs/open.c
7920     @@ -992,14 +992,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
7921     EXPORT_SYMBOL(filp_open);
7922    
7923     struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
7924     - const char *filename, int flags)
7925     + const char *filename, int flags, umode_t mode)
7926     {
7927     struct open_flags op;
7928     - int err = build_open_flags(flags, 0, &op);
7929     + int err = build_open_flags(flags, mode, &op);
7930     if (err)
7931     return ERR_PTR(err);
7932     - if (flags & O_CREAT)
7933     - return ERR_PTR(-EINVAL);
7934     return do_file_open_root(dentry, mnt, filename, &op);
7935     }
7936     EXPORT_SYMBOL(file_open_root);
7937     diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
7938     index 2256e7e23e67..3f1190d18991 100644
7939     --- a/fs/proc_namespace.c
7940     +++ b/fs/proc_namespace.c
7941     @@ -199,6 +199,8 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
7942     if (sb->s_op->show_devname) {
7943     seq_puts(m, "device ");
7944     err = sb->s_op->show_devname(m, mnt_path.dentry);
7945     + if (err)
7946     + goto out;
7947     } else {
7948     if (r->mnt_devname) {
7949     seq_puts(m, "device ");
7950     diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
7951     index 3c3b81bb6dfe..850d17fa0aa3 100644
7952     --- a/fs/quota/dquot.c
7953     +++ b/fs/quota/dquot.c
7954     @@ -1398,7 +1398,7 @@ static int dquot_active(const struct inode *inode)
7955     static int __dquot_initialize(struct inode *inode, int type)
7956     {
7957     int cnt, init_needed = 0;
7958     - struct dquot **dquots, *got[MAXQUOTAS];
7959     + struct dquot **dquots, *got[MAXQUOTAS] = {};
7960     struct super_block *sb = inode->i_sb;
7961     qsize_t rsv;
7962     int ret = 0;
7963     @@ -1415,7 +1415,6 @@ static int __dquot_initialize(struct inode *inode, int type)
7964     int rc;
7965     struct dquot *dquot;
7966    
7967     - got[cnt] = NULL;
7968     if (type != -1 && cnt != type)
7969     continue;
7970     /*
7971     diff --git a/fs/splice.c b/fs/splice.c
7972     index 82bc0d64fc38..19e0b103d253 100644
7973     --- a/fs/splice.c
7974     +++ b/fs/splice.c
7975     @@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
7976     unsigned int spd_pages = spd->nr_pages;
7977     int ret, do_wakeup, page_nr;
7978    
7979     + if (!spd_pages)
7980     + return 0;
7981     +
7982     ret = 0;
7983     do_wakeup = 0;
7984     page_nr = 0;
7985     diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
7986     index 0ef7c2ed3f8a..4fa14820e2e2 100644
7987     --- a/fs/xfs/xfs_attr_list.c
7988     +++ b/fs/xfs/xfs_attr_list.c
7989     @@ -202,8 +202,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
7990     sbp->namelen,
7991     sbp->valuelen,
7992     &sbp->name[sbp->namelen]);
7993     - if (error)
7994     + if (error) {
7995     + kmem_free(sbuf);
7996     return error;
7997     + }
7998     if (context->seen_enough)
7999     break;
8000     cursor->offset++;
8001     @@ -454,14 +456,13 @@ xfs_attr3_leaf_list_int(
8002     args.rmtblkcnt = xfs_attr3_rmt_blocks(
8003     args.dp->i_mount, valuelen);
8004     retval = xfs_attr_rmtval_get(&args);
8005     - if (retval)
8006     - return retval;
8007     - retval = context->put_listent(context,
8008     - entry->flags,
8009     - name_rmt->name,
8010     - (int)name_rmt->namelen,
8011     - valuelen,
8012     - args.value);
8013     + if (!retval)
8014     + retval = context->put_listent(context,
8015     + entry->flags,
8016     + name_rmt->name,
8017     + (int)name_rmt->namelen,
8018     + valuelen,
8019     + args.value);
8020     kmem_free(args.value);
8021     } else {
8022     retval = context->put_listent(context,
8023     diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
8024     index c30266e94806..8ef0ccbf8167 100644
8025     --- a/include/asm-generic/bitops/lock.h
8026     +++ b/include/asm-generic/bitops/lock.h
8027     @@ -29,16 +29,16 @@ do { \
8028     * @nr: the bit to set
8029     * @addr: the address to start counting from
8030     *
8031     - * This operation is like clear_bit_unlock, however it is not atomic.
8032     - * It does provide release barrier semantics so it can be used to unlock
8033     - * a bit lock, however it would only be used if no other CPU can modify
8034     - * any bits in the memory until the lock is released (a good example is
8035     - * if the bit lock itself protects access to the other bits in the word).
8036     + * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
8037     + * the bits in the word are protected by this lock some archs can use weaker
8038     + * ops to safely unlock.
8039     + *
8040     + * See for example x86's implementation.
8041     */
8042     #define __clear_bit_unlock(nr, addr) \
8043     do { \
8044     - smp_mb(); \
8045     - __clear_bit(nr, addr); \
8046     + smp_mb__before_atomic(); \
8047     + clear_bit(nr, addr); \
8048     } while (0)
8049    
8050     #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
8051     diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
8052     index 789471dba6fb..89d944b25d87 100644
8053     --- a/include/linux/cgroup-defs.h
8054     +++ b/include/linux/cgroup-defs.h
8055     @@ -210,6 +210,9 @@ struct css_set {
8056     /* all css_task_iters currently walking this cset */
8057     struct list_head task_iters;
8058    
8059     + /* dead and being drained, ignore for migration */
8060     + bool dead;
8061     +
8062     /* For RCU-protected deletion */
8063     struct rcu_head rcu_head;
8064     };
8065     diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
8066     index ec1c61c87d89..899ab9f8549e 100644
8067     --- a/include/linux/device-mapper.h
8068     +++ b/include/linux/device-mapper.h
8069     @@ -124,6 +124,8 @@ struct dm_dev {
8070     char name[16];
8071     };
8072    
8073     +dev_t dm_get_dev_t(const char *path);
8074     +
8075     /*
8076     * Constructors should call these functions to ensure destination devices
8077     * are opened/closed correctly.
8078     diff --git a/include/linux/fs.h b/include/linux/fs.h
8079     index ae681002100a..2c7f8d9c3c70 100644
8080     --- a/include/linux/fs.h
8081     +++ b/include/linux/fs.h
8082     @@ -2259,7 +2259,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
8083     extern struct file *file_open_name(struct filename *, int, umode_t);
8084     extern struct file *filp_open(const char *, int, umode_t);
8085     extern struct file *file_open_root(struct dentry *, struct vfsmount *,
8086     - const char *, int);
8087     + const char *, int, umode_t);
8088     extern struct file * dentry_open(const struct path *, int, const struct cred *);
8089     extern int filp_close(struct file *, fl_owner_t id);
8090    
8091     diff --git a/include/linux/kernel.h b/include/linux/kernel.h
8092     index f31638c6e873..95452f72349a 100644
8093     --- a/include/linux/kernel.h
8094     +++ b/include/linux/kernel.h
8095     @@ -635,7 +635,7 @@ do { \
8096    
8097     #define do_trace_printk(fmt, args...) \
8098     do { \
8099     - static const char *trace_printk_fmt \
8100     + static const char *trace_printk_fmt __used \
8101     __attribute__((section("__trace_printk_fmt"))) = \
8102     __builtin_constant_p(fmt) ? fmt : NULL; \
8103     \
8104     @@ -679,7 +679,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
8105     */
8106    
8107     #define trace_puts(str) ({ \
8108     - static const char *trace_printk_fmt \
8109     + static const char *trace_printk_fmt __used \
8110     __attribute__((section("__trace_printk_fmt"))) = \
8111     __builtin_constant_p(str) ? str : NULL; \
8112     \
8113     @@ -701,7 +701,7 @@ extern void trace_dump_stack(int skip);
8114     #define ftrace_vprintk(fmt, vargs) \
8115     do { \
8116     if (__builtin_constant_p(fmt)) { \
8117     - static const char *trace_printk_fmt \
8118     + static const char *trace_printk_fmt __used \
8119     __attribute__((section("__trace_printk_fmt"))) = \
8120     __builtin_constant_p(fmt) ? fmt : NULL; \
8121     \
8122     diff --git a/include/linux/pci.h b/include/linux/pci.h
8123     index 27716254dcc5..60042ab5d7bd 100644
8124     --- a/include/linux/pci.h
8125     +++ b/include/linux/pci.h
8126     @@ -359,6 +359,7 @@ struct pci_dev {
8127     unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
8128     unsigned int irq_managed:1;
8129     unsigned int has_secondary_link:1;
8130     + unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
8131     pci_dev_flags_t dev_flags;
8132     atomic_t enable_cnt; /* pci_enable_device has been called */
8133    
8134     diff --git a/include/linux/thermal.h b/include/linux/thermal.h
8135     index e13a1ace50e9..4a849f19e6c9 100644
8136     --- a/include/linux/thermal.h
8137     +++ b/include/linux/thermal.h
8138     @@ -156,6 +156,7 @@ struct thermal_attr {
8139     * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
8140     * @devdata: private pointer for device private data
8141     * @trips: number of trip points the thermal zone supports
8142     + * @trips_disabled; bitmap for disabled trips
8143     * @passive_delay: number of milliseconds to wait between polls when
8144     * performing passive cooling.
8145     * @polling_delay: number of milliseconds to wait between polls when
8146     @@ -191,6 +192,7 @@ struct thermal_zone_device {
8147     struct thermal_attr *trip_hyst_attrs;
8148     void *devdata;
8149     int trips;
8150     + unsigned long trips_disabled; /* bitmap for disabled trips */
8151     int passive_delay;
8152     int polling_delay;
8153     int temperature;
8154     diff --git a/include/linux/tty.h b/include/linux/tty.h
8155     index d9fb4b043f56..19199c26783f 100644
8156     --- a/include/linux/tty.h
8157     +++ b/include/linux/tty.h
8158     @@ -589,7 +589,7 @@ static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
8159     count = ld->ops->receive_buf2(ld->tty, p, f, count);
8160     else {
8161     count = min_t(int, count, ld->tty->receive_room);
8162     - if (count)
8163     + if (count && ld->ops->receive_buf)
8164     ld->ops->receive_buf(ld->tty, p, f, count);
8165     }
8166     return count;
8167     diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
8168     index c21c38ce7450..93e63c56f48f 100644
8169     --- a/include/sound/hdaudio.h
8170     +++ b/include/sound/hdaudio.h
8171     @@ -168,11 +168,13 @@ int snd_hdac_power_up(struct hdac_device *codec);
8172     int snd_hdac_power_down(struct hdac_device *codec);
8173     int snd_hdac_power_up_pm(struct hdac_device *codec);
8174     int snd_hdac_power_down_pm(struct hdac_device *codec);
8175     +int snd_hdac_keep_power_up(struct hdac_device *codec);
8176     #else
8177     static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
8178     static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
8179     static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
8180     static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
8181     +static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
8182     #endif
8183    
8184     /*
8185     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
8186     index d27904c193da..6a498daf2eec 100644
8187     --- a/kernel/cgroup.c
8188     +++ b/kernel/cgroup.c
8189     @@ -2474,6 +2474,14 @@ static void cgroup_migrate_add_src(struct css_set *src_cset,
8190     lockdep_assert_held(&cgroup_mutex);
8191     lockdep_assert_held(&css_set_lock);
8192    
8193     + /*
8194     + * If ->dead, @src_set is associated with one or more dead cgroups
8195     + * and doesn't contain any migratable tasks. Ignore it early so
8196     + * that the rest of migration path doesn't get confused by it.
8197     + */
8198     + if (src_cset->dead)
8199     + return;
8200     +
8201     src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
8202    
8203     if (!list_empty(&src_cset->mg_preload_node))
8204     @@ -5114,6 +5122,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
8205     __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
8206     {
8207     struct cgroup_subsys_state *css;
8208     + struct cgrp_cset_link *link;
8209     int ssid;
8210    
8211     lockdep_assert_held(&cgroup_mutex);
8212     @@ -5134,11 +5143,18 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
8213     return -EBUSY;
8214    
8215     /*
8216     - * Mark @cgrp dead. This prevents further task migration and child
8217     - * creation by disabling cgroup_lock_live_group().
8218     + * Mark @cgrp and the associated csets dead. The former prevents
8219     + * further task migration and child creation by disabling
8220     + * cgroup_lock_live_group(). The latter makes the csets ignored by
8221     + * the migration path.
8222     */
8223     cgrp->self.flags &= ~CSS_ONLINE;
8224    
8225     + spin_lock_bh(&css_set_lock);
8226     + list_for_each_entry(link, &cgrp->cset_links, cset_link)
8227     + link->cset->dead = true;
8228     + spin_unlock_bh(&css_set_lock);
8229     +
8230     /* initiate massacre of all css's */
8231     for_each_css(css, ssid, cgrp)
8232     kill_css(css);
8233     diff --git a/kernel/events/core.c b/kernel/events/core.c
8234     index 614614821f00..f0b4b328d8f5 100644
8235     --- a/kernel/events/core.c
8236     +++ b/kernel/events/core.c
8237     @@ -8001,6 +8001,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
8238     }
8239     }
8240    
8241     + /* symmetric to unaccount_event() in _free_event() */
8242     + account_event(event);
8243     +
8244     return event;
8245    
8246     err_per_task:
8247     @@ -8364,8 +8367,6 @@ SYSCALL_DEFINE5(perf_event_open,
8248     }
8249     }
8250    
8251     - account_event(event);
8252     -
8253     /*
8254     * Special case software events and allow them to be part of
8255     * any hardware group.
8256     @@ -8662,8 +8663,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
8257     /* Mark owner so we could distinguish it from user events. */
8258     event->owner = TASK_TOMBSTONE;
8259    
8260     - account_event(event);
8261     -
8262     ctx = find_get_context(event->pmu, task, event);
8263     if (IS_ERR(ctx)) {
8264     err = PTR_ERR(ctx);
8265     diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
8266     index b7342a24f559..b7dd5718836e 100644
8267     --- a/kernel/power/hibernate.c
8268     +++ b/kernel/power/hibernate.c
8269     @@ -339,6 +339,7 @@ int hibernation_snapshot(int platform_mode)
8270     pm_message_t msg;
8271     int error;
8272    
8273     + pm_suspend_clear_flags();
8274     error = platform_begin(platform_mode);
8275     if (error)
8276     goto Close;
8277     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
8278     index 41f6b2215aa8..a74073f8c08c 100644
8279     --- a/kernel/sched/core.c
8280     +++ b/kernel/sched/core.c
8281     @@ -5630,6 +5630,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
8282    
8283     case CPU_UP_PREPARE:
8284     rq->calc_load_update = calc_load_update;
8285     + account_reset_rq(rq);
8286     break;
8287    
8288     case CPU_ONLINE:
8289     diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
8290     index b2ab2ffb1adc..ab2b5fb9821d 100644
8291     --- a/kernel/sched/cputime.c
8292     +++ b/kernel/sched/cputime.c
8293     @@ -262,21 +262,21 @@ static __always_inline bool steal_account_process_tick(void)
8294     #ifdef CONFIG_PARAVIRT
8295     if (static_key_false(&paravirt_steal_enabled)) {
8296     u64 steal;
8297     - cputime_t steal_ct;
8298     + unsigned long steal_jiffies;
8299    
8300     steal = paravirt_steal_clock(smp_processor_id());
8301     steal -= this_rq()->prev_steal_time;
8302    
8303     /*
8304     - * cputime_t may be less precise than nsecs (eg: if it's
8305     - * based on jiffies). Lets cast the result to cputime
8306     + * steal is in nsecs but our caller is expecting steal
8307     + * time in jiffies. Lets cast the result to jiffies
8308     * granularity and account the rest on the next rounds.
8309     */
8310     - steal_ct = nsecs_to_cputime(steal);
8311     - this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
8312     + steal_jiffies = nsecs_to_jiffies(steal);
8313     + this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
8314    
8315     - account_steal_time(steal_ct);
8316     - return steal_ct;
8317     + account_steal_time(jiffies_to_cputime(steal_jiffies));
8318     + return steal_jiffies;
8319     }
8320     #endif
8321     return false;
8322     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
8323     index 56b7d4b83947..adff850e5d42 100644
8324     --- a/kernel/sched/fair.c
8325     +++ b/kernel/sched/fair.c
8326     @@ -4459,9 +4459,17 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
8327    
8328     /* scale is effectively 1 << i now, and >> i divides by scale */
8329    
8330     - old_load = this_rq->cpu_load[i] - tickless_load;
8331     + old_load = this_rq->cpu_load[i];
8332     old_load = decay_load_missed(old_load, pending_updates - 1, i);
8333     - old_load += tickless_load;
8334     + if (tickless_load) {
8335     + old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
8336     + /*
8337     + * old_load can never be a negative value because a
8338     + * decayed tickless_load cannot be greater than the
8339     + * original tickless_load.
8340     + */
8341     + old_load += tickless_load;
8342     + }
8343     new_load = this_load;
8344     /*
8345     * Round up the averaging division if load is increasing. This
8346     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
8347     index 10f16374df7f..ff87d887ff62 100644
8348     --- a/kernel/sched/sched.h
8349     +++ b/kernel/sched/sched.h
8350     @@ -1738,3 +1738,16 @@ static inline u64 irq_time_read(int cpu)
8351     }
8352     #endif /* CONFIG_64BIT */
8353     #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
8354     +
8355     +static inline void account_reset_rq(struct rq *rq)
8356     +{
8357     +#ifdef CONFIG_IRQ_TIME_ACCOUNTING
8358     + rq->prev_irq_time = 0;
8359     +#endif
8360     +#ifdef CONFIG_PARAVIRT
8361     + rq->prev_steal_time = 0;
8362     +#endif
8363     +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
8364     + rq->prev_steal_time_rq = 0;
8365     +#endif
8366     +}
8367     diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
8368     index 7e7746a42a62..10a1d7dc9313 100644
8369     --- a/kernel/sysctl_binary.c
8370     +++ b/kernel/sysctl_binary.c
8371     @@ -1321,7 +1321,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
8372     }
8373    
8374     mnt = task_active_pid_ns(current)->proc_mnt;
8375     - file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
8376     + file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
8377     result = PTR_ERR(file);
8378     if (IS_ERR(file))
8379     goto out_putname;
8380     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
8381     index d9293402ee68..8305cbb2d5a2 100644
8382     --- a/kernel/trace/trace.c
8383     +++ b/kernel/trace/trace.c
8384     @@ -4949,7 +4949,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
8385    
8386     spd.nr_pages = i;
8387    
8388     - ret = splice_to_pipe(pipe, &spd);
8389     + if (i)
8390     + ret = splice_to_pipe(pipe, &spd);
8391     + else
8392     + ret = 0;
8393     out:
8394     splice_shrink_spd(&spd);
8395     return ret;
8396     diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
8397     index e4e56589ec1d..be3222b7d72e 100644
8398     --- a/kernel/trace/trace_irqsoff.c
8399     +++ b/kernel/trace/trace_irqsoff.c
8400     @@ -109,8 +109,12 @@ static int func_prolog_dec(struct trace_array *tr,
8401     return 0;
8402    
8403     local_save_flags(*flags);
8404     - /* slight chance to get a false positive on tracing_cpu */
8405     - if (!irqs_disabled_flags(*flags))
8406     + /*
8407     + * Slight chance to get a false positive on tracing_cpu,
8408     + * although I'm starting to think there isn't a chance.
8409     + * Leave this for now just to be paranoid.
8410     + */
8411     + if (!irqs_disabled_flags(*flags) && !preempt_count())
8412     return 0;
8413    
8414     *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
8415     diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
8416     index 060df67dbdd1..f96f0383f6c6 100644
8417     --- a/kernel/trace/trace_printk.c
8418     +++ b/kernel/trace/trace_printk.c
8419     @@ -296,6 +296,9 @@ static int t_show(struct seq_file *m, void *v)
8420     const char *str = *fmt;
8421     int i;
8422    
8423     + if (!*fmt)
8424     + return 0;
8425     +
8426     seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
8427    
8428     /*
8429     diff --git a/kernel/watchdog.c b/kernel/watchdog.c
8430     index b3ace6ebbba3..9acb29f280ec 100644
8431     --- a/kernel/watchdog.c
8432     +++ b/kernel/watchdog.c
8433     @@ -923,6 +923,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
8434     * both lockup detectors are disabled if proc_watchdog_update()
8435     * returns an error.
8436     */
8437     + if (old == new)
8438     + goto out;
8439     +
8440     err = proc_watchdog_update();
8441     }
8442     out:
8443     @@ -967,7 +970,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
8444     int proc_watchdog_thresh(struct ctl_table *table, int write,
8445     void __user *buffer, size_t *lenp, loff_t *ppos)
8446     {
8447     - int err, old;
8448     + int err, old, new;
8449    
8450     get_online_cpus();
8451     mutex_lock(&watchdog_proc_mutex);
8452     @@ -987,6 +990,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
8453     /*
8454     * Update the sample period. Restore on failure.
8455     */
8456     + new = ACCESS_ONCE(watchdog_thresh);
8457     + if (old == new)
8458     + goto out;
8459     +
8460     set_sample_period();
8461     err = proc_watchdog_update();
8462     if (err) {
8463     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
8464     index d06cae2de783..caf3bf73b533 100644
8465     --- a/mm/memcontrol.c
8466     +++ b/mm/memcontrol.c
8467     @@ -1262,7 +1262,7 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
8468     return limit;
8469     }
8470    
8471     -static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
8472     +static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
8473     int order)
8474     {
8475     struct oom_control oc = {
8476     @@ -1340,6 +1340,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
8477     }
8478     unlock:
8479     mutex_unlock(&oom_lock);
8480     + return chosen;
8481     }
8482    
8483     #if MAX_NUMNODES > 1
8484     @@ -5051,6 +5052,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
8485     char *buf, size_t nbytes, loff_t off)
8486     {
8487     struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8488     + unsigned long nr_pages;
8489     unsigned long high;
8490     int err;
8491    
8492     @@ -5061,6 +5063,11 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
8493    
8494     memcg->high = high;
8495    
8496     + nr_pages = page_counter_read(&memcg->memory);
8497     + if (nr_pages > high)
8498     + try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
8499     + GFP_KERNEL, true);
8500     +
8501     memcg_wb_domain_size_changed(memcg);
8502     return nbytes;
8503     }
8504     @@ -5082,6 +5089,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
8505     char *buf, size_t nbytes, loff_t off)
8506     {
8507     struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8508     + unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
8509     + bool drained = false;
8510     unsigned long max;
8511     int err;
8512    
8513     @@ -5090,9 +5099,36 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
8514     if (err)
8515     return err;
8516    
8517     - err = mem_cgroup_resize_limit(memcg, max);
8518     - if (err)
8519     - return err;
8520     + xchg(&memcg->memory.limit, max);
8521     +
8522     + for (;;) {
8523     + unsigned long nr_pages = page_counter_read(&memcg->memory);
8524     +
8525     + if (nr_pages <= max)
8526     + break;
8527     +
8528     + if (signal_pending(current)) {
8529     + err = -EINTR;
8530     + break;
8531     + }
8532     +
8533     + if (!drained) {
8534     + drain_all_stock(memcg);
8535     + drained = true;
8536     + continue;
8537     + }
8538     +
8539     + if (nr_reclaims) {
8540     + if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
8541     + GFP_KERNEL, true))
8542     + nr_reclaims--;
8543     + continue;
8544     + }
8545     +
8546     + mem_cgroup_events(memcg, MEMCG_OOM, 1);
8547     + if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
8548     + break;
8549     + }
8550    
8551     memcg_wb_domain_size_changed(memcg);
8552     return nbytes;
8553     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
8554     index 838ca8bb64f7..9d9044e91ac5 100644
8555     --- a/mm/page_alloc.c
8556     +++ b/mm/page_alloc.c
8557     @@ -660,34 +660,28 @@ static inline void __free_one_page(struct page *page,
8558     unsigned long combined_idx;
8559     unsigned long uninitialized_var(buddy_idx);
8560     struct page *buddy;
8561     - unsigned int max_order = MAX_ORDER;
8562     + unsigned int max_order;
8563     +
8564     + max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
8565    
8566     VM_BUG_ON(!zone_is_initialized(zone));
8567     VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
8568    
8569     VM_BUG_ON(migratetype == -1);
8570     - if (is_migrate_isolate(migratetype)) {
8571     - /*
8572     - * We restrict max order of merging to prevent merge
8573     - * between freepages on isolate pageblock and normal
8574     - * pageblock. Without this, pageblock isolation
8575     - * could cause incorrect freepage accounting.
8576     - */
8577     - max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
8578     - } else {
8579     + if (likely(!is_migrate_isolate(migratetype)))
8580     __mod_zone_freepage_state(zone, 1 << order, migratetype);
8581     - }
8582    
8583     - page_idx = pfn & ((1 << max_order) - 1);
8584     + page_idx = pfn & ((1 << MAX_ORDER) - 1);
8585    
8586     VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
8587     VM_BUG_ON_PAGE(bad_range(zone, page), page);
8588    
8589     +continue_merging:
8590     while (order < max_order - 1) {
8591     buddy_idx = __find_buddy_index(page_idx, order);
8592     buddy = page + (buddy_idx - page_idx);
8593     if (!page_is_buddy(page, buddy, order))
8594     - break;
8595     + goto done_merging;
8596     /*
8597     * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
8598     * merge with it and move up one order.
8599     @@ -704,6 +698,32 @@ static inline void __free_one_page(struct page *page,
8600     page_idx = combined_idx;
8601     order++;
8602     }
8603     + if (max_order < MAX_ORDER) {
8604     + /* If we are here, it means order is >= pageblock_order.
8605     + * We want to prevent merge between freepages on isolate
8606     + * pageblock and normal pageblock. Without this, pageblock
8607     + * isolation could cause incorrect freepage or CMA accounting.
8608     + *
8609     + * We don't want to hit this code for the more frequent
8610     + * low-order merging.
8611     + */
8612     + if (unlikely(has_isolate_pageblock(zone))) {
8613     + int buddy_mt;
8614     +
8615     + buddy_idx = __find_buddy_index(page_idx, order);
8616     + buddy = page + (buddy_idx - page_idx);
8617     + buddy_mt = get_pageblock_migratetype(buddy);
8618     +
8619     + if (migratetype != buddy_mt
8620     + && (is_migrate_isolate(migratetype) ||
8621     + is_migrate_isolate(buddy_mt)))
8622     + goto done_merging;
8623     + }
8624     + max_order++;
8625     + goto continue_merging;
8626     + }
8627     +
8628     +done_merging:
8629     set_page_order(page, order);
8630    
8631     /*
8632     diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
8633     index 5a5089cb6570..1363b8ffd89c 100644
8634     --- a/net/bluetooth/mgmt.c
8635     +++ b/net/bluetooth/mgmt.c
8636     @@ -5979,6 +5979,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8637     return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8638     MGMT_STATUS_INVALID_PARAMS);
8639    
8640     + if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8641     + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8642     + MGMT_STATUS_INVALID_PARAMS);
8643     +
8644     flags = __le32_to_cpu(cp->flags);
8645     timeout = __le16_to_cpu(cp->timeout);
8646     duration = __le16_to_cpu(cp->duration);
8647     diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
8648     index f085f5968c52..ce8cc9c006e5 100644
8649     --- a/scripts/coccinelle/iterators/use_after_iter.cocci
8650     +++ b/scripts/coccinelle/iterators/use_after_iter.cocci
8651     @@ -123,7 +123,7 @@ list_remove_head(x,c,...)
8652     |
8653     sizeof(<+...c...+>)
8654     |
8655     -&c->member
8656     + &c->member
8657     |
8658     c = E
8659     |
8660     diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
8661     index 25db8cff44a2..0a35d6dbfb80 100644
8662     --- a/scripts/gdb/linux/modules.py
8663     +++ b/scripts/gdb/linux/modules.py
8664     @@ -73,10 +73,11 @@ class LxLsmod(gdb.Command):
8665     " " if utils.get_long_type().sizeof == 8 else ""))
8666    
8667     for module in module_list():
8668     + layout = module['core_layout']
8669     gdb.write("{address} {name:<19} {size:>8} {ref}".format(
8670     - address=str(module['module_core']).split()[0],
8671     + address=str(layout['base']).split()[0],
8672     name=module['name'].string(),
8673     - size=str(module['core_size']),
8674     + size=str(layout['size']),
8675     ref=str(module['refcnt']['counter'])))
8676    
8677     source_list = module['source_list']
8678     diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
8679     index 627750cb420d..9a0f8923f67c 100644
8680     --- a/scripts/gdb/linux/symbols.py
8681     +++ b/scripts/gdb/linux/symbols.py
8682     @@ -108,7 +108,7 @@ lx-symbols command."""
8683    
8684     def load_module_symbols(self, module):
8685     module_name = module['name'].string()
8686     - module_addr = str(module['module_core']).split()[0]
8687     + module_addr = str(module['core_layout']['base']).split()[0]
8688    
8689     module_file = self._get_module_file(module_name)
8690     if not module_file and not self.module_files_updated:
8691     diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
8692     index d79cba4ce3eb..ebced77deb9c 100644
8693     --- a/scripts/kconfig/Makefile
8694     +++ b/scripts/kconfig/Makefile
8695     @@ -96,13 +96,15 @@ savedefconfig: $(obj)/conf
8696     defconfig: $(obj)/conf
8697     ifeq ($(KBUILD_DEFCONFIG),)
8698     $< $(silent) --defconfig $(Kconfig)
8699     -else ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
8700     +else
8701     +ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
8702     @$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
8703     $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
8704     else
8705     @$(kecho) "*** Default configuration is based on target '$(KBUILD_DEFCONFIG)'"
8706     $(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
8707     endif
8708     +endif
8709    
8710     %_defconfig: $(obj)/conf
8711     $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
8712     diff --git a/scripts/package/mkspec b/scripts/package/mkspec
8713     index 71004daefe31..fe44d68e9344 100755
8714     --- a/scripts/package/mkspec
8715     +++ b/scripts/package/mkspec
8716     @@ -131,11 +131,11 @@ echo 'rm -rf $RPM_BUILD_ROOT'
8717     echo ""
8718     echo "%post"
8719     echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
8720     -echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
8721     -echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
8722     +echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm"
8723     +echo "cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm"
8724     echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
8725     -echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
8726     -echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
8727     +echo "/sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
8728     +echo "rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
8729     echo "fi"
8730     echo ""
8731     echo "%files"
8732     diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
8733     index 6b5a811e01a5..3a9b66c6e09c 100644
8734     --- a/sound/core/pcm_lib.c
8735     +++ b/sound/core/pcm_lib.c
8736     @@ -322,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
8737     char name[16];
8738     snd_pcm_debug_name(substream, name, sizeof(name));
8739     pcm_err(substream->pcm,
8740     - "BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
8741     + "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
8742     name, pos, runtime->buffer_size,
8743     runtime->period_size);
8744     }
8745     diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
8746     index e361024eabb6..d1a4d6973330 100644
8747     --- a/sound/hda/hdac_device.c
8748     +++ b/sound/hda/hdac_device.c
8749     @@ -611,6 +611,22 @@ int snd_hdac_power_up_pm(struct hdac_device *codec)
8750     }
8751     EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
8752    
8753     +/* like snd_hdac_power_up_pm(), but only increment the pm count when
8754     + * already powered up. Returns -1 if not powered up, 1 if incremented
8755     + * or 0 if unchanged. Only used in hdac_regmap.c
8756     + */
8757     +int snd_hdac_keep_power_up(struct hdac_device *codec)
8758     +{
8759     + if (!atomic_inc_not_zero(&codec->in_pm)) {
8760     + int ret = pm_runtime_get_if_in_use(&codec->dev);
8761     + if (!ret)
8762     + return -1;
8763     + if (ret < 0)
8764     + return 0;
8765     + }
8766     + return 1;
8767     +}
8768     +
8769     /**
8770     * snd_hdac_power_down_pm - power down the codec
8771     * @codec: the codec object
8772     diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
8773     index eb8f7c30cb09..bdbcd6b75ff6 100644
8774     --- a/sound/hda/hdac_regmap.c
8775     +++ b/sound/hda/hdac_regmap.c
8776     @@ -21,13 +21,16 @@
8777     #include <sound/hdaudio.h>
8778     #include <sound/hda_regmap.h>
8779    
8780     -#ifdef CONFIG_PM
8781     -#define codec_is_running(codec) \
8782     - (atomic_read(&(codec)->in_pm) || \
8783     - !pm_runtime_suspended(&(codec)->dev))
8784     -#else
8785     -#define codec_is_running(codec) true
8786     -#endif
8787     +static int codec_pm_lock(struct hdac_device *codec)
8788     +{
8789     + return snd_hdac_keep_power_up(codec);
8790     +}
8791     +
8792     +static void codec_pm_unlock(struct hdac_device *codec, int lock)
8793     +{
8794     + if (lock == 1)
8795     + snd_hdac_power_down_pm(codec);
8796     +}
8797    
8798     #define get_verb(reg) (((reg) >> 8) & 0xfff)
8799    
8800     @@ -238,20 +241,28 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
8801     struct hdac_device *codec = context;
8802     int verb = get_verb(reg);
8803     int err;
8804     + int pm_lock = 0;
8805    
8806     - if (!codec_is_running(codec) && verb != AC_VERB_GET_POWER_STATE)
8807     - return -EAGAIN;
8808     + if (verb != AC_VERB_GET_POWER_STATE) {
8809     + pm_lock = codec_pm_lock(codec);
8810     + if (pm_lock < 0)
8811     + return -EAGAIN;
8812     + }
8813     reg |= (codec->addr << 28);
8814     - if (is_stereo_amp_verb(reg))
8815     - return hda_reg_read_stereo_amp(codec, reg, val);
8816     - if (verb == AC_VERB_GET_PROC_COEF)
8817     - return hda_reg_read_coef(codec, reg, val);
8818     + if (is_stereo_amp_verb(reg)) {
8819     + err = hda_reg_read_stereo_amp(codec, reg, val);
8820     + goto out;
8821     + }
8822     + if (verb == AC_VERB_GET_PROC_COEF) {
8823     + err = hda_reg_read_coef(codec, reg, val);
8824     + goto out;
8825     + }
8826     if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE)
8827     reg &= ~AC_AMP_FAKE_MUTE;
8828    
8829     err = snd_hdac_exec_verb(codec, reg, 0, val);
8830     if (err < 0)
8831     - return err;
8832     + goto out;
8833     /* special handling for asymmetric reads */
8834     if (verb == AC_VERB_GET_POWER_STATE) {
8835     if (*val & AC_PWRST_ERROR)
8836     @@ -259,7 +270,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
8837     else /* take only the actual state */
8838     *val = (*val >> 4) & 0x0f;
8839     }
8840     - return 0;
8841     + out:
8842     + codec_pm_unlock(codec, pm_lock);
8843     + return err;
8844     }
8845    
8846     static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
8847     @@ -267,6 +280,7 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
8848     struct hdac_device *codec = context;
8849     unsigned int verb;
8850     int i, bytes, err;
8851     + int pm_lock = 0;
8852    
8853     if (codec->caps_overwriting)
8854     return 0;
8855     @@ -275,14 +289,21 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
8856     reg |= (codec->addr << 28);
8857     verb = get_verb(reg);
8858    
8859     - if (!codec_is_running(codec) && verb != AC_VERB_SET_POWER_STATE)
8860     - return codec->lazy_cache ? 0 : -EAGAIN;
8861     + if (verb != AC_VERB_SET_POWER_STATE) {
8862     + pm_lock = codec_pm_lock(codec);
8863     + if (pm_lock < 0)
8864     + return codec->lazy_cache ? 0 : -EAGAIN;
8865     + }
8866    
8867     - if (is_stereo_amp_verb(reg))
8868     - return hda_reg_write_stereo_amp(codec, reg, val);
8869     + if (is_stereo_amp_verb(reg)) {
8870     + err = hda_reg_write_stereo_amp(codec, reg, val);
8871     + goto out;
8872     + }
8873    
8874     - if (verb == AC_VERB_SET_PROC_COEF)
8875     - return hda_reg_write_coef(codec, reg, val);
8876     + if (verb == AC_VERB_SET_PROC_COEF) {
8877     + err = hda_reg_write_coef(codec, reg, val);
8878     + goto out;
8879     + }
8880    
8881     switch (verb & 0xf00) {
8882     case AC_VERB_SET_AMP_GAIN_MUTE:
8883     @@ -319,10 +340,12 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
8884     reg |= (verb + i) << 8 | ((val >> (8 * i)) & 0xff);
8885     err = snd_hdac_exec_verb(codec, reg, 0, NULL);
8886     if (err < 0)
8887     - return err;
8888     + goto out;
8889     }
8890    
8891     - return 0;
8892     + out:
8893     + codec_pm_unlock(codec, pm_lock);
8894     + return err;
8895     }
8896    
8897     static const struct regmap_config hda_regmap_cfg = {
8898     diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
8899     index c1c855a6c0af..a47e8ae0eb30 100644
8900     --- a/sound/pci/hda/patch_cirrus.c
8901     +++ b/sound/pci/hda/patch_cirrus.c
8902     @@ -174,8 +174,12 @@ static void cs_automute(struct hda_codec *codec)
8903     snd_hda_gen_update_outputs(codec);
8904    
8905     if (spec->gpio_eapd_hp || spec->gpio_eapd_speaker) {
8906     - spec->gpio_data = spec->gen.hp_jack_present ?
8907     - spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
8908     + if (spec->gen.automute_speaker)
8909     + spec->gpio_data = spec->gen.hp_jack_present ?
8910     + spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
8911     + else
8912     + spec->gpio_data =
8913     + spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
8914     snd_hda_codec_write(codec, 0x01, 0,
8915     AC_VERB_SET_GPIO_DATA, spec->gpio_data);
8916     }
8917     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
8918     index 6122b8ca872f..56fefbd85782 100644
8919     --- a/sound/pci/hda/patch_conexant.c
8920     +++ b/sound/pci/hda/patch_conexant.c
8921     @@ -204,8 +204,13 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
8922     {
8923     struct conexant_spec *spec = codec->spec;
8924    
8925     - if (codec->core.vendor_id != 0x14f150f2)
8926     + switch (codec->core.vendor_id) {
8927     + case 0x14f150f2: /* CX20722 */
8928     + case 0x14f150f4: /* CX20724 */
8929     + break;
8930     + default:
8931     return;
8932     + }
8933    
8934     /* Turn the CX20722 codec into D3 to avoid spurious noises
8935     from the internal speaker during (and after) reboot */
8936     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
8937     index bcbc4ee10130..e68fa449ebef 100644
8938     --- a/sound/pci/hda/patch_hdmi.c
8939     +++ b/sound/pci/hda/patch_hdmi.c
8940     @@ -152,13 +152,17 @@ struct hdmi_spec {
8941     struct hda_pcm_stream pcm_playback;
8942    
8943     /* i915/powerwell (Haswell+/Valleyview+) specific */
8944     + bool use_acomp_notifier; /* use i915 eld_notify callback for hotplug */
8945     struct i915_audio_component_audio_ops i915_audio_ops;
8946     bool i915_bound; /* was i915 bound in this driver? */
8947     };
8948    
8949     #ifdef CONFIG_SND_HDA_I915
8950     -#define codec_has_acomp(codec) \
8951     - ((codec)->bus->core.audio_component != NULL)
8952     +static inline bool codec_has_acomp(struct hda_codec *codec)
8953     +{
8954     + struct hdmi_spec *spec = codec->spec;
8955     + return spec->use_acomp_notifier;
8956     +}
8957     #else
8958     #define codec_has_acomp(codec) false
8959     #endif
8960     @@ -1562,6 +1566,7 @@ static void update_eld(struct hda_codec *codec,
8961     eld->eld_size) != 0)
8962     eld_changed = true;
8963    
8964     + pin_eld->monitor_present = eld->monitor_present;
8965     pin_eld->eld_valid = eld->eld_valid;
8966     pin_eld->eld_size = eld->eld_size;
8967     if (eld->eld_valid)
8968     @@ -1665,11 +1670,10 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
8969     int size;
8970    
8971     mutex_lock(&per_pin->lock);
8972     + eld->monitor_present = false;
8973     size = snd_hdac_acomp_get_eld(&codec->bus->core, per_pin->pin_nid,
8974     &eld->monitor_present, eld->eld_buffer,
8975     ELD_MAX_SIZE);
8976     - if (size < 0)
8977     - goto unlock;
8978     if (size > 0) {
8979     size = min(size, ELD_MAX_SIZE);
8980     if (snd_hdmi_parse_eld(codec, &eld->info,
8981     @@ -1873,7 +1877,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
8982    
8983     /* Call sync_audio_rate to set the N/CTS/M manually if necessary */
8984     /* Todo: add DP1.2 MST audio support later */
8985     - snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
8986     + if (codec_has_acomp(codec))
8987     + snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
8988    
8989     non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
8990     mutex_lock(&per_pin->lock);
8991     @@ -2432,6 +2437,10 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
8992     struct hda_codec *codec = audio_ptr;
8993     int pin_nid = port + 0x04;
8994    
8995     + /* we assume only from port-B to port-D */
8996     + if (port < 1 || port > 3)
8997     + return;
8998     +
8999     /* skip notification during system suspend (but not in runtime PM);
9000     * the state will be updated at resume
9001     */
9002     @@ -2456,11 +2465,24 @@ static int patch_generic_hdmi(struct hda_codec *codec)
9003     codec->spec = spec;
9004     hdmi_array_init(spec, 4);
9005    
9006     - /* Try to bind with i915 for any Intel codecs (if not done yet) */
9007     - if (!codec_has_acomp(codec) &&
9008     - (codec->core.vendor_id >> 16) == 0x8086)
9009     - if (!snd_hdac_i915_init(&codec->bus->core))
9010     - spec->i915_bound = true;
9011     +#ifdef CONFIG_SND_HDA_I915
9012     + /* Try to bind with i915 for Intel HSW+ codecs (if not done yet) */
9013     + if ((codec->core.vendor_id >> 16) == 0x8086 &&
9014     + is_haswell_plus(codec)) {
9015     +#if 0
9016     + /* on-demand binding leads to an unbalanced refcount when
9017     + * both i915 and hda drivers are probed concurrently;
9018     + * disabled temporarily for now
9019     + */
9020     + if (!codec->bus->core.audio_component)
9021     + if (!snd_hdac_i915_init(&codec->bus->core))
9022     + spec->i915_bound = true;
9023     +#endif
9024     + /* use i915 audio component notifier for hotplug */
9025     + if (codec->bus->core.audio_component)
9026     + spec->use_acomp_notifier = true;
9027     + }
9028     +#endif
9029    
9030     if (is_haswell_plus(codec)) {
9031     intel_haswell_enable_all_pins(codec, true);
9032     @@ -3659,6 +3681,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
9033     HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
9034     HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
9035     HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
9036     +HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
9037     HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
9038     HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
9039     HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
9040     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
9041     index 93d2156b6241..4f5ca0b9ce27 100644
9042     --- a/sound/pci/hda/patch_realtek.c
9043     +++ b/sound/pci/hda/patch_realtek.c
9044     @@ -5556,6 +5556,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9045     SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
9046     SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
9047     SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
9048     + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
9049     SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
9050     SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
9051     SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
9052     diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
9053     index 42bcbac801a3..ccdab29a8b66 100644
9054     --- a/sound/pci/intel8x0.c
9055     +++ b/sound/pci/intel8x0.c
9056     @@ -2879,6 +2879,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
9057    
9058     static struct snd_pci_quirk intel8x0_clock_list[] = {
9059     SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
9060     + SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
9061     SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
9062     SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
9063     SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
9064     diff --git a/sound/usb/clock.c b/sound/usb/clock.c
9065     index 2ed260b10f6d..7ccbcaf6a147 100644
9066     --- a/sound/usb/clock.c
9067     +++ b/sound/usb/clock.c
9068     @@ -285,6 +285,8 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
9069     unsigned char data[3];
9070     int err, crate;
9071    
9072     + if (get_iface_desc(alts)->bNumEndpoints < 1)
9073     + return -EINVAL;
9074     ep = get_endpoint(alts, 0)->bEndpointAddress;
9075    
9076     /* if endpoint doesn't have sampling rate control, bail out */
9077     diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
9078     index 7b1cb365ffab..c07a7eda42a2 100644
9079     --- a/sound/usb/endpoint.c
9080     +++ b/sound/usb/endpoint.c
9081     @@ -438,6 +438,9 @@ exit_clear:
9082     *
9083     * New endpoints will be added to chip->ep_list and must be freed by
9084     * calling snd_usb_endpoint_free().
9085     + *
9086     + * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
9087     + * bNumEndpoints > 1 beforehand.
9088     */
9089     struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
9090     struct usb_host_interface *alts,
9091     diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
9092     index 279025650568..f6c3bf79af9a 100644
9093     --- a/sound/usb/mixer_quirks.c
9094     +++ b/sound/usb/mixer_quirks.c
9095     @@ -1519,7 +1519,11 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
9096    
9097     /* use known values for that card: interface#1 altsetting#1 */
9098     iface = usb_ifnum_to_if(chip->dev, 1);
9099     + if (!iface || iface->num_altsetting < 2)
9100     + return -EINVAL;
9101     alts = &iface->altsetting[1];
9102     + if (get_iface_desc(alts)->bNumEndpoints < 1)
9103     + return -EINVAL;
9104     ep = get_endpoint(alts, 0)->bEndpointAddress;
9105    
9106     err = snd_usb_ctl_msg(chip->dev,
9107     diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
9108     index 9245f52d43bd..44d178ee9177 100644
9109     --- a/sound/usb/pcm.c
9110     +++ b/sound/usb/pcm.c
9111     @@ -159,6 +159,8 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
9112     unsigned char data[1];
9113     int err;
9114    
9115     + if (get_iface_desc(alts)->bNumEndpoints < 1)
9116     + return -EINVAL;
9117     ep = get_endpoint(alts, 0)->bEndpointAddress;
9118    
9119     data[0] = 1;
9120     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
9121     index c458d60d5030..cd7eac28edee 100644
9122     --- a/sound/usb/quirks.c
9123     +++ b/sound/usb/quirks.c
9124     @@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
9125     usb_audio_err(chip, "cannot memdup\n");
9126     return -ENOMEM;
9127     }
9128     + INIT_LIST_HEAD(&fp->list);
9129     if (fp->nr_rates > MAX_NR_RATES) {
9130     kfree(fp);
9131     return -EINVAL;
9132     @@ -167,19 +168,20 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
9133     stream = (fp->endpoint & USB_DIR_IN)
9134     ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
9135     err = snd_usb_add_audio_stream(chip, stream, fp);
9136     - if (err < 0) {
9137     - kfree(fp);
9138     - kfree(rate_table);
9139     - return err;
9140     - }
9141     + if (err < 0)
9142     + goto error;
9143     if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
9144     fp->altset_idx >= iface->num_altsetting) {
9145     - kfree(fp);
9146     - kfree(rate_table);
9147     - return -EINVAL;
9148     + err = -EINVAL;
9149     + goto error;
9150     }
9151     alts = &iface->altsetting[fp->altset_idx];
9152     altsd = get_iface_desc(alts);
9153     + if (altsd->bNumEndpoints < 1) {
9154     + err = -EINVAL;
9155     + goto error;
9156     + }
9157     +
9158     fp->protocol = altsd->bInterfaceProtocol;
9159    
9160     if (fp->datainterval == 0)
9161     @@ -190,6 +192,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
9162     snd_usb_init_pitch(chip, fp->iface, alts, fp);
9163     snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
9164     return 0;
9165     +
9166     + error:
9167     + list_del(&fp->list); /* unlink for avoiding double-free */
9168     + kfree(fp);
9169     + kfree(rate_table);
9170     + return err;
9171     }
9172    
9173     static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
9174     @@ -462,6 +470,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
9175     fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
9176     fp->datainterval = 0;
9177     fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
9178     + INIT_LIST_HEAD(&fp->list);
9179    
9180     switch (fp->maxpacksize) {
9181     case 0x120:
9182     @@ -485,6 +494,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
9183     ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
9184     err = snd_usb_add_audio_stream(chip, stream, fp);
9185     if (err < 0) {
9186     + list_del(&fp->list); /* unlink for avoiding double-free */
9187     kfree(fp);
9188     return err;
9189     }
9190     @@ -1121,6 +1131,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
9191     switch (chip->usb_id) {
9192     case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
9193     case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
9194     + case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
9195     case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
9196     case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
9197     case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
9198     diff --git a/sound/usb/stream.c b/sound/usb/stream.c
9199     index c4dc577ab1bd..8e9548bc1f1a 100644
9200     --- a/sound/usb/stream.c
9201     +++ b/sound/usb/stream.c
9202     @@ -314,7 +314,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
9203     /*
9204     * add this endpoint to the chip instance.
9205     * if a stream with the same endpoint already exists, append to it.
9206     - * if not, create a new pcm stream.
9207     + * if not, create a new pcm stream. note, fp is added to the substream
9208     + * fmt_list and will be freed on the chip instance release. do not free
9209     + * fp or do remove it from the substream fmt_list to avoid double-free.
9210     */
9211     int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
9212     int stream,
9213     @@ -675,6 +677,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
9214     * (fp->maxpacksize & 0x7ff);
9215     fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
9216     fp->clock = clock;
9217     + INIT_LIST_HEAD(&fp->list);
9218    
9219     /* some quirks for attributes here */
9220    
9221     @@ -723,6 +726,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
9222     dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
9223     err = snd_usb_add_audio_stream(chip, stream, fp);
9224     if (err < 0) {
9225     + list_del(&fp->list); /* unlink for avoiding double-free */
9226     kfree(fp->rate_table);
9227     kfree(fp->chmap);
9228     kfree(fp);
9229     diff --git a/tools/hv/Makefile b/tools/hv/Makefile
9230     index a8ab79556926..a8c4644022a6 100644
9231     --- a/tools/hv/Makefile
9232     +++ b/tools/hv/Makefile
9233     @@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread
9234     WARNINGS = -Wall -Wextra
9235     CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
9236    
9237     +CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
9238     +
9239     all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
9240     %: %.c
9241     $(CC) $(CFLAGS) -o $@ $^
9242     diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
9243     index 813d9b272c81..48a1c5e7dc0d 100644
9244     --- a/tools/perf/util/parse-events.c
9245     +++ b/tools/perf/util/parse-events.c
9246     @@ -2101,11 +2101,11 @@ char *parse_events_formats_error_string(char *additional_terms)
9247    
9248     /* valid terms */
9249     if (additional_terms) {
9250     - if (!asprintf(&str, "valid terms: %s,%s",
9251     - additional_terms, static_terms))
9252     + if (asprintf(&str, "valid terms: %s,%s",
9253     + additional_terms, static_terms) < 0)
9254     goto fail;
9255     } else {
9256     - if (!asprintf(&str, "valid terms: %s", static_terms))
9257     + if (asprintf(&str, "valid terms: %s", static_terms) < 0)
9258     goto fail;
9259     }
9260     return str;
9261     diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
9262     index b597bcc8fc78..37b4f5070353 100644
9263     --- a/tools/perf/util/pmu.c
9264     +++ b/tools/perf/util/pmu.c
9265     @@ -284,13 +284,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
9266     {
9267     struct dirent *evt_ent;
9268     DIR *event_dir;
9269     - int ret = 0;
9270    
9271     event_dir = opendir(dir);
9272     if (!event_dir)
9273     return -EINVAL;
9274    
9275     - while (!ret && (evt_ent = readdir(event_dir))) {
9276     + while ((evt_ent = readdir(event_dir))) {
9277     char path[PATH_MAX];
9278     char *name = evt_ent->d_name;
9279     FILE *file;
9280     @@ -306,17 +305,19 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
9281    
9282     snprintf(path, PATH_MAX, "%s/%s", dir, name);
9283    
9284     - ret = -EINVAL;
9285     file = fopen(path, "r");
9286     - if (!file)
9287     - break;
9288     + if (!file) {
9289     + pr_debug("Cannot open %s\n", path);
9290     + continue;
9291     + }
9292    
9293     - ret = perf_pmu__new_alias(head, dir, name, file);
9294     + if (perf_pmu__new_alias(head, dir, name, file) < 0)
9295     + pr_debug("Cannot set up %s\n", name);
9296     fclose(file);
9297     }
9298    
9299     closedir(event_dir);
9300     - return ret;
9301     + return 0;
9302     }
9303    
9304     /*
9305     diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
9306     index 1833103768cb..c8680984d2d6 100644
9307     --- a/tools/perf/util/setup.py
9308     +++ b/tools/perf/util/setup.py
9309     @@ -22,6 +22,7 @@ cflags = getenv('CFLAGS', '').split()
9310     # switch off several checks (need to be at the end of cflags list)
9311     cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
9312    
9313     +src_perf = getenv('srctree') + '/tools/perf'
9314     build_lib = getenv('PYTHON_EXTBUILD_LIB')
9315     build_tmp = getenv('PYTHON_EXTBUILD_TMP')
9316     libtraceevent = getenv('LIBTRACEEVENT')
9317     @@ -30,6 +31,9 @@ libapikfs = getenv('LIBAPI')
9318     ext_sources = [f.strip() for f in file('util/python-ext-sources')
9319     if len(f.strip()) > 0 and f[0] != '#']
9320    
9321     +# use full paths with source files
9322     +ext_sources = map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)
9323     +
9324     perf = Extension('perf',
9325     sources = ext_sources,
9326     include_dirs = ['util/include'],
9327     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
9328     index 9102ae172d2a..298473707f17 100644
9329     --- a/virt/kvm/kvm_main.c
9330     +++ b/virt/kvm/kvm_main.c
9331     @@ -537,6 +537,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
9332     if (!kvm)
9333     return ERR_PTR(-ENOMEM);
9334    
9335     + spin_lock_init(&kvm->mmu_lock);
9336     + atomic_inc(&current->mm->mm_count);
9337     + kvm->mm = current->mm;
9338     + kvm_eventfd_init(kvm);
9339     + mutex_init(&kvm->lock);
9340     + mutex_init(&kvm->irq_lock);
9341     + mutex_init(&kvm->slots_lock);
9342     + atomic_set(&kvm->users_count, 1);
9343     + INIT_LIST_HEAD(&kvm->devices);
9344     +
9345     r = kvm_arch_init_vm(kvm, type);
9346     if (r)
9347     goto out_err_no_disable;
9348     @@ -569,16 +579,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
9349     goto out_err;
9350     }
9351    
9352     - spin_lock_init(&kvm->mmu_lock);
9353     - kvm->mm = current->mm;
9354     - atomic_inc(&kvm->mm->mm_count);
9355     - kvm_eventfd_init(kvm);
9356     - mutex_init(&kvm->lock);
9357     - mutex_init(&kvm->irq_lock);
9358     - mutex_init(&kvm->slots_lock);
9359     - atomic_set(&kvm->users_count, 1);
9360     - INIT_LIST_HEAD(&kvm->devices);
9361     -
9362     r = kvm_init_mmu_notifier(kvm);
9363     if (r)
9364     goto out_err;
9365     @@ -603,6 +603,7 @@ out_err_no_disable:
9366     for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
9367     kvm_free_memslots(kvm, kvm->memslots[i]);
9368     kvm_arch_free_vm(kvm);
9369     + mmdrop(current->mm);
9370     return ERR_PTR(r);
9371     }
9372