Magellan Linux

Contents of /trunk/kernel-alx/patches-4.1/0103-4.1.4-all-fixes.patch



Revision 2748
Mon Jan 11 12:00:45 2016 UTC by niro
File size: 392028 bytes
-linux-4.1 patches up to 4.1.15
1 diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
2 index d0d0c578324c..0a378a88217a 100644
3 --- a/Documentation/ABI/testing/ima_policy
4 +++ b/Documentation/ABI/testing/ima_policy
5 @@ -20,17 +20,19 @@ Description:
6 action: measure | dont_measure | appraise | dont_appraise | audit
7 condition:= base | lsm [option]
8 base: [[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
9 - [fowner]]
10 + [euid=] [fowner=]]
11 lsm: [[subj_user=] [subj_role=] [subj_type=]
12 [obj_user=] [obj_role=] [obj_type=]]
13 option: [[appraise_type=]] [permit_directio]
14
15 base: func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK]
16 [FIRMWARE_CHECK]
17 - mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
18 + mask:= [[^]MAY_READ] [[^]MAY_WRITE] [[^]MAY_APPEND]
19 + [[^]MAY_EXEC]
20 fsmagic:= hex value
21 fsuuid:= file system UUID (e.g 8bcbe394-4f13-4144-be8e-5aa9ea2ce2f6)
22 uid:= decimal value
23 + euid:= decimal value
24 fowner:=decimal value
25 lsm: are LSM specific
26 option: appraise_type:= [imasig]
27 @@ -49,11 +51,25 @@ Description:
28 dont_measure fsmagic=0x01021994
29 dont_appraise fsmagic=0x01021994
30 # RAMFS_MAGIC
31 - dont_measure fsmagic=0x858458f6
32 dont_appraise fsmagic=0x858458f6
33 + # DEVPTS_SUPER_MAGIC
34 + dont_measure fsmagic=0x1cd1
35 + dont_appraise fsmagic=0x1cd1
36 + # BINFMTFS_MAGIC
37 + dont_measure fsmagic=0x42494e4d
38 + dont_appraise fsmagic=0x42494e4d
39 # SECURITYFS_MAGIC
40 dont_measure fsmagic=0x73636673
41 dont_appraise fsmagic=0x73636673
42 + # SELINUX_MAGIC
43 + dont_measure fsmagic=0xf97cff8c
44 + dont_appraise fsmagic=0xf97cff8c
45 + # CGROUP_SUPER_MAGIC
46 + dont_measure fsmagic=0x27e0eb
47 + dont_appraise fsmagic=0x27e0eb
48 + # NSFS_MAGIC
49 + dont_measure fsmagic=0x6e736673
50 + dont_appraise fsmagic=0x6e736673
51
52 measure func=BPRM_CHECK
53 measure func=FILE_MMAP mask=MAY_EXEC
54 @@ -70,10 +86,6 @@ Description:
55 Examples of LSM specific definitions:
56
57 SELinux:
58 - # SELINUX_MAGIC
59 - dont_measure fsmagic=0xf97cff8c
60 - dont_appraise fsmagic=0xf97cff8c
61 -
62 dont_measure obj_type=var_log_t
63 dont_appraise obj_type=var_log_t
64 dont_measure obj_type=auditd_log_t
65 diff --git a/Documentation/ABI/testing/sysfs-ata b/Documentation/ABI/testing/sysfs-ata
66 index 0a932155cbba..9231daef3813 100644
67 --- a/Documentation/ABI/testing/sysfs-ata
68 +++ b/Documentation/ABI/testing/sysfs-ata
69 @@ -90,6 +90,17 @@ gscr
70 130: SATA_PMP_GSCR_SII_GPIO
71 Only valid if the device is a PM.
72
73 +trim
74 +
75 + Shows the DSM TRIM mode currently used by the device. Valid
76 + values are:
77 + unsupported: Drive does not support DSM TRIM
78 + unqueued: Drive supports unqueued DSM TRIM only
79 + queued: Drive supports queued DSM TRIM
80 + forced_unqueued: Drive's unqueued DSM support is known to be
81 + buggy and only unqueued TRIM commands
82 + are sent
83 +
84 spdn_cnt
85
86 Number of time libata decided to lower the speed of link due to errors.
87 diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
88 index 3befcb19f414..1fbdd79d1624 100644
89 --- a/Documentation/ABI/testing/sysfs-bus-iio
90 +++ b/Documentation/ABI/testing/sysfs-bus-iio
91 @@ -1165,10 +1165,8 @@ Description:
92 object is near the sensor, usually be observing
93 reflectivity of infrared or ultrasound emitted.
94 Often these sensors are unit less and as such conversion
95 - to SI units is not possible. Where it is, the units should
96 - be meters. If such a conversion is not possible, the reported
97 - values should behave in the same way as a distance, i.e. lower
98 - values indicate something is closer to the sensor.
99 + to SI units is not possible. Higher proximity measurements
100 + indicate closer objects, and vice versa.
101
102 What: /sys/.../iio:deviceX/in_illuminance_input
103 What: /sys/.../iio:deviceX/in_illuminance_raw
104 diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
105 index adda2a8d1d52..e357b020861d 100644
106 --- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
107 +++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
108 @@ -92,5 +92,5 @@ mpp61 61 gpo, dev(wen1), uart1(txd), audio(rclk)
109 mpp62 62 gpio, dev(a2), uart1(cts), tdm(drx), pcie(clkreq0),
110 audio(mclk), uart0(cts)
111 mpp63 63 gpo, spi0(sck), tclk
112 -mpp64 64 gpio, spi0(miso), spi0-1(cs1)
113 -mpp65 65 gpio, spi0(mosi), spi0-1(cs2)
114 +mpp64 64 gpio, spi0(miso), spi0(cs1)
115 +mpp65 65 gpio, spi0(mosi), spi0(cs2)
116 diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
117 index 7de0cda4a379..bedbe42c8c0a 100644
118 --- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
119 +++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
120 @@ -22,8 +22,8 @@ mpp5 5 gpio, dev(ad7), spi0(cs2), spi1(cs2)
121 mpp6 6 gpio, dev(ad0), led(p1), audio(rclk)
122 mpp7 7 gpio, dev(ad1), ptp(clk), led(p2), audio(extclk)
123 mpp8 8 gpio, dev (bootcs), spi0(cs0), spi1(cs0)
124 -mpp9 9 gpio, nf(wen), spi0(sck), spi1(sck)
125 -mpp10 10 gpio, nf(ren), dram(vttctrl), led(c1)
126 +mpp9 9 gpio, spi0(sck), spi1(sck), nand(we)
127 +mpp10 10 gpio, dram(vttctrl), led(c1), nand(re)
128 mpp11 11 gpio, dev(a0), led(c2), audio(sdo)
129 mpp12 12 gpio, dev(a1), audio(bclk)
130 mpp13 13 gpio, dev(readyn), pcie0(rstoutn), pcie1(rstoutn)
131 diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt
132 index b17c96849fc9..4ac138aaaf87 100644
133 --- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt
134 +++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt
135 @@ -27,15 +27,15 @@ mpp8 8 gpio, ge0(txd1), dev(ad10)
136 mpp9 9 gpio, ge0(txd2), dev(ad11)
137 mpp10 10 gpio, ge0(txd3), dev(ad12)
138 mpp11 11 gpio, ge0(txctl), dev(ad13)
139 -mpp12 12 gpio, ge0(rxd0), pcie0(rstout), pcie1(rstout) [1], spi0(cs1), dev(ad14)
140 -mpp13 13 gpio, ge0(rxd1), pcie0(clkreq), pcie1(clkreq) [1], spi0(cs2), dev(ad15)
141 -mpp14 14 gpio, ge0(rxd2), ptp(clk), m(vtt_ctrl), spi0(cs3), dev(wen1)
142 -mpp15 15 gpio, ge0(rxd3), ge(mdc slave), pcie0(rstout), spi0(mosi), pcie1(rstout) [1]
143 -mpp16 16 gpio, ge0(rxctl), ge(mdio slave), m(decc_err), spi0(miso), pcie0(clkreq)
144 +mpp12 12 gpio, ge0(rxd0), pcie0(rstout), spi0(cs1), dev(ad14), pcie3(clkreq)
145 +mpp13 13 gpio, ge0(rxd1), pcie0(clkreq), pcie1(clkreq) [1], spi0(cs2), dev(ad15), pcie2(clkreq)
146 +mpp14 14 gpio, ge0(rxd2), ptp(clk), m(vtt_ctrl), spi0(cs3), dev(wen1), pcie3(clkreq)
147 +mpp15 15 gpio, ge0(rxd3), ge(mdc slave), pcie0(rstout), spi0(mosi)
148 +mpp16 16 gpio, ge0(rxctl), ge(mdio slave), m(decc_err), spi0(miso), pcie0(clkreq), pcie1(clkreq) [1]
149 mpp17 17 gpio, ge0(rxclk), ptp(clk), ua1(rxd), spi0(sck), sata1(prsnt)
150 -mpp18 18 gpio, ge0(rxerr), ptp(trig_gen), ua1(txd), spi0(cs0), pcie1(rstout) [1]
151 -mpp19 19 gpio, ge0(col), ptp(event_req), pcie0(clkreq), sata1(prsnt), ua0(cts)
152 -mpp20 20 gpio, ge0(txclk), ptp(clk), pcie1(rstout) [1], sata0(prsnt), ua0(rts)
153 +mpp18 18 gpio, ge0(rxerr), ptp(trig_gen), ua1(txd), spi0(cs0)
154 +mpp19 19 gpio, ge0(col), ptp(event_req), ge0(txerr), sata1(prsnt), ua0(cts)
155 +mpp20 20 gpio, ge0(txclk), ptp(clk), sata0(prsnt), ua0(rts)
156 mpp21 21 gpio, spi0(cs1), ge1(rxd0), sata0(prsnt), sd0(cmd), dev(bootcs)
157 mpp22 22 gpio, spi0(mosi), dev(ad0)
158 mpp23 23 gpio, spi0(sck), dev(ad2)
159 @@ -58,23 +58,23 @@ mpp39 39 gpio, i2c1(sck), ge1(rxd2), ua0(cts), sd0(d1), dev(a2)
160 mpp40 40 gpio, i2c1(sda), ge1(rxd3), ua0(rts), sd0(d2), dev(ad6)
161 mpp41 41 gpio, ua1(rxd), ge1(rxctl), ua0(cts), spi1(cs3), dev(burst/last)
162 mpp42 42 gpio, ua1(txd), ua0(rts), dev(ad7)
163 -mpp43 43 gpio, pcie0(clkreq), m(vtt_ctrl), m(decc_err), pcie0(rstout), dev(clkout)
164 -mpp44 44 gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [3], pcie0(rstout)
165 -mpp45 45 gpio, ref(clk_out0), pcie0(rstout), pcie1(rstout) [1], pcie2(rstout), pcie3(rstout)
166 -mpp46 46 gpio, ref(clk_out1), pcie0(rstout), pcie1(rstout) [1], pcie2(rstout), pcie3(rstout)
167 -mpp47 47 gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], spi1(cs2), sata3(prsnt) [2]
168 -mpp48 48 gpio, sata0(prsnt), m(vtt_ctrl), tdm2c(pclk), audio(mclk), sd0(d4)
169 -mpp49 49 gpio, sata2(prsnt) [2], sata3(prsnt) [2], tdm2c(fsync), audio(lrclk), sd0(d5)
170 -mpp50 50 gpio, pcie0(rstout), pcie1(rstout) [1], tdm2c(drx), audio(extclk), sd0(cmd)
171 +mpp43 43 gpio, pcie0(clkreq), m(vtt_ctrl), m(decc_err), spi1(cs2), dev(clkout)
172 +mpp44 44 gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [3]
173 +mpp45 45 gpio, ref(clk_out0), pcie0(rstout)
174 +mpp46 46 gpio, ref(clk_out1), pcie0(rstout)
175 +mpp47 47 gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [2]
176 +mpp48 48 gpio, sata0(prsnt), m(vtt_ctrl), tdm2c(pclk), audio(mclk), sd0(d4), pcie0(clkreq)
177 +mpp49 49 gpio, sata2(prsnt) [2], sata3(prsnt) [2], tdm2c(fsync), audio(lrclk), sd0(d5), pcie1(clkreq)
178 +mpp50 50 gpio, pcie0(rstout), tdm2c(drx), audio(extclk), sd0(cmd)
179 mpp51 51 gpio, tdm2c(dtx), audio(sdo), m(decc_err)
180 -mpp52 52 gpio, pcie0(rstout), pcie1(rstout) [1], tdm2c(intn), audio(sdi), sd0(d6)
181 +mpp52 52 gpio, pcie0(rstout), tdm2c(intn), audio(sdi), sd0(d6)
182 mpp53 53 gpio, sata1(prsnt), sata0(prsnt), tdm2c(rstn), audio(bclk), sd0(d7)
183 -mpp54 54 gpio, sata0(prsnt), sata1(prsnt), pcie0(rstout), pcie1(rstout) [1], sd0(d3)
184 +mpp54 54 gpio, sata0(prsnt), sata1(prsnt), pcie0(rstout), ge0(txerr), sd0(d3)
185 mpp55 55 gpio, ua1(cts), ge(mdio), pcie1(clkreq) [1], spi1(cs1), sd0(d0)
186 mpp56 56 gpio, ua1(rts), ge(mdc), m(decc_err), spi1(mosi)
187 mpp57 57 gpio, spi1(sck), sd0(clk)
188 mpp58 58 gpio, pcie1(clkreq) [1], i2c1(sck), pcie2(clkreq), spi1(miso), sd0(d1)
189 -mpp59 59 gpio, pcie0(rstout), i2c1(sda), pcie1(rstout) [1], spi1(cs0), sd0(d2)
190 +mpp59 59 gpio, pcie0(rstout), i2c1(sda), spi1(cs0), sd0(d2)
191
192 [1]: only available on 88F6820 and 88F6828
193 [2]: only available on 88F6828
194 diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
195 index 373dbccd7ab0..96e7744cab84 100644
196 --- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
197 +++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
198 @@ -42,15 +42,15 @@ mpp20 20 gpio, ge0(rxd4), ge1(rxd2), lcd(d20), ptp(clk)
199 mpp21 21 gpio, ge0(rxd5), ge1(rxd3), lcd(d21), mem(bat)
200 mpp22 22 gpio, ge0(rxd6), ge1(rxctl), lcd(d22), sata0(prsnt)
201 mpp23 23 gpio, ge0(rxd7), ge1(rxclk), lcd(d23), sata1(prsnt)
202 -mpp24 24 gpio, lcd(hsync), sata1(prsnt), nf(bootcs-re), tdm(rst)
203 -mpp25 25 gpio, lcd(vsync), sata0(prsnt), nf(bootcs-we), tdm(pclk)
204 -mpp26 26 gpio, lcd(clk), tdm(fsync), vdd(cpu1-pd)
205 +mpp24 24 gpio, lcd(hsync), sata1(prsnt), tdm(rst)
206 +mpp25 25 gpio, lcd(vsync), sata0(prsnt), tdm(pclk)
207 +mpp26 26 gpio, lcd(clk), tdm(fsync)
208 mpp27 27 gpio, lcd(e), tdm(dtx), ptp(trig)
209 mpp28 28 gpio, lcd(pwm), tdm(drx), ptp(evreq)
210 -mpp29 29 gpio, lcd(ref-clk), tdm(int0), ptp(clk), vdd(cpu0-pd)
211 +mpp29 29 gpio, lcd(ref-clk), tdm(int0), ptp(clk)
212 mpp30 30 gpio, tdm(int1), sd0(clk)
213 -mpp31 31 gpio, tdm(int2), sd0(cmd), vdd(cpu0-pd)
214 -mpp32 32 gpio, tdm(int3), sd0(d0), vdd(cpu1-pd)
215 +mpp31 31 gpio, tdm(int2), sd0(cmd)
216 +mpp32 32 gpio, tdm(int3), sd0(d0)
217 mpp33 33 gpio, tdm(int4), sd0(d1), mem(bat)
218 mpp34 34 gpio, tdm(int5), sd0(d2), sata0(prsnt)
219 mpp35 35 gpio, tdm(int6), sd0(d3), sata1(prsnt)
220 @@ -58,21 +58,18 @@ mpp36 36 gpio, spi(mosi)
221 mpp37 37 gpio, spi(miso)
222 mpp38 38 gpio, spi(sck)
223 mpp39 39 gpio, spi(cs0)
224 -mpp40 40 gpio, spi(cs1), uart2(cts), lcd(vga-hsync), vdd(cpu1-pd),
225 - pcie(clkreq0)
226 +mpp40 40 gpio, spi(cs1), uart2(cts), lcd(vga-hsync), pcie(clkreq0)
227 mpp41 41 gpio, spi(cs2), uart2(rts), lcd(vga-vsync), sata1(prsnt),
228 pcie(clkreq1)
229 -mpp42 42 gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer),
230 - vdd(cpu0-pd)
231 -mpp43 43 gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout),
232 - vdd(cpu2-3-pd){1}
233 +mpp42 42 gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer)
234 +mpp43 43 gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout)
235 mpp44 44 gpio, uart2(cts), uart3(rxd), spi(cs4), pcie(clkreq2),
236 mem(bat)
237 mpp45 45 gpio, uart2(rts), uart3(txd), spi(cs5), sata1(prsnt)
238 mpp46 46 gpio, uart3(rts), uart1(rts), spi(cs6), sata0(prsnt)
239 mpp47 47 gpio, uart3(cts), uart1(cts), spi(cs7), pcie(clkreq3),
240 ref(clkout)
241 -mpp48 48 gpio, tclk, dev(burst/last)
242 +mpp48 48 gpio, dev(clkout), dev(burst/last)
243
244 * Marvell Armada XP (mv78260 and mv78460 only)
245
246 @@ -84,9 +81,9 @@ mpp51 51 gpio, dev(ad16)
247 mpp52 52 gpio, dev(ad17)
248 mpp53 53 gpio, dev(ad18)
249 mpp54 54 gpio, dev(ad19)
250 -mpp55 55 gpio, dev(ad20), vdd(cpu0-pd)
251 -mpp56 56 gpio, dev(ad21), vdd(cpu1-pd)
252 -mpp57 57 gpio, dev(ad22), vdd(cpu2-3-pd){1}
253 +mpp55 55 gpio, dev(ad20)
254 +mpp56 56 gpio, dev(ad21)
255 +mpp57 57 gpio, dev(ad22)
256 mpp58 58 gpio, dev(ad23)
257 mpp59 59 gpio, dev(ad24)
258 mpp60 60 gpio, dev(ad25)
259 @@ -96,6 +93,3 @@ mpp63 63 gpio, dev(ad28)
260 mpp64 64 gpio, dev(ad29)
261 mpp65 65 gpio, dev(ad30)
262 mpp66 66 gpio, dev(ad31)
263 -
264 -Notes:
265 -* {1} vdd(cpu2-3-pd) only available on mv78460.
266 diff --git a/Documentation/devicetree/bindings/usb/atmel-usb.txt b/Documentation/devicetree/bindings/usb/atmel-usb.txt
267 index e180d56c75db..de773a00e2d4 100644
268 --- a/Documentation/devicetree/bindings/usb/atmel-usb.txt
269 +++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt
270 @@ -60,9 +60,9 @@ Atmel High-Speed USB device controller
271
272 Required properties:
273 - compatible: Should be one of the following
274 - "at91sam9rl-udc"
275 - "at91sam9g45-udc"
276 - "sama5d3-udc"
277 + "atmel,at91sam9rl-udc"
278 + "atmel,at91sam9g45-udc"
279 + "atmel,sama5d3-udc"
280 - reg: Address and length of the register set for the device
281 - interrupts: Should contain usba interrupt
282 - ep childnode: To specify the number of endpoints and their properties.
283 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
284 index 6726139bd289..cd03a0faca8f 100644
285 --- a/Documentation/kernel-parameters.txt
286 +++ b/Documentation/kernel-parameters.txt
287 @@ -1398,7 +1398,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
288 The list of supported hash algorithms is defined
289 in crypto/hash_info.h.
290
291 - ima_tcb [IMA]
292 + ima_policy= [IMA]
293 + The builtin measurement policy to load during IMA
294 + setup. Specyfing "tcb" as the value, measures all
295 + programs exec'd, files mmap'd for exec, and all files
296 + opened with the read mode bit set by either the
297 + effective uid (euid=0) or uid=0.
298 + Format: "tcb"
299 +
300 + ima_tcb [IMA] Deprecated. Use ima_policy= instead.
301 Load a policy which meets the needs of the Trusted
302 Computing Base. This means IMA will measure all
303 programs exec'd, files mmap'd for exec, and all files
304 diff --git a/Makefile b/Makefile
305 index e3cdec4898be..36f3225cdf1f 100644
306 --- a/Makefile
307 +++ b/Makefile
308 @@ -1,6 +1,6 @@
309 VERSION = 4
310 PATCHLEVEL = 1
311 -SUBLEVEL = 3
312 +SUBLEVEL = 4
313 EXTRAVERSION =
314 NAME = Series 4800
315
316 diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
317 index 89ef4a540db5..45e7761b7a29 100644
318 --- a/arch/arm/boot/dts/at91-sama5d4ek.dts
319 +++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
320 @@ -108,8 +108,8 @@
321 mmc0: mmc@f8000000 {
322 pinctrl-names = "default";
323 pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_cd>;
324 - slot@1 {
325 - reg = <1>;
326 + slot@0 {
327 + reg = <0>;
328 bus-width = <4>;
329 cd-gpios = <&pioE 5 0>;
330 };
331 diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
332 index 70e59c5ceb2f..e54421176af8 100644
333 --- a/arch/arm/boot/dts/at91sam9g45.dtsi
334 +++ b/arch/arm/boot/dts/at91sam9g45.dtsi
335 @@ -1148,7 +1148,7 @@
336 usb2: gadget@fff78000 {
337 #address-cells = <1>;
338 #size-cells = <0>;
339 - compatible = "atmel,at91sam9rl-udc";
340 + compatible = "atmel,at91sam9g45-udc";
341 reg = <0x00600000 0x80000
342 0xfff78000 0x400>;
343 interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
344 diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
345 index 3aa56ae3410a..3314a7303754 100644
346 --- a/arch/arm/boot/dts/at91sam9x5.dtsi
347 +++ b/arch/arm/boot/dts/at91sam9x5.dtsi
348 @@ -1062,7 +1062,7 @@
349 usb2: gadget@f803c000 {
350 #address-cells = <1>;
351 #size-cells = <0>;
352 - compatible = "atmel,at91sam9rl-udc";
353 + compatible = "atmel,at91sam9g45-udc";
354 reg = <0x00500000 0x80000
355 0xf803c000 0x400>;
356 interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
357 diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
358 index bbcfb5a19c77..0cb8b0b11c3f 100644
359 --- a/arch/arm/boot/dts/imx23.dtsi
360 +++ b/arch/arm/boot/dts/imx23.dtsi
361 @@ -435,6 +435,7 @@
362 interrupts = <36 37 38 39 40 41 42 43 44>;
363 status = "disabled";
364 clocks = <&clks 26>;
365 + #io-channel-cells = <1>;
366 };
367
368 spdif@80054000 {
369 diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
370 index 57ab8587f7b9..37e6182f1470 100644
371 --- a/arch/arm/boot/dts/sama5d3.dtsi
372 +++ b/arch/arm/boot/dts/sama5d3.dtsi
373 @@ -1321,7 +1321,7 @@
374 usb0: gadget@00500000 {
375 #address-cells = <1>;
376 #size-cells = <0>;
377 - compatible = "atmel,at91sam9rl-udc";
378 + compatible = "atmel,sama5d3-udc";
379 reg = <0x00500000 0x100000
380 0xf8030000 0x4000>;
381 interrupts = <33 IRQ_TYPE_LEVEL_HIGH 2>;
382 diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
383 index 6b1bb58f9c0b..a5f5f4090af6 100644
384 --- a/arch/arm/boot/dts/sama5d4.dtsi
385 +++ b/arch/arm/boot/dts/sama5d4.dtsi
386 @@ -123,7 +123,7 @@
387 usb0: gadget@00400000 {
388 #address-cells = <1>;
389 #size-cells = <0>;
390 - compatible = "atmel,at91sam9rl-udc";
391 + compatible = "atmel,sama5d3-udc";
392 reg = <0x00400000 0x100000
393 0xfc02c000 0x4000>;
394 interrupts = <47 IRQ_TYPE_LEVEL_HIGH 2>;
395 @@ -1125,10 +1125,10 @@
396 compatible = "atmel,at91sam9g46-aes";
397 reg = <0xfc044000 0x100>;
398 interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
399 - dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
400 - AT91_XDMAC_DT_PERID(41)>,
401 - <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
402 - AT91_XDMAC_DT_PERID(40)>;
403 + dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
404 + | AT91_XDMAC_DT_PERID(41))>,
405 + <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
406 + | AT91_XDMAC_DT_PERID(40))>;
407 dma-names = "tx", "rx";
408 clocks = <&aes_clk>;
409 clock-names = "aes_clk";
410 @@ -1139,10 +1139,10 @@
411 compatible = "atmel,at91sam9g46-tdes";
412 reg = <0xfc04c000 0x100>;
413 interrupts = <14 IRQ_TYPE_LEVEL_HIGH 0>;
414 - dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
415 - AT91_XDMAC_DT_PERID(42)>,
416 - <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
417 - AT91_XDMAC_DT_PERID(43)>;
418 + dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
419 + | AT91_XDMAC_DT_PERID(42))>,
420 + <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
421 + | AT91_XDMAC_DT_PERID(43))>;
422 dma-names = "tx", "rx";
423 clocks = <&tdes_clk>;
424 clock-names = "tdes_clk";
425 @@ -1153,8 +1153,8 @@
426 compatible = "atmel,at91sam9g46-sha";
427 reg = <0xfc050000 0x100>;
428 interrupts = <15 IRQ_TYPE_LEVEL_HIGH 0>;
429 - dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
430 - AT91_XDMAC_DT_PERID(44)>;
431 + dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
432 + | AT91_XDMAC_DT_PERID(44))>;
433 dma-names = "tx";
434 clocks = <&sha_clk>;
435 clock-names = "sha_clk";
436 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
437 index cca5b8758185..f11d82527076 100644
438 --- a/arch/arm/kernel/smp.c
439 +++ b/arch/arm/kernel/smp.c
440 @@ -576,7 +576,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
441 struct pt_regs *old_regs = set_irq_regs(regs);
442
443 if ((unsigned)ipinr < NR_IPI) {
444 - trace_ipi_entry(ipi_types[ipinr]);
445 + trace_ipi_entry_rcuidle(ipi_types[ipinr]);
446 __inc_irq_stat(cpu, ipi_irqs[ipinr]);
447 }
448
449 @@ -635,7 +635,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
450 }
451
452 if ((unsigned)ipinr < NR_IPI)
453 - trace_ipi_exit(ipi_types[ipinr]);
454 + trace_ipi_exit_rcuidle(ipi_types[ipinr]);
455 set_irq_regs(old_regs);
456 }
457
458 diff --git a/arch/arm/mach-dove/include/mach/irqs.h b/arch/arm/mach-dove/include/mach/irqs.h
459 index 03d401d20453..3f29e6bca058 100644
460 --- a/arch/arm/mach-dove/include/mach/irqs.h
461 +++ b/arch/arm/mach-dove/include/mach/irqs.h
462 @@ -14,73 +14,73 @@
463 /*
464 * Dove Low Interrupt Controller
465 */
466 -#define IRQ_DOVE_BRIDGE 0
467 -#define IRQ_DOVE_H2C 1
468 -#define IRQ_DOVE_C2H 2
469 -#define IRQ_DOVE_NAND 3
470 -#define IRQ_DOVE_PDMA 4
471 -#define IRQ_DOVE_SPI1 5
472 -#define IRQ_DOVE_SPI0 6
473 -#define IRQ_DOVE_UART_0 7
474 -#define IRQ_DOVE_UART_1 8
475 -#define IRQ_DOVE_UART_2 9
476 -#define IRQ_DOVE_UART_3 10
477 -#define IRQ_DOVE_I2C 11
478 -#define IRQ_DOVE_GPIO_0_7 12
479 -#define IRQ_DOVE_GPIO_8_15 13
480 -#define IRQ_DOVE_GPIO_16_23 14
481 -#define IRQ_DOVE_PCIE0_ERR 15
482 -#define IRQ_DOVE_PCIE0 16
483 -#define IRQ_DOVE_PCIE1_ERR 17
484 -#define IRQ_DOVE_PCIE1 18
485 -#define IRQ_DOVE_I2S0 19
486 -#define IRQ_DOVE_I2S0_ERR 20
487 -#define IRQ_DOVE_I2S1 21
488 -#define IRQ_DOVE_I2S1_ERR 22
489 -#define IRQ_DOVE_USB_ERR 23
490 -#define IRQ_DOVE_USB0 24
491 -#define IRQ_DOVE_USB1 25
492 -#define IRQ_DOVE_GE00_RX 26
493 -#define IRQ_DOVE_GE00_TX 27
494 -#define IRQ_DOVE_GE00_MISC 28
495 -#define IRQ_DOVE_GE00_SUM 29
496 -#define IRQ_DOVE_GE00_ERR 30
497 -#define IRQ_DOVE_CRYPTO 31
498 +#define IRQ_DOVE_BRIDGE (1 + 0)
499 +#define IRQ_DOVE_H2C (1 + 1)
500 +#define IRQ_DOVE_C2H (1 + 2)
501 +#define IRQ_DOVE_NAND (1 + 3)
502 +#define IRQ_DOVE_PDMA (1 + 4)
503 +#define IRQ_DOVE_SPI1 (1 + 5)
504 +#define IRQ_DOVE_SPI0 (1 + 6)
505 +#define IRQ_DOVE_UART_0 (1 + 7)
506 +#define IRQ_DOVE_UART_1 (1 + 8)
507 +#define IRQ_DOVE_UART_2 (1 + 9)
508 +#define IRQ_DOVE_UART_3 (1 + 10)
509 +#define IRQ_DOVE_I2C (1 + 11)
510 +#define IRQ_DOVE_GPIO_0_7 (1 + 12)
511 +#define IRQ_DOVE_GPIO_8_15 (1 + 13)
512 +#define IRQ_DOVE_GPIO_16_23 (1 + 14)
513 +#define IRQ_DOVE_PCIE0_ERR (1 + 15)
514 +#define IRQ_DOVE_PCIE0 (1 + 16)
515 +#define IRQ_DOVE_PCIE1_ERR (1 + 17)
516 +#define IRQ_DOVE_PCIE1 (1 + 18)
517 +#define IRQ_DOVE_I2S0 (1 + 19)
518 +#define IRQ_DOVE_I2S0_ERR (1 + 20)
519 +#define IRQ_DOVE_I2S1 (1 + 21)
520 +#define IRQ_DOVE_I2S1_ERR (1 + 22)
521 +#define IRQ_DOVE_USB_ERR (1 + 23)
522 +#define IRQ_DOVE_USB0 (1 + 24)
523 +#define IRQ_DOVE_USB1 (1 + 25)
524 +#define IRQ_DOVE_GE00_RX (1 + 26)
525 +#define IRQ_DOVE_GE00_TX (1 + 27)
526 +#define IRQ_DOVE_GE00_MISC (1 + 28)
527 +#define IRQ_DOVE_GE00_SUM (1 + 29)
528 +#define IRQ_DOVE_GE00_ERR (1 + 30)
529 +#define IRQ_DOVE_CRYPTO (1 + 31)
530
531 /*
532 * Dove High Interrupt Controller
533 */
534 -#define IRQ_DOVE_AC97 32
535 -#define IRQ_DOVE_PMU 33
536 -#define IRQ_DOVE_CAM 34
537 -#define IRQ_DOVE_SDIO0 35
538 -#define IRQ_DOVE_SDIO1 36
539 -#define IRQ_DOVE_SDIO0_WAKEUP 37
540 -#define IRQ_DOVE_SDIO1_WAKEUP 38
541 -#define IRQ_DOVE_XOR_00 39
542 -#define IRQ_DOVE_XOR_01 40
543 -#define IRQ_DOVE_XOR0_ERR 41
544 -#define IRQ_DOVE_XOR_10 42
545 -#define IRQ_DOVE_XOR_11 43
546 -#define IRQ_DOVE_XOR1_ERR 44
547 -#define IRQ_DOVE_LCD_DCON 45
548 -#define IRQ_DOVE_LCD1 46
549 -#define IRQ_DOVE_LCD0 47
550 -#define IRQ_DOVE_GPU 48
551 -#define IRQ_DOVE_PERFORM_MNTR 49
552 -#define IRQ_DOVE_VPRO_DMA1 51
553 -#define IRQ_DOVE_SSP_TIMER 54
554 -#define IRQ_DOVE_SSP 55
555 -#define IRQ_DOVE_MC_L2_ERR 56
556 -#define IRQ_DOVE_CRYPTO_ERR 59
557 -#define IRQ_DOVE_GPIO_24_31 60
558 -#define IRQ_DOVE_HIGH_GPIO 61
559 -#define IRQ_DOVE_SATA 62
560 +#define IRQ_DOVE_AC97 (1 + 32)
561 +#define IRQ_DOVE_PMU (1 + 33)
562 +#define IRQ_DOVE_CAM (1 + 34)
563 +#define IRQ_DOVE_SDIO0 (1 + 35)
564 +#define IRQ_DOVE_SDIO1 (1 + 36)
565 +#define IRQ_DOVE_SDIO0_WAKEUP (1 + 37)
566 +#define IRQ_DOVE_SDIO1_WAKEUP (1 + 38)
567 +#define IRQ_DOVE_XOR_00 (1 + 39)
568 +#define IRQ_DOVE_XOR_01 (1 + 40)
569 +#define IRQ_DOVE_XOR0_ERR (1 + 41)
570 +#define IRQ_DOVE_XOR_10 (1 + 42)
571 +#define IRQ_DOVE_XOR_11 (1 + 43)
572 +#define IRQ_DOVE_XOR1_ERR (1 + 44)
573 +#define IRQ_DOVE_LCD_DCON (1 + 45)
574 +#define IRQ_DOVE_LCD1 (1 + 46)
575 +#define IRQ_DOVE_LCD0 (1 + 47)
576 +#define IRQ_DOVE_GPU (1 + 48)
577 +#define IRQ_DOVE_PERFORM_MNTR (1 + 49)
578 +#define IRQ_DOVE_VPRO_DMA1 (1 + 51)
579 +#define IRQ_DOVE_SSP_TIMER (1 + 54)
580 +#define IRQ_DOVE_SSP (1 + 55)
581 +#define IRQ_DOVE_MC_L2_ERR (1 + 56)
582 +#define IRQ_DOVE_CRYPTO_ERR (1 + 59)
583 +#define IRQ_DOVE_GPIO_24_31 (1 + 60)
584 +#define IRQ_DOVE_HIGH_GPIO (1 + 61)
585 +#define IRQ_DOVE_SATA (1 + 62)
586
587 /*
588 * DOVE General Purpose Pins
589 */
590 -#define IRQ_DOVE_GPIO_START 64
591 +#define IRQ_DOVE_GPIO_START 65
592 #define NR_GPIO_IRQS 64
593
594 /*
595 diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
596 index 4a5a7aedcb76..df0223f76fa9 100644
597 --- a/arch/arm/mach-dove/irq.c
598 +++ b/arch/arm/mach-dove/irq.c
599 @@ -126,14 +126,14 @@ __exception_irq_entry dove_legacy_handle_irq(struct pt_regs *regs)
600 stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_LOW_OFF);
601 stat &= readl_relaxed(dove_irq_base + IRQ_MASK_LOW_OFF);
602 if (stat) {
603 - unsigned int hwirq = __fls(stat);
604 + unsigned int hwirq = 1 + __fls(stat);
605 handle_IRQ(hwirq, regs);
606 return;
607 }
608 stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_HIGH_OFF);
609 stat &= readl_relaxed(dove_irq_base + IRQ_MASK_HIGH_OFF);
610 if (stat) {
611 - unsigned int hwirq = 32 + __fls(stat);
612 + unsigned int hwirq = 33 + __fls(stat);
613 handle_IRQ(hwirq, regs);
614 return;
615 }
616 @@ -144,8 +144,8 @@ void __init dove_init_irq(void)
617 {
618 int i;
619
620 - orion_irq_init(0, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF);
621 - orion_irq_init(32, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF);
622 + orion_irq_init(1, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF);
623 + orion_irq_init(33, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF);
624
625 #ifdef CONFIG_MULTI_IRQ_HANDLER
626 set_handle_irq(dove_legacy_handle_irq);
627 diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
628 index 9005b07296c8..aedec81d1198 100644
629 --- a/arch/arm/vdso/vdsomunge.c
630 +++ b/arch/arm/vdso/vdsomunge.c
631 @@ -45,13 +45,11 @@
632 * it does.
633 */
634
635 -#define _GNU_SOURCE
636 -
637 #include <byteswap.h>
638 #include <elf.h>
639 #include <errno.h>
640 -#include <error.h>
641 #include <fcntl.h>
642 +#include <stdarg.h>
643 #include <stdbool.h>
644 #include <stdio.h>
645 #include <stdlib.h>
646 @@ -82,11 +80,25 @@
647 #define EF_ARM_ABI_FLOAT_HARD 0x400
648 #endif
649
650 +static int failed;
651 +static const char *argv0;
652 static const char *outfile;
653
654 +static void fail(const char *fmt, ...)
655 +{
656 + va_list ap;
657 +
658 + failed = 1;
659 + fprintf(stderr, "%s: ", argv0);
660 + va_start(ap, fmt);
661 + vfprintf(stderr, fmt, ap);
662 + va_end(ap);
663 + exit(EXIT_FAILURE);
664 +}
665 +
666 static void cleanup(void)
667 {
668 - if (error_message_count > 0 && outfile != NULL)
669 + if (failed && outfile != NULL)
670 unlink(outfile);
671 }
672
673 @@ -119,68 +131,66 @@ int main(int argc, char **argv)
674 int infd;
675
676 atexit(cleanup);
677 + argv0 = argv[0];
678
679 if (argc != 3)
680 - error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
681 + fail("Usage: %s [infile] [outfile]\n", argv[0]);
682
683 infile = argv[1];
684 outfile = argv[2];
685
686 infd = open(infile, O_RDONLY);
687 if (infd < 0)
688 - error(EXIT_FAILURE, errno, "Cannot open %s", infile);
689 + fail("Cannot open %s: %s\n", infile, strerror(errno));
690
691 if (fstat(infd, &stat) != 0)
692 - error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
693 + fail("Failed stat for %s: %s\n", infile, strerror(errno));
694
695 inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
696 if (inbuf == MAP_FAILED)
697 - error(EXIT_FAILURE, errno, "Failed to map %s", infile);
698 + fail("Failed to map %s: %s\n", infile, strerror(errno));
699
700 close(infd);
701
702 inhdr = inbuf;
703
704 if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
705 - error(EXIT_FAILURE, 0, "Not an ELF file");
706 + fail("Not an ELF file\n");
707
708 if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
709 - error(EXIT_FAILURE, 0, "Unsupported ELF class");
710 + fail("Unsupported ELF class\n");
711
712 swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
713
714 if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
715 - error(EXIT_FAILURE, 0, "Not a shared object");
716 + fail("Not a shared object\n");
717
718 - if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
719 - error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
720 - inhdr->e_machine);
721 - }
722 + if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
723 + fail("Unsupported architecture %#x\n", inhdr->e_machine);
724
725 e_flags = read_elf_word(inhdr->e_flags, swap);
726
727 if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
728 - error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
729 - EF_ARM_EABI_VERSION(e_flags));
730 + fail("Unsupported EABI version %#x\n",
731 + EF_ARM_EABI_VERSION(e_flags));
732 }
733
734 if (e_flags & EF_ARM_ABI_FLOAT_HARD)
735 - error(EXIT_FAILURE, 0,
736 - "Unexpected hard-float flag set in e_flags");
737 + fail("Unexpected hard-float flag set in e_flags\n");
738
739 clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
740
741 outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
742 if (outfd < 0)
743 - error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
744 + fail("Cannot open %s: %s\n", outfile, strerror(errno));
745
746 if (ftruncate(outfd, stat.st_size) != 0)
747 - error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
748 + fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
749
750 outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
751 outfd, 0);
752 if (outbuf == MAP_FAILED)
753 - error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
754 + fail("Failed to map %s: %s\n", outfile, strerror(errno));
755
756 close(outfd);
757
758 @@ -195,7 +205,7 @@ int main(int argc, char **argv)
759 }
760
761 if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
762 - error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
763 + fail("Failed to sync %s: %s\n", outfile, strerror(errno));
764
765 return EXIT_SUCCESS;
766 }
767 diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
768 index 2cb008177252..d3a202b85ba6 100644
769 --- a/arch/arm64/kernel/smp.c
770 +++ b/arch/arm64/kernel/smp.c
771 @@ -569,7 +569,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
772 struct pt_regs *old_regs = set_irq_regs(regs);
773
774 if ((unsigned)ipinr < NR_IPI) {
775 - trace_ipi_entry(ipi_types[ipinr]);
776 + trace_ipi_entry_rcuidle(ipi_types[ipinr]);
777 __inc_irq_stat(cpu, ipi_irqs[ipinr]);
778 }
779
780 @@ -612,7 +612,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
781 }
782
783 if ((unsigned)ipinr < NR_IPI)
784 - trace_ipi_exit(ipi_types[ipinr]);
785 + trace_ipi_exit_rcuidle(ipi_types[ipinr]);
786 set_irq_regs(old_regs);
787 }
788
789 diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
790 index 2de9d2e59d96..0eeb4f0930a0 100644
791 --- a/arch/arm64/mm/hugetlbpage.c
792 +++ b/arch/arm64/mm/hugetlbpage.c
793 @@ -40,13 +40,13 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
794
795 int pmd_huge(pmd_t pmd)
796 {
797 - return !(pmd_val(pmd) & PMD_TABLE_BIT);
798 + return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
799 }
800
801 int pud_huge(pud_t pud)
802 {
803 #ifndef __PAGETABLE_PMD_FOLDED
804 - return !(pud_val(pud) & PUD_TABLE_BIT);
805 + return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
806 #else
807 return 0;
808 #endif
809 diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
810 index de0a81a539a0..98a26ce82d26 100644
811 --- a/arch/arm64/net/bpf_jit.h
812 +++ b/arch/arm64/net/bpf_jit.h
813 @@ -110,6 +110,10 @@
814 /* Rd = Rn >> shift; signed */
815 #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
816
817 +/* Zero extend */
818 +#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
819 +#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
820 +
821 /* Move wide (immediate) */
822 #define A64_MOVEW(sf, Rd, imm16, shift, type) \
823 aarch64_insn_gen_movewide(Rd, imm16, shift, \
824 diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
825 index dc6a4842683a..c047598b09e0 100644
826 --- a/arch/arm64/net/bpf_jit_comp.c
827 +++ b/arch/arm64/net/bpf_jit_comp.c
828 @@ -113,9 +113,9 @@ static inline void emit_a64_mov_i(const int is64, const int reg,
829 static inline int bpf2a64_offset(int bpf_to, int bpf_from,
830 const struct jit_ctx *ctx)
831 {
832 - int to = ctx->offset[bpf_to + 1];
833 + int to = ctx->offset[bpf_to];
834 /* -1 to account for the Branch instruction */
835 - int from = ctx->offset[bpf_from + 1] - 1;
836 + int from = ctx->offset[bpf_from] - 1;
837
838 return to - from;
839 }
840 @@ -289,23 +289,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
841 case BPF_ALU | BPF_END | BPF_FROM_BE:
842 #ifdef CONFIG_CPU_BIG_ENDIAN
843 if (BPF_SRC(code) == BPF_FROM_BE)
844 - break;
845 + goto emit_bswap_uxt;
846 #else /* !CONFIG_CPU_BIG_ENDIAN */
847 if (BPF_SRC(code) == BPF_FROM_LE)
848 - break;
849 + goto emit_bswap_uxt;
850 #endif
851 switch (imm) {
852 case 16:
853 emit(A64_REV16(is64, dst, dst), ctx);
854 + /* zero-extend 16 bits into 64 bits */
855 + emit(A64_UXTH(is64, dst, dst), ctx);
856 break;
857 case 32:
858 emit(A64_REV32(is64, dst, dst), ctx);
859 + /* upper 32 bits already cleared */
860 break;
861 case 64:
862 emit(A64_REV64(dst, dst), ctx);
863 break;
864 }
865 break;
866 +emit_bswap_uxt:
867 + switch (imm) {
868 + case 16:
869 + /* zero-extend 16 bits into 64 bits */
870 + emit(A64_UXTH(is64, dst, dst), ctx);
871 + break;
872 + case 32:
873 + /* zero-extend 32 bits into 64 bits */
874 + emit(A64_UXTW(is64, dst, dst), ctx);
875 + break;
876 + case 64:
877 + /* nop */
878 + break;
879 + }
880 + break;
881 /* dst = imm */
882 case BPF_ALU | BPF_MOV | BPF_K:
883 case BPF_ALU64 | BPF_MOV | BPF_K:
884 @@ -640,10 +658,11 @@ static int build_body(struct jit_ctx *ctx)
885 const struct bpf_insn *insn = &prog->insnsi[i];
886 int ret;
887
888 + ret = build_insn(insn, ctx);
889 +
890 if (ctx->image == NULL)
891 ctx->offset[i] = ctx->idx;
892
893 - ret = build_insn(insn, ctx);
894 if (ret > 0) {
895 i++;
896 continue;
897 diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
898 index 33013dfcd3e1..5c68c85d5dbe 100644
899 --- a/arch/m68k/Kconfig.cpu
900 +++ b/arch/m68k/Kconfig.cpu
901 @@ -125,6 +125,13 @@ endif # M68KCLASSIC
902
903 if COLDFIRE
904
905 +choice
906 + prompt "ColdFire SoC type"
907 + default M520x
908 + help
909 + Select the type of ColdFire System-on-Chip (SoC) that you want
910 + to build for.
911 +
912 config M5206
913 bool "MCF5206"
914 depends on !MMU
915 @@ -174,9 +181,6 @@ config M525x
916 help
917 Freescale (Motorola) Coldfire 5251/5253 processor support.
918
919 -config M527x
920 - bool
921 -
922 config M5271
923 bool "MCF5271"
924 depends on !MMU
925 @@ -223,9 +227,6 @@ config M5307
926 help
927 Motorola ColdFire 5307 processor support.
928
929 -config M53xx
930 - bool
931 -
932 config M532x
933 bool "MCF532x"
934 depends on !MMU
935 @@ -251,9 +252,6 @@ config M5407
936 help
937 Motorola ColdFire 5407 processor support.
938
939 -config M54xx
940 - bool
941 -
942 config M547x
943 bool "MCF547x"
944 select M54xx
945 @@ -280,6 +278,17 @@ config M5441x
946 help
947 Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
948
949 +endchoice
950 +
951 +config M527x
952 + bool
953 +
954 +config M53xx
955 + bool
956 +
957 +config M54xx
958 + bool
959 +
960 endif # COLDFIRE
961
962
963 @@ -416,22 +425,10 @@ config HAVE_MBAR
964 config HAVE_IPSBAR
965 bool
966
967 -config CLOCK_SET
968 - bool "Enable setting the CPU clock frequency"
969 - depends on COLDFIRE
970 - default n
971 - help
972 - On some CPU's you do not need to know what the core CPU clock
973 - frequency is. On these you can disable clock setting. On some
974 - traditional 68K parts, and on all ColdFire parts you need to set
975 - the appropriate CPU clock frequency. On these devices many of the
976 - onboard peripherals derive their timing from the master CPU clock
977 - frequency.
978 -
979 config CLOCK_FREQ
980 int "Set the core clock frequency"
981 default "66666666"
982 - depends on CLOCK_SET
983 + depends on COLDFIRE
984 help
985 Define the CPU clock frequency in use. This is the core clock
986 frequency, it may or may not be the same as the external clock
987 diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h
988 index c94557b91448..50aa4dac9ca2 100644
989 --- a/arch/m68k/include/asm/coldfire.h
990 +++ b/arch/m68k/include/asm/coldfire.h
991 @@ -19,7 +19,7 @@
992 * in any case new boards come along from time to time that have yet
993 * another different clocking frequency.
994 */
995 -#ifdef CONFIG_CLOCK_SET
996 +#ifdef CONFIG_CLOCK_FREQ
997 #define MCF_CLK CONFIG_CLOCK_FREQ
998 #else
999 #error "Don't know what your ColdFire CPU clock frequency is??"
1000 diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
1001 index e5a693b16da2..443f44de1020 100644
1002 --- a/arch/openrisc/Kconfig
1003 +++ b/arch/openrisc/Kconfig
1004 @@ -17,6 +17,7 @@ config OPENRISC
1005 select GENERIC_IRQ_SHOW
1006 select GENERIC_IOMAP
1007 select GENERIC_CPU_DEVICES
1008 + select HAVE_UID16
1009 select GENERIC_ATOMIC64
1010 select GENERIC_CLOCKEVENTS
1011 select GENERIC_STRNCPY_FROM_USER
1012 @@ -31,9 +32,6 @@ config MMU
1013 config HAVE_DMA_ATTRS
1014 def_bool y
1015
1016 -config UID16
1017 - def_bool y
1018 -
1019 config RWSEM_GENERIC_SPINLOCK
1020 def_bool y
1021
1022 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
1023 index 9d518d693b4b..844b06d67df4 100644
1024 --- a/arch/x86/mm/mmap.c
1025 +++ b/arch/x86/mm/mmap.c
1026 @@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1027 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1028 }
1029 }
1030 +
1031 +const char *arch_vma_name(struct vm_area_struct *vma)
1032 +{
1033 + if (vma->vm_flags & VM_MPX)
1034 + return "[mpx]";
1035 + return NULL;
1036 +}
1037 diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
1038 index c439ec478216..4d1c11c07fe1 100644
1039 --- a/arch/x86/mm/mpx.c
1040 +++ b/arch/x86/mm/mpx.c
1041 @@ -18,26 +18,9 @@
1042 #include <asm/processor.h>
1043 #include <asm/fpu-internal.h>
1044
1045 -static const char *mpx_mapping_name(struct vm_area_struct *vma)
1046 -{
1047 - return "[mpx]";
1048 -}
1049 -
1050 -static struct vm_operations_struct mpx_vma_ops = {
1051 - .name = mpx_mapping_name,
1052 -};
1053 -
1054 -static int is_mpx_vma(struct vm_area_struct *vma)
1055 -{
1056 - return (vma->vm_ops == &mpx_vma_ops);
1057 -}
1058 -
1059 /*
1060 * This is really a simplified "vm_mmap". it only handles MPX
1061 * bounds tables (the bounds directory is user-allocated).
1062 - *
1063 - * Later on, we use the vma->vm_ops to uniquely identify these
1064 - * VMAs.
1065 */
1066 static unsigned long mpx_mmap(unsigned long len)
1067 {
1068 @@ -83,7 +66,6 @@ static unsigned long mpx_mmap(unsigned long len)
1069 ret = -ENOMEM;
1070 goto out;
1071 }
1072 - vma->vm_ops = &mpx_vma_ops;
1073
1074 if (vm_flags & VM_LOCKED) {
1075 up_write(&mm->mmap_sem);
1076 @@ -661,7 +643,7 @@ static int zap_bt_entries(struct mm_struct *mm,
1077 * so stop immediately and return an error. This
1078 * probably results in a SIGSEGV.
1079 */
1080 - if (!is_mpx_vma(vma))
1081 + if (!(vma->vm_flags & VM_MPX))
1082 return -EINVAL;
1083
1084 len = min(vma->vm_end, end) - addr;
1085 diff --git a/block/bio.c b/block/bio.c
1086 index f66a4eae16ee..4441522ca339 100644
1087 --- a/block/bio.c
1088 +++ b/block/bio.c
1089 @@ -1814,8 +1814,9 @@ EXPORT_SYMBOL(bio_endio_nodec);
1090 * Allocates and returns a new bio which represents @sectors from the start of
1091 * @bio, and updates @bio to represent the remaining sectors.
1092 *
1093 - * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
1094 - * responsibility to ensure that @bio is not freed before the split.
1095 + * Unless this is a discard request the newly allocated bio will point
1096 + * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1097 + * @bio is not freed before the split.
1098 */
1099 struct bio *bio_split(struct bio *bio, int sectors,
1100 gfp_t gfp, struct bio_set *bs)
1101 @@ -1825,7 +1826,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
1102 BUG_ON(sectors <= 0);
1103 BUG_ON(sectors >= bio_sectors(bio));
1104
1105 - split = bio_clone_fast(bio, gfp, bs);
1106 + /*
1107 + * Discards need a mutable bio_vec to accommodate the payload
1108 + * required by the DSM TRIM and UNMAP commands.
1109 + */
1110 + if (bio->bi_rw & REQ_DISCARD)
1111 + split = bio_clone_bioset(bio, gfp, bs);
1112 + else
1113 + split = bio_clone_fast(bio, gfp, bs);
1114 +
1115 if (!split)
1116 return NULL;
1117
1118 diff --git a/crypto/asymmetric_keys/asymmetric_keys.h b/crypto/asymmetric_keys/asymmetric_keys.h
1119 index f97330886d58..3f5b537ab33e 100644
1120 --- a/crypto/asymmetric_keys/asymmetric_keys.h
1121 +++ b/crypto/asymmetric_keys/asymmetric_keys.h
1122 @@ -11,6 +11,9 @@
1123
1124 extern struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id);
1125
1126 +extern int __asymmetric_key_hex_to_key_id(const char *id,
1127 + struct asymmetric_key_id *match_id,
1128 + size_t hexlen);
1129 static inline
1130 const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
1131 {
1132 diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
1133 index bcbbbd794e1d..b0e4ed23d668 100644
1134 --- a/crypto/asymmetric_keys/asymmetric_type.c
1135 +++ b/crypto/asymmetric_keys/asymmetric_type.c
1136 @@ -104,6 +104,15 @@ static bool asymmetric_match_key_ids(
1137 return false;
1138 }
1139
1140 +/* helper function can be called directly with pre-allocated memory */
1141 +inline int __asymmetric_key_hex_to_key_id(const char *id,
1142 + struct asymmetric_key_id *match_id,
1143 + size_t hexlen)
1144 +{
1145 + match_id->len = hexlen;
1146 + return hex2bin(match_id->data, id, hexlen);
1147 +}
1148 +
1149 /**
1150 * asymmetric_key_hex_to_key_id - Convert a hex string into a key ID.
1151 * @id: The ID as a hex string.
1152 @@ -111,21 +120,20 @@ static bool asymmetric_match_key_ids(
1153 struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id)
1154 {
1155 struct asymmetric_key_id *match_id;
1156 - size_t hexlen;
1157 + size_t asciihexlen;
1158 int ret;
1159
1160 if (!*id)
1161 return ERR_PTR(-EINVAL);
1162 - hexlen = strlen(id);
1163 - if (hexlen & 1)
1164 + asciihexlen = strlen(id);
1165 + if (asciihexlen & 1)
1166 return ERR_PTR(-EINVAL);
1167
1168 - match_id = kmalloc(sizeof(struct asymmetric_key_id) + hexlen / 2,
1169 + match_id = kmalloc(sizeof(struct asymmetric_key_id) + asciihexlen / 2,
1170 GFP_KERNEL);
1171 if (!match_id)
1172 return ERR_PTR(-ENOMEM);
1173 - match_id->len = hexlen / 2;
1174 - ret = hex2bin(match_id->data, id, hexlen / 2);
1175 + ret = __asymmetric_key_hex_to_key_id(id, match_id, asciihexlen / 2);
1176 if (ret < 0) {
1177 kfree(match_id);
1178 return ERR_PTR(-EINVAL);
1179 diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
1180 index a6c42031628e..24f17e6c5904 100644
1181 --- a/crypto/asymmetric_keys/x509_public_key.c
1182 +++ b/crypto/asymmetric_keys/x509_public_key.c
1183 @@ -28,17 +28,30 @@ static bool use_builtin_keys;
1184 static struct asymmetric_key_id *ca_keyid;
1185
1186 #ifndef MODULE
1187 +static struct {
1188 + struct asymmetric_key_id id;
1189 + unsigned char data[10];
1190 +} cakey;
1191 +
1192 static int __init ca_keys_setup(char *str)
1193 {
1194 if (!str) /* default system keyring */
1195 return 1;
1196
1197 if (strncmp(str, "id:", 3) == 0) {
1198 - struct asymmetric_key_id *p;
1199 - p = asymmetric_key_hex_to_key_id(str + 3);
1200 - if (p == ERR_PTR(-EINVAL))
1201 - pr_err("Unparsable hex string in ca_keys\n");
1202 - else if (!IS_ERR(p))
1203 + struct asymmetric_key_id *p = &cakey.id;
1204 + size_t hexlen = (strlen(str) - 3) / 2;
1205 + int ret;
1206 +
1207 + if (hexlen == 0 || hexlen > sizeof(cakey.data)) {
1208 + pr_err("Missing or invalid ca_keys id\n");
1209 + return 1;
1210 + }
1211 +
1212 + ret = __asymmetric_key_hex_to_key_id(str + 3, p, hexlen);
1213 + if (ret < 0)
1214 + pr_err("Unparsable ca_keys id hex string\n");
1215 + else
1216 ca_keyid = p; /* owner key 'id:xxxxxx' */
1217 } else if (strcmp(str, "builtin") == 0) {
1218 use_builtin_keys = true;
1219 diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
1220 index 37fb19047603..73f056a597a9 100644
1221 --- a/drivers/acpi/acpi_lpss.c
1222 +++ b/drivers/acpi/acpi_lpss.c
1223 @@ -352,13 +352,16 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
1224 pdata->mmio_size = resource_size(rentry->res);
1225 pdata->mmio_base = ioremap(rentry->res->start,
1226 pdata->mmio_size);
1227 - if (!pdata->mmio_base)
1228 - goto err_out;
1229 break;
1230 }
1231
1232 acpi_dev_free_resource_list(&resource_list);
1233
1234 + if (!pdata->mmio_base) {
1235 + ret = -ENOMEM;
1236 + goto err_out;
1237 + }
1238 +
1239 pdata->dev_desc = dev_desc;
1240
1241 if (dev_desc->setup)
1242 diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
1243 index 87b27521fcac..7f50dd9eb1d0 100644
1244 --- a/drivers/acpi/acpica/aclocal.h
1245 +++ b/drivers/acpi/acpica/aclocal.h
1246 @@ -213,6 +213,7 @@ struct acpi_table_list {
1247
1248 #define ACPI_TABLE_INDEX_DSDT (0)
1249 #define ACPI_TABLE_INDEX_FACS (1)
1250 +#define ACPI_TABLE_INDEX_X_FACS (2)
1251
1252 struct acpi_find_context {
1253 char *search_for;
1254 diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
1255 index 7d2486005e3f..05be59c772c7 100644
1256 --- a/drivers/acpi/acpica/tbfadt.c
1257 +++ b/drivers/acpi/acpica/tbfadt.c
1258 @@ -350,9 +350,18 @@ void acpi_tb_parse_fadt(u32 table_index)
1259 /* If Hardware Reduced flag is set, there is no FACS */
1260
1261 if (!acpi_gbl_reduced_hardware) {
1262 - acpi_tb_install_fixed_table((acpi_physical_address)
1263 - acpi_gbl_FADT.Xfacs, ACPI_SIG_FACS,
1264 - ACPI_TABLE_INDEX_FACS);
1265 + if (acpi_gbl_FADT.facs) {
1266 + acpi_tb_install_fixed_table((acpi_physical_address)
1267 + acpi_gbl_FADT.facs,
1268 + ACPI_SIG_FACS,
1269 + ACPI_TABLE_INDEX_FACS);
1270 + }
1271 + if (acpi_gbl_FADT.Xfacs) {
1272 + acpi_tb_install_fixed_table((acpi_physical_address)
1273 + acpi_gbl_FADT.Xfacs,
1274 + ACPI_SIG_FACS,
1275 + ACPI_TABLE_INDEX_X_FACS);
1276 + }
1277 }
1278 }
1279
1280 @@ -491,13 +500,9 @@ static void acpi_tb_convert_fadt(void)
1281 acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
1282
1283 /*
1284 - * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
1285 + * Expand the 32-bit DSDT addresses to 64-bit as necessary.
1286 * Later ACPICA code will always use the X 64-bit field.
1287 */
1288 - acpi_gbl_FADT.Xfacs = acpi_tb_select_address("FACS",
1289 - acpi_gbl_FADT.facs,
1290 - acpi_gbl_FADT.Xfacs);
1291 -
1292 acpi_gbl_FADT.Xdsdt = acpi_tb_select_address("DSDT",
1293 acpi_gbl_FADT.dsdt,
1294 acpi_gbl_FADT.Xdsdt);
1295 diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
1296 index 6559a58439c5..2fb1afaacc6d 100644
1297 --- a/drivers/acpi/acpica/tbutils.c
1298 +++ b/drivers/acpi/acpica/tbutils.c
1299 @@ -68,7 +68,8 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
1300
1301 acpi_status acpi_tb_initialize_facs(void)
1302 {
1303 - acpi_status status;
1304 + struct acpi_table_facs *facs32;
1305 + struct acpi_table_facs *facs64;
1306
1307 /* If Hardware Reduced flag is set, there is no FACS */
1308
1309 @@ -77,11 +78,22 @@ acpi_status acpi_tb_initialize_facs(void)
1310 return (AE_OK);
1311 }
1312
1313 - status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
1314 - ACPI_CAST_INDIRECT_PTR(struct
1315 - acpi_table_header,
1316 - &acpi_gbl_FACS));
1317 - return (status);
1318 + (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
1319 + ACPI_CAST_INDIRECT_PTR(struct
1320 + acpi_table_header,
1321 + &facs32));
1322 + (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS,
1323 + ACPI_CAST_INDIRECT_PTR(struct
1324 + acpi_table_header,
1325 + &facs64));
1326 +
1327 + if (acpi_gbl_use32_bit_facs_addresses) {
1328 + acpi_gbl_FACS = facs32 ? facs32 : facs64;
1329 + } else {
1330 + acpi_gbl_FACS = facs64 ? facs64 : facs32;
1331 + }
1332 +
1333 + return (AE_OK);
1334 }
1335 #endif /* !ACPI_REDUCED_HARDWARE */
1336
1337 @@ -101,7 +113,7 @@ acpi_status acpi_tb_initialize_facs(void)
1338 u8 acpi_tb_tables_loaded(void)
1339 {
1340
1341 - if (acpi_gbl_root_table_list.current_table_count >= 3) {
1342 + if (acpi_gbl_root_table_list.current_table_count >= 4) {
1343 return (TRUE);
1344 }
1345
1346 @@ -357,11 +369,11 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
1347 table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
1348
1349 /*
1350 - * First two entries in the table array are reserved for the DSDT
1351 - * and FACS, which are not actually present in the RSDT/XSDT - they
1352 - * come from the FADT
1353 + * First three entries in the table array are reserved for the DSDT
1354 + * and 32bit/64bit FACS, which are not actually present in the
1355 + * RSDT/XSDT - they come from the FADT
1356 */
1357 - acpi_gbl_root_table_list.current_table_count = 2;
1358 + acpi_gbl_root_table_list.current_table_count = 3;
1359
1360 /* Initialize the root table array from the RSDT/XSDT */
1361
1362 diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
1363 index aadb3002a2dd..b63e35d6d1bf 100644
1364 --- a/drivers/acpi/acpica/tbxfload.c
1365 +++ b/drivers/acpi/acpica/tbxfload.c
1366 @@ -166,7 +166,8 @@ static acpi_status acpi_tb_load_namespace(void)
1367
1368 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
1369 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
1370 - if ((!ACPI_COMPARE_NAME
1371 + if (!acpi_gbl_root_table_list.tables[i].address ||
1372 + (!ACPI_COMPARE_NAME
1373 (&(acpi_gbl_root_table_list.tables[i].signature),
1374 ACPI_SIG_SSDT)
1375 &&
1376 diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
1377 index 083a76891889..42a32a66ef22 100644
1378 --- a/drivers/acpi/acpica/utxfinit.c
1379 +++ b/drivers/acpi/acpica/utxfinit.c
1380 @@ -179,10 +179,12 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
1381 * Obtain a permanent mapping for the FACS. This is required for the
1382 * Global Lock and the Firmware Waking Vector
1383 */
1384 - status = acpi_tb_initialize_facs();
1385 - if (ACPI_FAILURE(status)) {
1386 - ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
1387 - return_ACPI_STATUS(status);
1388 + if (!(flags & ACPI_NO_FACS_INIT)) {
1389 + status = acpi_tb_initialize_facs();
1390 + if (ACPI_FAILURE(status)) {
1391 + ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
1392 + return_ACPI_STATUS(status);
1393 + }
1394 }
1395 #endif /* !ACPI_REDUCED_HARDWARE */
1396
1397 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
1398 index 5226a8b921ae..98f5316aad72 100644
1399 --- a/drivers/acpi/osl.c
1400 +++ b/drivers/acpi/osl.c
1401 @@ -175,10 +175,14 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
1402 if (!addr || !length)
1403 return;
1404
1405 - acpi_reserve_region(addr, length, gas->space_id, 0, desc);
1406 + /* Resources are never freed */
1407 + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1408 + request_region(addr, length, desc);
1409 + else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1410 + request_mem_region(addr, length, desc);
1411 }
1412
1413 -static void __init acpi_reserve_resources(void)
1414 +static int __init acpi_reserve_resources(void)
1415 {
1416 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
1417 "ACPI PM1a_EVT_BLK");
1418 @@ -207,7 +211,10 @@ static void __init acpi_reserve_resources(void)
1419 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
1420 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
1421 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
1422 +
1423 + return 0;
1424 }
1425 +fs_initcall_sync(acpi_reserve_resources);
1426
1427 void acpi_os_printf(const char *fmt, ...)
1428 {
1429 @@ -1838,7 +1845,6 @@ acpi_status __init acpi_os_initialize(void)
1430
1431 acpi_status __init acpi_os_initialize1(void)
1432 {
1433 - acpi_reserve_resources();
1434 kacpid_wq = alloc_workqueue("kacpid", 0, 1);
1435 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
1436 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
1437 diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
1438 index fcb7807ea8b7..f1c966e05078 100644
1439 --- a/drivers/acpi/resource.c
1440 +++ b/drivers/acpi/resource.c
1441 @@ -26,7 +26,6 @@
1442 #include <linux/device.h>
1443 #include <linux/export.h>
1444 #include <linux/ioport.h>
1445 -#include <linux/list.h>
1446 #include <linux/slab.h>
1447
1448 #ifdef CONFIG_X86
1449 @@ -194,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win,
1450 u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
1451 bool wp = addr->info.mem.write_protect;
1452 u64 len = attr->address_length;
1453 + u64 start, end, offset = 0;
1454 struct resource *res = &win->res;
1455
1456 /*
1457 @@ -205,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win,
1458 pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
1459 addr->min_address_fixed, addr->max_address_fixed, len);
1460
1461 - res->start = attr->minimum;
1462 - res->end = attr->maximum;
1463 -
1464 /*
1465 * For bridges that translate addresses across the bridge,
1466 * translation_offset is the offset that must be added to the
1467 @@ -215,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win,
1468 * primary side. Non-bridge devices must list 0 for all Address
1469 * Translation offset bits.
1470 */
1471 - if (addr->producer_consumer == ACPI_PRODUCER) {
1472 - res->start += attr->translation_offset;
1473 - res->end += attr->translation_offset;
1474 - } else if (attr->translation_offset) {
1475 + if (addr->producer_consumer == ACPI_PRODUCER)
1476 + offset = attr->translation_offset;
1477 + else if (attr->translation_offset)
1478 pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
1479 attr->translation_offset);
1480 + start = attr->minimum + offset;
1481 + end = attr->maximum + offset;
1482 +
1483 + win->offset = offset;
1484 + res->start = start;
1485 + res->end = end;
1486 + if (sizeof(resource_size_t) < sizeof(u64) &&
1487 + (offset != win->offset || start != res->start || end != res->end)) {
1488 + pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
1489 + attr->minimum, attr->maximum);
1490 + return false;
1491 }
1492
1493 switch (addr->resource_type) {
1494 @@ -237,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win,
1495 return false;
1496 }
1497
1498 - win->offset = attr->translation_offset;
1499 -
1500 if (addr->producer_consumer == ACPI_PRODUCER)
1501 res->flags |= IORESOURCE_WINDOW;
1502
1503 @@ -622,162 +627,3 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
1504 return (type & types) ? 0 : 1;
1505 }
1506 EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
1507 -
1508 -struct reserved_region {
1509 - struct list_head node;
1510 - u64 start;
1511 - u64 end;
1512 -};
1513 -
1514 -static LIST_HEAD(reserved_io_regions);
1515 -static LIST_HEAD(reserved_mem_regions);
1516 -
1517 -static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
1518 - char *desc)
1519 -{
1520 - unsigned int length = end - start + 1;
1521 - struct resource *res;
1522 -
1523 - res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
1524 - request_region(start, length, desc) :
1525 - request_mem_region(start, length, desc);
1526 - if (!res)
1527 - return -EIO;
1528 -
1529 - res->flags &= ~flags;
1530 - return 0;
1531 -}
1532 -
1533 -static int add_region_before(u64 start, u64 end, u8 space_id,
1534 - unsigned long flags, char *desc,
1535 - struct list_head *head)
1536 -{
1537 - struct reserved_region *reg;
1538 - int error;
1539 -
1540 - reg = kmalloc(sizeof(*reg), GFP_KERNEL);
1541 - if (!reg)
1542 - return -ENOMEM;
1543 -
1544 - error = request_range(start, end, space_id, flags, desc);
1545 - if (error)
1546 - return error;
1547 -
1548 - reg->start = start;
1549 - reg->end = end;
1550 - list_add_tail(&reg->node, head);
1551 - return 0;
1552 -}
1553 -
1554 -/**
1555 - * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
1556 - * @start: Starting address of the region.
1557 - * @length: Length of the region.
1558 - * @space_id: Identifier of address space to reserve the region from.
1559 - * @flags: Resource flags to clear for the region after requesting it.
1560 - * @desc: Region description (for messages).
1561 - *
1562 - * Reserve an I/O or memory region as a system resource to prevent others from
1563 - * using it. If the new region overlaps with one of the regions (in the given
1564 - * address space) already reserved by this routine, only the non-overlapping
1565 - * parts of it will be reserved.
1566 - *
1567 - * Returned is either 0 (success) or a negative error code indicating a resource
1568 - * reservation problem. It is the code of the first encountered error, but the
1569 - * routine doesn't abort until it has attempted to request all of the parts of
1570 - * the new region that don't overlap with other regions reserved previously.
1571 - *
1572 - * The resources requested by this routine are never released.
1573 - */
1574 -int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
1575 - unsigned long flags, char *desc)
1576 -{
1577 - struct list_head *regions;
1578 - struct reserved_region *reg;
1579 - u64 end = start + length - 1;
1580 - int ret = 0, error = 0;
1581 -
1582 - if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1583 - regions = &reserved_io_regions;
1584 - else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1585 - regions = &reserved_mem_regions;
1586 - else
1587 - return -EINVAL;
1588 -
1589 - if (list_empty(regions))
1590 - return add_region_before(start, end, space_id, flags, desc, regions);
1591 -
1592 - list_for_each_entry(reg, regions, node)
1593 - if (reg->start == end + 1) {
1594 - /* The new region can be prepended to this one. */
1595 - ret = request_range(start, end, space_id, flags, desc);
1596 - if (!ret)
1597 - reg->start = start;
1598 -
1599 - return ret;
1600 - } else if (reg->start > end) {
1601 - /* No overlap. Add the new region here and get out. */
1602 - return add_region_before(start, end, space_id, flags,
1603 - desc, &reg->node);
1604 - } else if (reg->end == start - 1) {
1605 - goto combine;
1606 - } else if (reg->end >= start) {
1607 - goto overlap;
1608 - }
1609 -
1610 - /* The new region goes after the last existing one. */
1611 - return add_region_before(start, end, space_id, flags, desc, regions);
1612 -
1613 - overlap:
1614 - /*
1615 - * The new region overlaps an existing one.
1616 - *
1617 - * The head part of the new region immediately preceding the existing
1618 - * overlapping one can be combined with it right away.
1619 - */
1620 - if (reg->start > start) {
1621 - error = request_range(start, reg->start - 1, space_id, flags, desc);
1622 - if (error)
1623 - ret = error;
1624 - else
1625 - reg->start = start;
1626 - }
1627 -
1628 - combine:
1629 - /*
1630 - * The new region is adjacent to an existing one. If it extends beyond
1631 - * that region all the way to the next one, it is possible to combine
1632 - * all three of them.
1633 - */
1634 - while (reg->end < end) {
1635 - struct reserved_region *next = NULL;
1636 - u64 a = reg->end + 1, b = end;
1637 -
1638 - if (!list_is_last(&reg->node, regions)) {
1639 - next = list_next_entry(reg, node);
1640 - if (next->start <= end)
1641 - b = next->start - 1;
1642 - }
1643 - error = request_range(a, b, space_id, flags, desc);
1644 - if (!error) {
1645 - if (next && next->start == b + 1) {
1646 - reg->end = next->end;
1647 - list_del(&next->node);
1648 - kfree(next);
1649 - } else {
1650 - reg->end = end;
1651 - break;
1652 - }
1653 - } else if (next) {
1654 - if (!ret)
1655 - ret = error;
1656 -
1657 - reg = next;
1658 - } else {
1659 - break;
1660 - }
1661 - }
1662 -
1663 - return ret ? ret : error;
1664 -}
1665 -EXPORT_SYMBOL_GPL(acpi_reserve_region);
1666 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1667 index 577849c6611a..41c99be9bd41 100644
1668 --- a/drivers/ata/libata-core.c
1669 +++ b/drivers/ata/libata-core.c
1670 @@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev)
1671 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
1672 dev->max_sectors);
1673
1674 + if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
1675 + dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
1676 + dev->max_sectors);
1677 +
1678 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
1679 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1680
1681 @@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1682 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
1683 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
1684
1685 + /*
1686 + * Causes silent data corruption with higher max sects.
1687 + * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
1688 + */
1689 + { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
1690 +
1691 /* Devices we expect to fail diagnostics */
1692
1693 /* Devices where NCQ should be avoided */
1694 @@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1695 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
1696 ATA_HORKAGE_FIRMWARE_WARN },
1697
1698 - /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
1699 + /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
1700 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
1701 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
1702 + { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
1703
1704 /* Blacklist entries taken from Silicon Image 3124/3132
1705 Windows driver .inf file - also several Linux problem reports */
1706 @@ -4225,11 +4236,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1707 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
1708
1709 /* devices that don't properly handle queued TRIM commands */
1710 - { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
1711 + { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
1712 ATA_HORKAGE_ZERO_AFTER_TRIM, },
1713 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
1714 ATA_HORKAGE_ZERO_AFTER_TRIM, },
1715 - { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
1716 + { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
1717 ATA_HORKAGE_ZERO_AFTER_TRIM, },
1718 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
1719 ATA_HORKAGE_ZERO_AFTER_TRIM, },
1720 @@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1721 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
1722 ATA_HORKAGE_ZERO_AFTER_TRIM, },
1723
1724 + /* devices that don't properly handle TRIM commands */
1725 + { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
1726 +
1727 /*
1728 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
1729 * (Return Zero After Trim) flags in the ATA Command Set are
1730 @@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1731 else /* In the ancient relic department - skip all of this */
1732 return 0;
1733
1734 - err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1735 + /* On some disks, this command causes spin-up, so we need a longer timeout */
1736 + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
1737
1738 DPRINTK("EXIT, err_mask=%x\n", err_mask);
1739 return err_mask;
1740 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1741 index cf0022ec07f2..7465031a893c 100644
1742 --- a/drivers/ata/libata-eh.c
1743 +++ b/drivers/ata/libata-eh.c
1744 @@ -1507,16 +1507,21 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1745 {
1746 struct ata_taskfile tf;
1747 unsigned int err_mask;
1748 + bool dma = false;
1749
1750 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1751
1752 +retry:
1753 ata_tf_init(dev, &tf);
1754 - if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id)) {
1755 + if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
1756 + !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
1757 tf.command = ATA_CMD_READ_LOG_DMA_EXT;
1758 tf.protocol = ATA_PROT_DMA;
1759 + dma = true;
1760 } else {
1761 tf.command = ATA_CMD_READ_LOG_EXT;
1762 tf.protocol = ATA_PROT_PIO;
1763 + dma = false;
1764 }
1765 tf.lbal = log;
1766 tf.lbam = page;
1767 @@ -1527,6 +1532,12 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1768 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1769 buf, sectors * ATA_SECT_SIZE, 0);
1770
1771 + if (err_mask && dma) {
1772 + dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
1773 + ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
1774 + goto retry;
1775 + }
1776 +
1777 DPRINTK("EXIT, err_mask=%x\n", err_mask);
1778 return err_mask;
1779 }
1780 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
1781 index 3131adcc1f87..641a61a59e89 100644
1782 --- a/drivers/ata/libata-scsi.c
1783 +++ b/drivers/ata/libata-scsi.c
1784 @@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
1785 rbuf[14] = (lowest_aligned >> 8) & 0x3f;
1786 rbuf[15] = lowest_aligned;
1787
1788 - if (ata_id_has_trim(args->id)) {
1789 + if (ata_id_has_trim(args->id) &&
1790 + !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
1791 rbuf[14] |= 0x80; /* LBPME */
1792
1793 if (ata_id_has_zero_after_trim(args->id) &&
1794 diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
1795 index 3227b7c8a05f..e2d94972962d 100644
1796 --- a/drivers/ata/libata-transport.c
1797 +++ b/drivers/ata/libata-transport.c
1798 @@ -560,6 +560,29 @@ show_ata_dev_gscr(struct device *dev,
1799
1800 static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL);
1801
1802 +static ssize_t
1803 +show_ata_dev_trim(struct device *dev,
1804 + struct device_attribute *attr, char *buf)
1805 +{
1806 + struct ata_device *ata_dev = transport_class_to_dev(dev);
1807 + unsigned char *mode;
1808 +
1809 + if (!ata_id_has_trim(ata_dev->id))
1810 + mode = "unsupported";
1811 + else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
1812 + mode = "forced_unsupported";
1813 + else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
1814 + mode = "forced_unqueued";
1815 + else if (ata_fpdma_dsm_supported(ata_dev))
1816 + mode = "queued";
1817 + else
1818 + mode = "unqueued";
1819 +
1820 + return snprintf(buf, 20, "%s\n", mode);
1821 +}
1822 +
1823 +static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL);
1824 +
1825 static DECLARE_TRANSPORT_CLASS(ata_dev_class,
1826 "ata_device", NULL, NULL, NULL);
1827
1828 @@ -733,6 +756,7 @@ struct scsi_transport_template *ata_attach_transport(void)
1829 SETUP_DEV_ATTRIBUTE(ering);
1830 SETUP_DEV_ATTRIBUTE(id);
1831 SETUP_DEV_ATTRIBUTE(gscr);
1832 + SETUP_DEV_ATTRIBUTE(trim);
1833 BUG_ON(count > ATA_DEV_ATTRS);
1834 i->dev_attrs[count] = NULL;
1835
1836 diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
1837 index 171841ad1008..4d1d9de4f9bf 100644
1838 --- a/drivers/base/firmware_class.c
1839 +++ b/drivers/base/firmware_class.c
1840 @@ -544,10 +544,8 @@ static void fw_dev_release(struct device *dev)
1841 kfree(fw_priv);
1842 }
1843
1844 -static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
1845 +static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
1846 {
1847 - struct firmware_priv *fw_priv = to_firmware_priv(dev);
1848 -
1849 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
1850 return -ENOMEM;
1851 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
1852 @@ -558,6 +556,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
1853 return 0;
1854 }
1855
1856 +static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
1857 +{
1858 + struct firmware_priv *fw_priv = to_firmware_priv(dev);
1859 + int err = 0;
1860 +
1861 + mutex_lock(&fw_lock);
1862 + if (fw_priv->buf)
1863 + err = do_firmware_uevent(fw_priv, env);
1864 + mutex_unlock(&fw_lock);
1865 + return err;
1866 +}
1867 +
1868 static struct class firmware_class = {
1869 .name = "firmware",
1870 .class_attrs = firmware_class_attrs,
1871 diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
1872 index 7fdd0172605a..c7b0fcebf168 100644
1873 --- a/drivers/base/power/clock_ops.c
1874 +++ b/drivers/base/power/clock_ops.c
1875 @@ -93,7 +93,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
1876 return -ENOMEM;
1877 }
1878 } else {
1879 - if (IS_ERR(ce->clk) || !__clk_get(clk)) {
1880 + if (IS_ERR(clk) || !__clk_get(clk)) {
1881 kfree(ce);
1882 return -ENOENT;
1883 }
1884 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1885 index d7173cb1ea76..cef6fa83a274 100644
1886 --- a/drivers/block/loop.c
1887 +++ b/drivers/block/loop.c
1888 @@ -86,8 +86,6 @@ static DEFINE_MUTEX(loop_index_mutex);
1889 static int max_part;
1890 static int part_shift;
1891
1892 -static struct workqueue_struct *loop_wq;
1893 -
1894 static int transfer_xor(struct loop_device *lo, int cmd,
1895 struct page *raw_page, unsigned raw_off,
1896 struct page *loop_page, unsigned loop_off,
1897 @@ -725,6 +723,12 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
1898 size = get_loop_size(lo, file);
1899 if ((loff_t)(sector_t)size != size)
1900 goto out_putf;
1901 + error = -ENOMEM;
1902 + lo->wq = alloc_workqueue("kloopd%d",
1903 + WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16,
1904 + lo->lo_number);
1905 + if (!lo->wq)
1906 + goto out_putf;
1907
1908 error = 0;
1909
1910 @@ -872,6 +876,8 @@ static int loop_clr_fd(struct loop_device *lo)
1911 lo->lo_flags = 0;
1912 if (!part_shift)
1913 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
1914 + destroy_workqueue(lo->wq);
1915 + lo->wq = NULL;
1916 mutex_unlock(&lo->lo_ctl_mutex);
1917 /*
1918 * Need not hold lo_ctl_mutex to fput backing file.
1919 @@ -1425,9 +1431,13 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1920 const struct blk_mq_queue_data *bd)
1921 {
1922 struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
1923 + struct loop_device *lo = cmd->rq->q->queuedata;
1924
1925 blk_mq_start_request(bd->rq);
1926
1927 + if (lo->lo_state != Lo_bound)
1928 + return -EIO;
1929 +
1930 if (cmd->rq->cmd_flags & REQ_WRITE) {
1931 struct loop_device *lo = cmd->rq->q->queuedata;
1932 bool need_sched = true;
1933 @@ -1441,9 +1451,9 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1934 spin_unlock_irq(&lo->lo_lock);
1935
1936 if (need_sched)
1937 - queue_work(loop_wq, &lo->write_work);
1938 + queue_work(lo->wq, &lo->write_work);
1939 } else {
1940 - queue_work(loop_wq, &cmd->read_work);
1941 + queue_work(lo->wq, &cmd->read_work);
1942 }
1943
1944 return BLK_MQ_RQ_QUEUE_OK;
1945 @@ -1455,9 +1465,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
1946 struct loop_device *lo = cmd->rq->q->queuedata;
1947 int ret = -EIO;
1948
1949 - if (lo->lo_state != Lo_bound)
1950 - goto failed;
1951 -
1952 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
1953 goto failed;
1954
1955 @@ -1806,13 +1813,6 @@ static int __init loop_init(void)
1956 goto misc_out;
1957 }
1958
1959 - loop_wq = alloc_workqueue("kloopd",
1960 - WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
1961 - if (!loop_wq) {
1962 - err = -ENOMEM;
1963 - goto misc_out;
1964 - }
1965 -
1966 blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
1967 THIS_MODULE, loop_probe, NULL, NULL);
1968
1969 @@ -1850,8 +1850,6 @@ static void __exit loop_exit(void)
1970 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
1971 unregister_blkdev(LOOP_MAJOR, "loop");
1972
1973 - destroy_workqueue(loop_wq);
1974 -
1975 misc_deregister(&loop_misc);
1976 }
1977
1978 diff --git a/drivers/block/loop.h b/drivers/block/loop.h
1979 index 301c27f8323f..49564edf5581 100644
1980 --- a/drivers/block/loop.h
1981 +++ b/drivers/block/loop.h
1982 @@ -54,6 +54,7 @@ struct loop_device {
1983 gfp_t old_gfp_mask;
1984
1985 spinlock_t lo_lock;
1986 + struct workqueue_struct *wq;
1987 struct list_head write_cmd_head;
1988 struct work_struct write_work;
1989 bool write_started;
1990 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
1991 index ec6c5c6e1ac9..53f253574abe 100644
1992 --- a/drivers/block/rbd.c
1993 +++ b/drivers/block/rbd.c
1994 @@ -2001,11 +2001,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1995 rbd_assert(obj_request_type_valid(type));
1996
1997 size = strlen(object_name) + 1;
1998 - name = kmalloc(size, GFP_KERNEL);
1999 + name = kmalloc(size, GFP_NOIO);
2000 if (!name)
2001 return NULL;
2002
2003 - obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
2004 + obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
2005 if (!obj_request) {
2006 kfree(name);
2007 return NULL;
2008 diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
2009 index 4bba86677adc..3f146c9911c1 100644
2010 --- a/drivers/bluetooth/btbcm.c
2011 +++ b/drivers/bluetooth/btbcm.c
2012 @@ -378,12 +378,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
2013
2014 /* Read Verbose Config Version Info */
2015 skb = btbcm_read_verbose_config(hdev);
2016 - if (IS_ERR(skb))
2017 - return PTR_ERR(skb);
2018 -
2019 - BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
2020 - get_unaligned_le16(skb->data + 5));
2021 - kfree_skb(skb);
2022 + if (!IS_ERR(skb)) {
2023 + BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
2024 + get_unaligned_le16(skb->data + 5));
2025 + kfree_skb(skb);
2026 + }
2027
2028 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
2029
2030 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2031 index 420cc9f3eb76..c65501539224 100644
2032 --- a/drivers/bluetooth/btusb.c
2033 +++ b/drivers/bluetooth/btusb.c
2034 @@ -268,7 +268,7 @@ static const struct usb_device_id blacklist_table[] = {
2035 { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
2036
2037 /* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
2038 - { USB_DEVICE(0x1300, 0x0001), .driver_info = BTUSB_SWAVE },
2039 + { USB_DEVICE(0x1310, 0x0001), .driver_info = BTUSB_SWAVE },
2040
2041 /* Digianswer devices */
2042 { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
2043 @@ -1993,6 +1993,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
2044 }
2045 fw_ptr = fw->data;
2046
2047 + kfree_skb(skb);
2048 +
2049 /* This Intel specific command enables the manufacturer mode of the
2050 * controller.
2051 *
2052 @@ -2334,6 +2336,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
2053 struct intel_boot_params *params;
2054 const struct firmware *fw;
2055 const u8 *fw_ptr;
2056 + u32 frag_len;
2057 char fwname[64];
2058 ktime_t calltime, delta, rettime;
2059 unsigned long long duration;
2060 @@ -2540,24 +2543,33 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
2061 }
2062
2063 fw_ptr = fw->data + 644;
2064 + frag_len = 0;
2065
2066 while (fw_ptr - fw->data < fw->size) {
2067 - struct hci_command_hdr *cmd = (void *)fw_ptr;
2068 - u8 cmd_len;
2069 + struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
2070
2071 - cmd_len = sizeof(*cmd) + cmd->plen;
2072 + frag_len += sizeof(*cmd) + cmd->plen;
2073
2074 - /* Send each command from the firmware data buffer as
2075 - * a single Data fragment.
2076 + /* The parameter length of the secure send command requires
2077 + * a 4 byte alignment. The firmware file happens to contain
2078 + * proper Intel_NOP commands to align the fragments
2079 + * as needed.
2080 + *
2081 + * Send a set of commands with 4 byte alignment from the
2082 + * firmware data buffer as a single Data fragment.
2083 */
2084 - err = btusb_intel_secure_send(hdev, 0x01, cmd_len, fw_ptr);
2085 - if (err < 0) {
2086 - BT_ERR("%s: Failed to send firmware data (%d)",
2087 - hdev->name, err);
2088 - goto done;
2089 - }
2090 + if (!(frag_len % 4)) {
2091 + err = btusb_intel_secure_send(hdev, 0x01, frag_len,
2092 + fw_ptr);
2093 + if (err < 0) {
2094 + BT_ERR("%s: Failed to send firmware data (%d)",
2095 + hdev->name, err);
2096 + goto done;
2097 + }
2098
2099 - fw_ptr += cmd_len;
2100 + fw_ptr += frag_len;
2101 + frag_len = 0;
2102 + }
2103 }
2104
2105 set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
2106 diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
2107 index aaa0f2a87118..60397ec77ff7 100644
2108 --- a/drivers/bus/arm-ccn.c
2109 +++ b/drivers/bus/arm-ccn.c
2110 @@ -212,7 +212,7 @@ static int arm_ccn_node_to_xp_port(int node)
2111
2112 static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
2113 {
2114 - *config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24));
2115 + *config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
2116 *config |= (node_xp << 0) | (type << 8) | (port << 24);
2117 }
2118
2119 diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
2120 index 0b4188b9af7c..c6dea3f6917b 100644
2121 --- a/drivers/char/agp/intel-gtt.c
2122 +++ b/drivers/char/agp/intel-gtt.c
2123 @@ -581,7 +581,7 @@ static inline int needs_ilk_vtd_wa(void)
2124 /* Query intel_iommu to see if we need the workaround. Presumably that
2125 * was loaded first.
2126 */
2127 - if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
2128 + if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
2129 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
2130 intel_iommu_gfx_mapped)
2131 return 1;
2132 diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
2133 index 283f00a7f036..1082d4bb016a 100644
2134 --- a/drivers/char/tpm/tpm-chip.c
2135 +++ b/drivers/char/tpm/tpm-chip.c
2136 @@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
2137
2138 device_initialize(&chip->dev);
2139
2140 - chip->cdev.owner = chip->pdev->driver->owner;
2141 cdev_init(&chip->cdev, &tpm_fops);
2142 + chip->cdev.owner = chip->pdev->driver->owner;
2143 + chip->cdev.kobj.parent = &chip->dev.kobj;
2144
2145 return chip;
2146 }
2147 diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
2148 index b26ceee3585e..1267322595da 100644
2149 --- a/drivers/char/tpm/tpm_crb.c
2150 +++ b/drivers/char/tpm/tpm_crb.c
2151 @@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device)
2152 return -ENODEV;
2153 }
2154
2155 + /* At least some versions of AMI BIOS have a bug where the TPM2 table
2156 + * has a zero address for the control area, and therefore we must fail.
2157 + */
2158 + if (!buf->control_area_pa) {
2159 + dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
2160 + return -EINVAL;
2161 + }
2162 +
2163 if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
2164 dev_err(dev, "TPM2 ACPI table has wrong size");
2165 return -EINVAL;
2166 @@ -267,7 +275,7 @@ static int crb_acpi_add(struct acpi_device *device)
2167
2168 memcpy_fromio(&pa, &priv->cca->cmd_pa, 8);
2169 pa = le64_to_cpu(pa);
2170 - priv->cmd = devm_ioremap_nocache(dev, le64_to_cpu(pa),
2171 + priv->cmd = devm_ioremap_nocache(dev, pa,
2172 ioread32(&priv->cca->cmd_size));
2173 if (!priv->cmd) {
2174 dev_err(dev, "ioremap of the command buffer failed\n");
2175 @@ -276,7 +284,7 @@ static int crb_acpi_add(struct acpi_device *device)
2176
2177 memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
2178 pa = le64_to_cpu(pa);
2179 - priv->rsp = devm_ioremap_nocache(dev, le64_to_cpu(pa),
2180 + priv->rsp = devm_ioremap_nocache(dev, pa,
2181 ioread32(&priv->cca->rsp_size));
2182 if (!priv->rsp) {
2183 dev_err(dev, "ioremap of the response buffer failed\n");
2184 diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
2185 index 42ffa5e7a1e0..27ebf9511cb4 100644
2186 --- a/drivers/char/tpm/tpm_ibmvtpm.c
2187 +++ b/drivers/char/tpm/tpm_ibmvtpm.c
2188 @@ -578,6 +578,9 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
2189 goto cleanup;
2190 }
2191
2192 + ibmvtpm->dev = dev;
2193 + ibmvtpm->vdev = vio_dev;
2194 +
2195 crq_q = &ibmvtpm->crq_queue;
2196 crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
2197 if (!crq_q->crq_addr) {
2198 @@ -622,8 +625,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
2199
2200 crq_q->index = 0;
2201
2202 - ibmvtpm->dev = dev;
2203 - ibmvtpm->vdev = vio_dev;
2204 TPM_VPRIV(chip) = (void *)ibmvtpm;
2205
2206 spin_lock_init(&ibmvtpm->rtce_lock);
2207 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
2208 index 5b0f41868b42..9f9cadd00bc8 100644
2209 --- a/drivers/clk/clk.c
2210 +++ b/drivers/clk/clk.c
2211 @@ -230,11 +230,12 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2212 if (!c)
2213 return;
2214
2215 + /* This should be JSON format, i.e. elements separated with a comma */
2216 seq_printf(s, "\"%s\": { ", c->name);
2217 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2218 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
2219 - seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
2220 - seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
2221 + seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2222 + seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
2223 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
2224 }
2225
2226 diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
2227 index b95d17fbb8d7..92936f0912d2 100644
2228 --- a/drivers/clk/qcom/clk-rcg2.c
2229 +++ b/drivers/clk/qcom/clk-rcg2.c
2230 @@ -530,19 +530,16 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
2231 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
2232 struct freq_tbl f = *rcg->freq_tbl;
2233 const struct frac_entry *frac = frac_table_pixel;
2234 - unsigned long request, src_rate;
2235 + unsigned long request;
2236 int delta = 100000;
2237 u32 mask = BIT(rcg->hid_width) - 1;
2238 u32 hid_div;
2239 - int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
2240 - struct clk *parent = clk_get_parent_by_index(hw->clk, index);
2241
2242 for (; frac->num; frac++) {
2243 request = (rate * frac->den) / frac->num;
2244
2245 - src_rate = __clk_round_rate(parent, request);
2246 - if ((src_rate < (request - delta)) ||
2247 - (src_rate > (request + delta)))
2248 + if ((parent_rate < (request - delta)) ||
2249 + (parent_rate > (request + delta)))
2250 continue;
2251
2252 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
2253 diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
2254 index d86bc46b93bd..0a1df821860f 100644
2255 --- a/drivers/clk/ti/clk-dra7-atl.c
2256 +++ b/drivers/clk/ti/clk-dra7-atl.c
2257 @@ -252,6 +252,11 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
2258 }
2259
2260 clk = of_clk_get_from_provider(&clkspec);
2261 + if (IS_ERR(clk)) {
2262 + pr_err("%s: failed to get atl clock %d from provider\n",
2263 + __func__, i);
2264 + return PTR_ERR(clk);
2265 + }
2266
2267 cdesc = to_atl_desc(__clk_get_hw(clk));
2268 cdesc->cinfo = cinfo;
2269 diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
2270 index 83564c9cfdbe..c844616028d2 100644
2271 --- a/drivers/clocksource/exynos_mct.c
2272 +++ b/drivers/clocksource/exynos_mct.c
2273 @@ -466,15 +466,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
2274 exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
2275
2276 if (mct_int_type == MCT_INT_SPI) {
2277 - evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
2278 - if (request_irq(evt->irq, exynos4_mct_tick_isr,
2279 - IRQF_TIMER | IRQF_NOBALANCING,
2280 - evt->name, mevt)) {
2281 - pr_err("exynos-mct: cannot register IRQ %d\n",
2282 - evt->irq);
2283 +
2284 + if (evt->irq == -1)
2285 return -EIO;
2286 - }
2287 - irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
2288 +
2289 + irq_force_affinity(evt->irq, cpumask_of(cpu));
2290 + enable_irq(evt->irq);
2291 } else {
2292 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
2293 }
2294 @@ -487,10 +484,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
2295 static void exynos4_local_timer_stop(struct clock_event_device *evt)
2296 {
2297 evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
2298 - if (mct_int_type == MCT_INT_SPI)
2299 - free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
2300 - else
2301 + if (mct_int_type == MCT_INT_SPI) {
2302 + if (evt->irq != -1)
2303 + disable_irq_nosync(evt->irq);
2304 + } else {
2305 disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
2306 + }
2307 }
2308
2309 static int exynos4_mct_cpu_notify(struct notifier_block *self,
2310 @@ -522,7 +521,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
2311
2312 static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
2313 {
2314 - int err;
2315 + int err, cpu;
2316 struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
2317 struct clk *mct_clk, *tick_clk;
2318
2319 @@ -549,7 +548,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
2320 WARN(err, "MCT: can't request IRQ %d (%d)\n",
2321 mct_irqs[MCT_L0_IRQ], err);
2322 } else {
2323 - irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
2324 + for_each_possible_cpu(cpu) {
2325 + int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
2326 + struct mct_clock_event_device *pcpu_mevt =
2327 + per_cpu_ptr(&percpu_mct_tick, cpu);
2328 +
2329 + pcpu_mevt->evt.irq = -1;
2330 +
2331 + irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
2332 + if (request_irq(mct_irq,
2333 + exynos4_mct_tick_isr,
2334 + IRQF_TIMER | IRQF_NOBALANCING,
2335 + pcpu_mevt->name, pcpu_mevt)) {
2336 + pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
2337 + cpu);
2338 +
2339 + continue;
2340 + }
2341 + pcpu_mevt->evt.irq = mct_irq;
2342 + }
2343 }
2344
2345 err = register_cpu_notifier(&exynos4_mct_cpu_nb);
2346 diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
2347 index 1c56001df676..50f1b422dee3 100644
2348 --- a/drivers/dma/mv_xor.c
2349 +++ b/drivers/dma/mv_xor.c
2350 @@ -273,7 +273,8 @@ static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
2351 dma_cookie_t cookie = 0;
2352 int busy = mv_chan_is_busy(mv_chan);
2353 u32 current_desc = mv_chan_get_current_desc(mv_chan);
2354 - int seen_current = 0;
2355 + int current_cleaned = 0;
2356 + struct mv_xor_desc *hw_desc;
2357
2358 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
2359 dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
2360 @@ -285,38 +286,57 @@ static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
2361
2362 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
2363 chain_node) {
2364 - prefetch(_iter);
2365 - prefetch(&_iter->async_tx);
2366
2367 - /* do not advance past the current descriptor loaded into the
2368 - * hardware channel, subsequent descriptors are either in
2369 - * process or have not been submitted
2370 - */
2371 - if (seen_current)
2372 - break;
2373 + /* clean finished descriptors */
2374 + hw_desc = iter->hw_desc;
2375 + if (hw_desc->status & XOR_DESC_SUCCESS) {
2376 + cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
2377 + cookie);
2378
2379 - /* stop the search if we reach the current descriptor and the
2380 - * channel is busy
2381 - */
2382 - if (iter->async_tx.phys == current_desc) {
2383 - seen_current = 1;
2384 - if (busy)
2385 + /* done processing desc, clean slot */
2386 + mv_xor_clean_slot(iter, mv_chan);
2387 +
2388 + /* break if we cleaned the current descriptor */
2389 + if (iter->async_tx.phys == current_desc) {
2390 + current_cleaned = 1;
2391 + break;
2392 + }
2393 + } else {
2394 + if (iter->async_tx.phys == current_desc) {
2395 + current_cleaned = 0;
2396 break;
2397 + }
2398 }
2399 -
2400 - cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
2401 -
2402 - if (mv_xor_clean_slot(iter, mv_chan))
2403 - break;
2404 }
2405
2406 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
2407 - struct mv_xor_desc_slot *chain_head;
2408 - chain_head = list_entry(mv_chan->chain.next,
2409 - struct mv_xor_desc_slot,
2410 - chain_node);
2411 -
2412 - mv_xor_start_new_chain(mv_chan, chain_head);
2413 + if (current_cleaned) {
2414 + /*
2415 + * current descriptor cleaned and removed, run
2416 + * from list head
2417 + */
2418 + iter = list_entry(mv_chan->chain.next,
2419 + struct mv_xor_desc_slot,
2420 + chain_node);
2421 + mv_xor_start_new_chain(mv_chan, iter);
2422 + } else {
2423 + if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
2424 + /*
2425 + * descriptors are still waiting after
2426 + * current, trigger them
2427 + */
2428 + iter = list_entry(iter->chain_node.next,
2429 + struct mv_xor_desc_slot,
2430 + chain_node);
2431 + mv_xor_start_new_chain(mv_chan, iter);
2432 + } else {
2433 + /*
2434 + * some descriptors are still waiting
2435 + * to be cleaned
2436 + */
2437 + tasklet_schedule(&mv_chan->irq_tasklet);
2438 + }
2439 + }
2440 }
2441
2442 if (cookie > 0)
2443 diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
2444 index 91958dba39a2..0e302b3a33ad 100644
2445 --- a/drivers/dma/mv_xor.h
2446 +++ b/drivers/dma/mv_xor.h
2447 @@ -31,6 +31,7 @@
2448 #define XOR_OPERATION_MODE_XOR 0
2449 #define XOR_OPERATION_MODE_MEMCPY 2
2450 #define XOR_DESCRIPTOR_SWAP BIT(14)
2451 +#define XOR_DESC_SUCCESS 0x40000000
2452
2453 #define XOR_DESC_DMA_OWNED BIT(31)
2454 #define XOR_DESC_EOD_INT_EN BIT(31)
2455 diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
2456 index 7e98084d3645..afea7fc625cc 100644
2457 --- a/drivers/edac/octeon_edac-l2c.c
2458 +++ b/drivers/edac/octeon_edac-l2c.c
2459 @@ -151,7 +151,7 @@ static int octeon_l2c_probe(struct platform_device *pdev)
2460 l2c->ctl_name = "octeon_l2c_err";
2461
2462
2463 - if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
2464 + if (OCTEON_IS_OCTEON1PLUS()) {
2465 union cvmx_l2t_err l2t_err;
2466 union cvmx_l2d_err l2d_err;
2467
2468 diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
2469 index bb19e0732681..cda6dab5067a 100644
2470 --- a/drivers/edac/octeon_edac-lmc.c
2471 +++ b/drivers/edac/octeon_edac-lmc.c
2472 @@ -234,7 +234,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
2473 layers[0].size = 1;
2474 layers[0].is_virt_csrow = false;
2475
2476 - if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
2477 + if (OCTEON_IS_OCTEON1PLUS()) {
2478 union cvmx_lmcx_mem_cfg0 cfg0;
2479
2480 cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
2481 diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
2482 index 0f83c33a7d1f..2ab6cf24c959 100644
2483 --- a/drivers/edac/octeon_edac-pc.c
2484 +++ b/drivers/edac/octeon_edac-pc.c
2485 @@ -73,7 +73,7 @@ static int co_cache_error_event(struct notifier_block *this,
2486 edac_device_handle_ce(p->ed, cpu, 0, "dcache");
2487
2488 /* Clear the error indication */
2489 - if (OCTEON_IS_MODEL(OCTEON_FAM_2))
2490 + if (OCTEON_IS_OCTEON2())
2491 write_octeon_c0_dcacheerr(1);
2492 else
2493 write_octeon_c0_dcacheerr(0);
2494 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
2495 index 97b1616aa391..bba843c2b0ac 100644
2496 --- a/drivers/firmware/dmi_scan.c
2497 +++ b/drivers/firmware/dmi_scan.c
2498 @@ -89,9 +89,9 @@ static void dmi_table(u8 *buf,
2499
2500 /*
2501 * Stop when we have seen all the items the table claimed to have
2502 - * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
2503 - * off the end of the table (should never happen but sometimes does
2504 - * on bogus implementations.)
2505 + * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
2506 + * >= 3.0 only) OR we run off the end of the table (should never
2507 + * happen but sometimes does on bogus implementations.)
2508 */
2509 while ((!dmi_num || i < dmi_num) &&
2510 (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
2511 @@ -110,8 +110,13 @@ static void dmi_table(u8 *buf,
2512
2513 /*
2514 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
2515 + * For tables behind a 64-bit entry point, we have no item
2516 + * count and no exact table length, so stop on end-of-table
2517 + * marker. For tables behind a 32-bit entry point, we have
2518 + * seen OEM structures behind the end-of-table marker on
2519 + * some systems, so don't trust it.
2520 */
2521 - if (dm->type == DMI_ENTRY_END_OF_TABLE)
2522 + if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE)
2523 break;
2524
2525 data += 2;
2526 diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
2527 index 9d2f053382e1..63a09e4079f3 100644
2528 --- a/drivers/gpu/drm/bridge/ptn3460.c
2529 +++ b/drivers/gpu/drm/bridge/ptn3460.c
2530 @@ -15,6 +15,7 @@
2531
2532 #include <linux/delay.h>
2533 #include <linux/gpio.h>
2534 +#include <linux/gpio/consumer.h>
2535 #include <linux/i2c.h>
2536 #include <linux/module.h>
2537 #include <linux/of.h>
2538 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
2539 index 3007b44e6bf4..800a025dd062 100644
2540 --- a/drivers/gpu/drm/drm_crtc.c
2541 +++ b/drivers/gpu/drm/drm_crtc.c
2542 @@ -2749,8 +2749,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2543 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2544 return -EINVAL;
2545
2546 - /* For some reason crtc x/y offsets are signed internally. */
2547 - if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
2548 + /*
2549 + * Universal plane src offsets are only 16.16, prevent havoc for
2550 + * drivers using universal plane code internally.
2551 + */
2552 + if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
2553 return -ERANGE;
2554
2555 drm_modeset_lock_all(dev);
2556 @@ -5048,12 +5051,9 @@ void drm_mode_config_reset(struct drm_device *dev)
2557 if (encoder->funcs->reset)
2558 encoder->funcs->reset(encoder);
2559
2560 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2561 - connector->status = connector_status_unknown;
2562 -
2563 + list_for_each_entry(connector, &dev->mode_config.connector_list, head)
2564 if (connector->funcs->reset)
2565 connector->funcs->reset(connector);
2566 - }
2567 }
2568 EXPORT_SYMBOL(drm_mode_config_reset);
2569
2570 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
2571 index 132581ca4ad8..778bbb6425b8 100644
2572 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
2573 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
2574 @@ -867,8 +867,16 @@ static void drm_dp_destroy_port(struct kref *kref)
2575 port->vcpi.num_slots = 0;
2576
2577 kfree(port->cached_edid);
2578 - if (port->connector)
2579 - (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
2580 +
2581 + /* we can't destroy the connector here, as
2582 + we might be holding the mode_config.mutex
2583 + from an EDID retrieval */
2584 + if (port->connector) {
2585 + mutex_lock(&mgr->destroy_connector_lock);
2586 + list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
2587 + mutex_unlock(&mgr->destroy_connector_lock);
2588 + schedule_work(&mgr->destroy_connector_work);
2589 + }
2590 drm_dp_port_teardown_pdt(port, port->pdt);
2591
2592 if (!port->input && port->vcpi.vcpi > 0)
2593 @@ -1163,6 +1171,8 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
2594 struct drm_dp_mst_port *port;
2595 int i;
2596 /* find the port by iterating down */
2597 +
2598 + mutex_lock(&mgr->lock);
2599 mstb = mgr->mst_primary;
2600
2601 for (i = 0; i < lct - 1; i++) {
2602 @@ -1182,6 +1192,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
2603 }
2604 }
2605 kref_get(&mstb->kref);
2606 + mutex_unlock(&mgr->lock);
2607 return mstb;
2608 }
2609
2610 @@ -1189,7 +1200,7 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
2611 struct drm_dp_mst_branch *mstb)
2612 {
2613 struct drm_dp_mst_port *port;
2614 -
2615 + struct drm_dp_mst_branch *mstb_child;
2616 if (!mstb->link_address_sent) {
2617 drm_dp_send_link_address(mgr, mstb);
2618 mstb->link_address_sent = true;
2619 @@ -1204,17 +1215,31 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
2620 if (!port->available_pbn)
2621 drm_dp_send_enum_path_resources(mgr, mstb, port);
2622
2623 - if (port->mstb)
2624 - drm_dp_check_and_send_link_address(mgr, port->mstb);
2625 + if (port->mstb) {
2626 + mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
2627 + if (mstb_child) {
2628 + drm_dp_check_and_send_link_address(mgr, mstb_child);
2629 + drm_dp_put_mst_branch_device(mstb_child);
2630 + }
2631 + }
2632 }
2633 }
2634
2635 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2636 {
2637 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
2638 + struct drm_dp_mst_branch *mstb;
2639
2640 - drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
2641 -
2642 + mutex_lock(&mgr->lock);
2643 + mstb = mgr->mst_primary;
2644 + if (mstb) {
2645 + kref_get(&mstb->kref);
2646 + }
2647 + mutex_unlock(&mgr->lock);
2648 + if (mstb) {
2649 + drm_dp_check_and_send_link_address(mgr, mstb);
2650 + drm_dp_put_mst_branch_device(mstb);
2651 + }
2652 }
2653
2654 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2655 @@ -2632,6 +2657,30 @@ static void drm_dp_tx_work(struct work_struct *work)
2656 mutex_unlock(&mgr->qlock);
2657 }
2658
2659 +static void drm_dp_destroy_connector_work(struct work_struct *work)
2660 +{
2661 + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2662 + struct drm_connector *connector;
2663 +
2664 + /*
2665 + * Not a regular list traverse as we have to drop the destroy
2666 + * connector lock before destroying the connector, to avoid AB->BA
2667 + * ordering between this lock and the config mutex.
2668 + */
2669 + for (;;) {
2670 + mutex_lock(&mgr->destroy_connector_lock);
2671 + connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
2672 + if (!connector) {
2673 + mutex_unlock(&mgr->destroy_connector_lock);
2674 + break;
2675 + }
2676 + list_del(&connector->destroy_list);
2677 + mutex_unlock(&mgr->destroy_connector_lock);
2678 +
2679 + mgr->cbs->destroy_connector(mgr, connector);
2680 + }
2681 +}
2682 +
2683 /**
2684 * drm_dp_mst_topology_mgr_init - initialise a topology manager
2685 * @mgr: manager struct to initialise
2686 @@ -2651,10 +2700,13 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2687 mutex_init(&mgr->lock);
2688 mutex_init(&mgr->qlock);
2689 mutex_init(&mgr->payload_lock);
2690 + mutex_init(&mgr->destroy_connector_lock);
2691 INIT_LIST_HEAD(&mgr->tx_msg_upq);
2692 INIT_LIST_HEAD(&mgr->tx_msg_downq);
2693 + INIT_LIST_HEAD(&mgr->destroy_connector_list);
2694 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2695 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
2696 + INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
2697 init_waitqueue_head(&mgr->tx_waitq);
2698 mgr->dev = dev;
2699 mgr->aux = aux;
2700 @@ -2679,6 +2731,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
2701 */
2702 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2703 {
2704 + flush_work(&mgr->destroy_connector_work);
2705 mutex_lock(&mgr->payload_lock);
2706 kfree(mgr->payloads);
2707 mgr->payloads = NULL;
2708 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
2709 index aa8bbb460c57..9cfcd0aef0df 100644
2710 --- a/drivers/gpu/drm/drm_ioc32.c
2711 +++ b/drivers/gpu/drm/drm_ioc32.c
2712 @@ -70,6 +70,8 @@
2713
2714 #define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
2715
2716 +#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
2717 +
2718 typedef struct drm_version_32 {
2719 int version_major; /**< Major version */
2720 int version_minor; /**< Minor version */
2721 @@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
2722 return 0;
2723 }
2724
2725 +typedef struct drm_mode_fb_cmd232 {
2726 + u32 fb_id;
2727 + u32 width;
2728 + u32 height;
2729 + u32 pixel_format;
2730 + u32 flags;
2731 + u32 handles[4];
2732 + u32 pitches[4];
2733 + u32 offsets[4];
2734 + u64 modifier[4];
2735 +} __attribute__((packed)) drm_mode_fb_cmd232_t;
2736 +
2737 +static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
2738 + unsigned long arg)
2739 +{
2740 + struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
2741 + struct drm_mode_fb_cmd232 req32;
2742 + struct drm_mode_fb_cmd2 __user *req64;
2743 + int i;
2744 + int err;
2745 +
2746 + if (copy_from_user(&req32, argp, sizeof(req32)))
2747 + return -EFAULT;
2748 +
2749 + req64 = compat_alloc_user_space(sizeof(*req64));
2750 +
2751 + if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
2752 + || __put_user(req32.width, &req64->width)
2753 + || __put_user(req32.height, &req64->height)
2754 + || __put_user(req32.pixel_format, &req64->pixel_format)
2755 + || __put_user(req32.flags, &req64->flags))
2756 + return -EFAULT;
2757 +
2758 + for (i = 0; i < 4; i++) {
2759 + if (__put_user(req32.handles[i], &req64->handles[i]))
2760 + return -EFAULT;
2761 + if (__put_user(req32.pitches[i], &req64->pitches[i]))
2762 + return -EFAULT;
2763 + if (__put_user(req32.offsets[i], &req64->offsets[i]))
2764 + return -EFAULT;
2765 + if (__put_user(req32.modifier[i], &req64->modifier[i]))
2766 + return -EFAULT;
2767 + }
2768 +
2769 + err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
2770 + if (err)
2771 + return err;
2772 +
2773 + if (__get_user(req32.fb_id, &req64->fb_id))
2774 + return -EFAULT;
2775 +
2776 + if (copy_to_user(argp, &req32, sizeof(req32)))
2777 + return -EFAULT;
2778 +
2779 + return 0;
2780 +}
2781 +
2782 static drm_ioctl_compat_t *drm_compat_ioctls[] = {
2783 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
2784 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
2785 @@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
2786 [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
2787 #endif
2788 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
2789 + [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
2790 };
2791
2792 /**
2793 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2794 index 2d0995e7afc3..596bce56e379 100644
2795 --- a/drivers/gpu/drm/i915/i915_gem.c
2796 +++ b/drivers/gpu/drm/i915/i915_gem.c
2797 @@ -2401,6 +2401,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
2798 }
2799
2800 request->emitted_jiffies = jiffies;
2801 + ring->last_submitted_seqno = request->seqno;
2802 list_add_tail(&request->list, &ring->request_list);
2803 request->file_priv = NULL;
2804
2805 diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
2806 index 0239fbff7bf7..ad90fa3045e5 100644
2807 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
2808 +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
2809 @@ -502,17 +502,17 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
2810 struct page *page_table;
2811
2812 if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
2813 - continue;
2814 + break;
2815
2816 pd = ppgtt->pdp.page_directory[pdpe];
2817
2818 if (WARN_ON(!pd->page_table[pde]))
2819 - continue;
2820 + break;
2821
2822 pt = pd->page_table[pde];
2823
2824 if (WARN_ON(!pt->page))
2825 - continue;
2826 + break;
2827
2828 page_table = pt->page;
2829
2830 diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
2831 index 176de6322e4d..23aa04cded6b 100644
2832 --- a/drivers/gpu/drm/i915/i915_ioc32.c
2833 +++ b/drivers/gpu/drm/i915/i915_ioc32.c
2834 @@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2835 drm_ioctl_compat_t *fn = NULL;
2836 int ret;
2837
2838 - if (nr < DRM_COMMAND_BASE)
2839 + if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
2840 return drm_compat_ioctl(filp, cmd, arg);
2841
2842 if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
2843 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
2844 index 6d494432b19f..b0df8d10482a 100644
2845 --- a/drivers/gpu/drm/i915/i915_irq.c
2846 +++ b/drivers/gpu/drm/i915/i915_irq.c
2847 @@ -2650,18 +2650,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2848 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2849 }
2850
2851 -static struct drm_i915_gem_request *
2852 -ring_last_request(struct intel_engine_cs *ring)
2853 -{
2854 - return list_entry(ring->request_list.prev,
2855 - struct drm_i915_gem_request, list);
2856 -}
2857 -
2858 static bool
2859 -ring_idle(struct intel_engine_cs *ring)
2860 +ring_idle(struct intel_engine_cs *ring, u32 seqno)
2861 {
2862 return (list_empty(&ring->request_list) ||
2863 - i915_gem_request_completed(ring_last_request(ring), false));
2864 + i915_seqno_passed(seqno, ring->last_submitted_seqno));
2865 }
2866
2867 static bool
2868 @@ -2883,7 +2876,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
2869 acthd = intel_ring_get_active_head(ring);
2870
2871 if (ring->hangcheck.seqno == seqno) {
2872 - if (ring_idle(ring)) {
2873 + if (ring_idle(ring, seqno)) {
2874 ring->hangcheck.action = HANGCHECK_IDLE;
2875
2876 if (waitqueue_active(&ring->irq_queue)) {
2877 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2878 index 773d1d24e604..a30db4b4050e 100644
2879 --- a/drivers/gpu/drm/i915/i915_reg.h
2880 +++ b/drivers/gpu/drm/i915/i915_reg.h
2881 @@ -3209,6 +3209,7 @@ enum skl_disp_power_wells {
2882 #define BLM_POLARITY_PNV (1 << 0) /* pnv only */
2883
2884 #define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260)
2885 +#define BLM_HISTOGRAM_ENABLE (1 << 31)
2886
2887 /* New registers for PCH-split platforms. Safe where new bits show up, the
2888 * register layout matches with gen4 BLC_PWM_CTL[12]. */
2889 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2890 index d0f3cbc87474..57c887843dc3 100644
2891 --- a/drivers/gpu/drm/i915/intel_display.c
2892 +++ b/drivers/gpu/drm/i915/intel_display.c
2893 @@ -12499,6 +12499,16 @@ intel_check_primary_plane(struct drm_plane *plane,
2894 intel_crtc->atomic.wait_vblank = true;
2895 }
2896
2897 + /*
2898 + * FIXME: If any other plane is still enabled on the pipe we
2899 + * could leave IPS enabled, but for now assume that when the
2900 + * primary plane is made invisible by setting DSPCNTR to 0 in
2901 + * the update_primary_plane function, IPS needs to be
2902 + * disabled.
2903 + */
2904 + if (!state->visible || !fb)
2905 + intel_crtc->atomic.disable_ips = true;
2906 +
2907 intel_crtc->atomic.fb_bits |=
2908 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2909
2910 @@ -12590,6 +12600,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc)
2911 if (intel_crtc->atomic.disable_fbc)
2912 intel_fbc_disable(dev);
2913
2914 + if (intel_crtc->atomic.disable_ips)
2915 + hsw_disable_ips(intel_crtc);
2916 +
2917 if (intel_crtc->atomic.pre_disable_primary)
2918 intel_pre_disable_primary(crtc);
2919
2920 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
2921 index 897f17db08af..68d1f74a7403 100644
2922 --- a/drivers/gpu/drm/i915/intel_drv.h
2923 +++ b/drivers/gpu/drm/i915/intel_drv.h
2924 @@ -424,6 +424,7 @@ struct intel_crtc_atomic_commit {
2925 /* Sleepable operations to perform before commit */
2926 bool wait_for_flips;
2927 bool disable_fbc;
2928 + bool disable_ips;
2929 bool pre_disable_primary;
2930 bool update_wm;
2931 unsigned disabled_planes;
2932 diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
2933 index 08532d4ffe0a..2bf92cba4a55 100644
2934 --- a/drivers/gpu/drm/i915/intel_panel.c
2935 +++ b/drivers/gpu/drm/i915/intel_panel.c
2936 @@ -879,6 +879,14 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
2937
2938 /* XXX: combine this into above write? */
2939 intel_panel_actually_set_backlight(connector, panel->backlight.level);
2940 +
2941 + /*
2942 + * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
2943 + * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
2944 + * that has backlight.
2945 + */
2946 + if (IS_GEN2(dev))
2947 + I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
2948 }
2949
2950 static void i965_enable_backlight(struct intel_connector *connector)
2951 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
2952 index c761fe05ad6f..94514d364d25 100644
2953 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
2954 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
2955 @@ -266,6 +266,13 @@ struct intel_engine_cs {
2956 * Do we have some not yet emitted requests outstanding?
2957 */
2958 struct drm_i915_gem_request *outstanding_lazy_request;
2959 + /**
2960 + * Seqno of request most recently submitted to request_list.
2961 + * Used exclusively by hang checker to avoid grabbing lock while
2962 + * inspecting request list.
2963 + */
2964 + u32 last_submitted_seqno;
2965 +
2966 bool gpu_caches_dirty;
2967
2968 wait_queue_head_t irq_queue;
2969 diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
2970 index ff2a74651dd4..a18807ec8371 100644
2971 --- a/drivers/gpu/drm/i915/intel_uncore.c
2972 +++ b/drivers/gpu/drm/i915/intel_uncore.c
2973 @@ -1220,10 +1220,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
2974 struct drm_i915_private *dev_priv = dev->dev_private;
2975 struct drm_i915_reg_read *reg = data;
2976 struct register_whitelist const *entry = whitelist;
2977 + unsigned size;
2978 + u64 offset;
2979 int i, ret = 0;
2980
2981 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
2982 - if (entry->offset == reg->offset &&
2983 + if (entry->offset == (reg->offset & -entry->size) &&
2984 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
2985 break;
2986 }
2987 @@ -1231,23 +1233,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
2988 if (i == ARRAY_SIZE(whitelist))
2989 return -EINVAL;
2990
2991 + /* We use the low bits to encode extra flags as the register should
2992 + * be naturally aligned (and those that are not so aligned merely
2993 + * limit the available flags for that register).
2994 + */
2995 + offset = entry->offset;
2996 + size = entry->size;
2997 + size |= reg->offset ^ offset;
2998 +
2999 intel_runtime_pm_get(dev_priv);
3000
3001 - switch (entry->size) {
3002 + switch (size) {
3003 + case 8 | 1:
3004 + reg->val = I915_READ64_2x32(offset, offset+4);
3005 + break;
3006 case 8:
3007 - reg->val = I915_READ64(reg->offset);
3008 + reg->val = I915_READ64(offset);
3009 break;
3010 case 4:
3011 - reg->val = I915_READ(reg->offset);
3012 + reg->val = I915_READ(offset);
3013 break;
3014 case 2:
3015 - reg->val = I915_READ16(reg->offset);
3016 + reg->val = I915_READ16(offset);
3017 break;
3018 case 1:
3019 - reg->val = I915_READ8(reg->offset);
3020 + reg->val = I915_READ8(offset);
3021 break;
3022 default:
3023 - MISSING_CASE(entry->size);
3024 ret = -EINVAL;
3025 goto out;
3026 }
3027 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
3028 index 97823644d347..f33251d67914 100644
3029 --- a/drivers/gpu/drm/qxl/qxl_cmd.c
3030 +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
3031 @@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
3032
3033 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
3034 cmd->type = QXL_SURFACE_CMD_CREATE;
3035 + cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
3036 cmd->u.surface_create.format = surf->surf.format;
3037 cmd->u.surface_create.width = surf->surf.width;
3038 cmd->u.surface_create.height = surf->surf.height;
3039 diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
3040 index b110883f8253..7354a4cda59d 100644
3041 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c
3042 +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
3043 @@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
3044 qobj = gem_to_qxl_bo(gobj);
3045
3046 ret = qxl_release_list_add(release, qobj);
3047 - if (ret)
3048 + if (ret) {
3049 + drm_gem_object_unreference_unlocked(gobj);
3050 return NULL;
3051 + }
3052
3053 return qobj;
3054 }
3055 diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
3056 index 8730562323a8..4a09947be244 100644
3057 --- a/drivers/gpu/drm/radeon/ci_dpm.c
3058 +++ b/drivers/gpu/drm/radeon/ci_dpm.c
3059 @@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
3060 tmp |= DPM_ENABLED;
3061 break;
3062 default:
3063 - DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
3064 + DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
3065 break;
3066 }
3067 WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
3068 diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
3069 index ba50f3c1c2e0..845665362475 100644
3070 --- a/drivers/gpu/drm/radeon/cik.c
3071 +++ b/drivers/gpu/drm/radeon/cik.c
3072 @@ -4579,6 +4579,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
3073 WDOORBELL32(ring->doorbell_index, ring->wptr);
3074 }
3075
3076 +static void cik_compute_stop(struct radeon_device *rdev,
3077 + struct radeon_ring *ring)
3078 +{
3079 + u32 j, tmp;
3080 +
3081 + cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
3082 + /* Disable wptr polling. */
3083 + tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
3084 + tmp &= ~WPTR_POLL_EN;
3085 + WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
3086 + /* Disable HQD. */
3087 + if (RREG32(CP_HQD_ACTIVE) & 1) {
3088 + WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
3089 + for (j = 0; j < rdev->usec_timeout; j++) {
3090 + if (!(RREG32(CP_HQD_ACTIVE) & 1))
3091 + break;
3092 + udelay(1);
3093 + }
3094 + WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
3095 + WREG32(CP_HQD_PQ_RPTR, 0);
3096 + WREG32(CP_HQD_PQ_WPTR, 0);
3097 + }
3098 + cik_srbm_select(rdev, 0, 0, 0, 0);
3099 +}
3100 +
3101 /**
3102 * cik_cp_compute_enable - enable/disable the compute CP MEs
3103 *
3104 @@ -4592,6 +4617,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
3105 if (enable)
3106 WREG32(CP_MEC_CNTL, 0);
3107 else {
3108 + /*
3109 + * To make hibernation reliable we need to clear compute ring
3110 + * configuration before halting the compute ring.
3111 + */
3112 + mutex_lock(&rdev->srbm_mutex);
3113 + cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
3114 + cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
3115 + mutex_unlock(&rdev->srbm_mutex);
3116 +
3117 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
3118 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3119 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3120 @@ -7905,23 +7939,27 @@ restart_ih:
3121 case 1: /* D1 vblank/vline */
3122 switch (src_data) {
3123 case 0: /* D1 vblank */
3124 - if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
3125 - if (rdev->irq.crtc_vblank_int[0]) {
3126 - drm_handle_vblank(rdev->ddev, 0);
3127 - rdev->pm.vblank_sync = true;
3128 - wake_up(&rdev->irq.vblank_queue);
3129 - }
3130 - if (atomic_read(&rdev->irq.pflip[0]))
3131 - radeon_crtc_handle_vblank(rdev, 0);
3132 - rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3133 - DRM_DEBUG("IH: D1 vblank\n");
3134 + if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
3135 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3136 +
3137 + if (rdev->irq.crtc_vblank_int[0]) {
3138 + drm_handle_vblank(rdev->ddev, 0);
3139 + rdev->pm.vblank_sync = true;
3140 + wake_up(&rdev->irq.vblank_queue);
3141 }
3142 + if (atomic_read(&rdev->irq.pflip[0]))
3143 + radeon_crtc_handle_vblank(rdev, 0);
3144 + rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3145 + DRM_DEBUG("IH: D1 vblank\n");
3146 +
3147 break;
3148 case 1: /* D1 vline */
3149 - if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
3150 - rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3151 - DRM_DEBUG("IH: D1 vline\n");
3152 - }
3153 + if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
3154 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3155 +
3156 + rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3157 + DRM_DEBUG("IH: D1 vline\n");
3158 +
3159 break;
3160 default:
3161 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3162 @@ -7931,23 +7969,27 @@ restart_ih:
3163 case 2: /* D2 vblank/vline */
3164 switch (src_data) {
3165 case 0: /* D2 vblank */
3166 - if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
3167 - if (rdev->irq.crtc_vblank_int[1]) {
3168 - drm_handle_vblank(rdev->ddev, 1);
3169 - rdev->pm.vblank_sync = true;
3170 - wake_up(&rdev->irq.vblank_queue);
3171 - }
3172 - if (atomic_read(&rdev->irq.pflip[1]))
3173 - radeon_crtc_handle_vblank(rdev, 1);
3174 - rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
3175 - DRM_DEBUG("IH: D2 vblank\n");
3176 + if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
3177 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3178 +
3179 + if (rdev->irq.crtc_vblank_int[1]) {
3180 + drm_handle_vblank(rdev->ddev, 1);
3181 + rdev->pm.vblank_sync = true;
3182 + wake_up(&rdev->irq.vblank_queue);
3183 }
3184 + if (atomic_read(&rdev->irq.pflip[1]))
3185 + radeon_crtc_handle_vblank(rdev, 1);
3186 + rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
3187 + DRM_DEBUG("IH: D2 vblank\n");
3188 +
3189 break;
3190 case 1: /* D2 vline */
3191 - if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
3192 - rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
3193 - DRM_DEBUG("IH: D2 vline\n");
3194 - }
3195 + if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
3196 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3197 +
3198 + rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
3199 + DRM_DEBUG("IH: D2 vline\n");
3200 +
3201 break;
3202 default:
3203 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3204 @@ -7957,23 +7999,27 @@ restart_ih:
3205 case 3: /* D3 vblank/vline */
3206 switch (src_data) {
3207 case 0: /* D3 vblank */
3208 - if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
3209 - if (rdev->irq.crtc_vblank_int[2]) {
3210 - drm_handle_vblank(rdev->ddev, 2);
3211 - rdev->pm.vblank_sync = true;
3212 - wake_up(&rdev->irq.vblank_queue);
3213 - }
3214 - if (atomic_read(&rdev->irq.pflip[2]))
3215 - radeon_crtc_handle_vblank(rdev, 2);
3216 - rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
3217 - DRM_DEBUG("IH: D3 vblank\n");
3218 + if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
3219 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3220 +
3221 + if (rdev->irq.crtc_vblank_int[2]) {
3222 + drm_handle_vblank(rdev->ddev, 2);
3223 + rdev->pm.vblank_sync = true;
3224 + wake_up(&rdev->irq.vblank_queue);
3225 }
3226 + if (atomic_read(&rdev->irq.pflip[2]))
3227 + radeon_crtc_handle_vblank(rdev, 2);
3228 + rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
3229 + DRM_DEBUG("IH: D3 vblank\n");
3230 +
3231 break;
3232 case 1: /* D3 vline */
3233 - if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
3234 - rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
3235 - DRM_DEBUG("IH: D3 vline\n");
3236 - }
3237 + if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
3238 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3239 +
3240 + rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
3241 + DRM_DEBUG("IH: D3 vline\n");
3242 +
3243 break;
3244 default:
3245 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3246 @@ -7983,23 +8029,27 @@ restart_ih:
3247 case 4: /* D4 vblank/vline */
3248 switch (src_data) {
3249 case 0: /* D4 vblank */
3250 - if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
3251 - if (rdev->irq.crtc_vblank_int[3]) {
3252 - drm_handle_vblank(rdev->ddev, 3);
3253 - rdev->pm.vblank_sync = true;
3254 - wake_up(&rdev->irq.vblank_queue);
3255 - }
3256 - if (atomic_read(&rdev->irq.pflip[3]))
3257 - radeon_crtc_handle_vblank(rdev, 3);
3258 - rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
3259 - DRM_DEBUG("IH: D4 vblank\n");
3260 + if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
3261 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3262 +
3263 + if (rdev->irq.crtc_vblank_int[3]) {
3264 + drm_handle_vblank(rdev->ddev, 3);
3265 + rdev->pm.vblank_sync = true;
3266 + wake_up(&rdev->irq.vblank_queue);
3267 }
3268 + if (atomic_read(&rdev->irq.pflip[3]))
3269 + radeon_crtc_handle_vblank(rdev, 3);
3270 + rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
3271 + DRM_DEBUG("IH: D4 vblank\n");
3272 +
3273 break;
3274 case 1: /* D4 vline */
3275 - if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
3276 - rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
3277 - DRM_DEBUG("IH: D4 vline\n");
3278 - }
3279 + if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
3280 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3281 +
3282 + rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
3283 + DRM_DEBUG("IH: D4 vline\n");
3284 +
3285 break;
3286 default:
3287 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3288 @@ -8009,23 +8059,27 @@ restart_ih:
3289 case 5: /* D5 vblank/vline */
3290 switch (src_data) {
3291 case 0: /* D5 vblank */
3292 - if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
3293 - if (rdev->irq.crtc_vblank_int[4]) {
3294 - drm_handle_vblank(rdev->ddev, 4);
3295 - rdev->pm.vblank_sync = true;
3296 - wake_up(&rdev->irq.vblank_queue);
3297 - }
3298 - if (atomic_read(&rdev->irq.pflip[4]))
3299 - radeon_crtc_handle_vblank(rdev, 4);
3300 - rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
3301 - DRM_DEBUG("IH: D5 vblank\n");
3302 + if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
3303 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3304 +
3305 + if (rdev->irq.crtc_vblank_int[4]) {
3306 + drm_handle_vblank(rdev->ddev, 4);
3307 + rdev->pm.vblank_sync = true;
3308 + wake_up(&rdev->irq.vblank_queue);
3309 }
3310 + if (atomic_read(&rdev->irq.pflip[4]))
3311 + radeon_crtc_handle_vblank(rdev, 4);
3312 + rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
3313 + DRM_DEBUG("IH: D5 vblank\n");
3314 +
3315 break;
3316 case 1: /* D5 vline */
3317 - if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
3318 - rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
3319 - DRM_DEBUG("IH: D5 vline\n");
3320 - }
3321 + if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
3322 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3323 +
3324 + rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
3325 + DRM_DEBUG("IH: D5 vline\n");
3326 +
3327 break;
3328 default:
3329 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3330 @@ -8035,23 +8089,27 @@ restart_ih:
3331 case 6: /* D6 vblank/vline */
3332 switch (src_data) {
3333 case 0: /* D6 vblank */
3334 - if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
3335 - if (rdev->irq.crtc_vblank_int[5]) {
3336 - drm_handle_vblank(rdev->ddev, 5);
3337 - rdev->pm.vblank_sync = true;
3338 - wake_up(&rdev->irq.vblank_queue);
3339 - }
3340 - if (atomic_read(&rdev->irq.pflip[5]))
3341 - radeon_crtc_handle_vblank(rdev, 5);
3342 - rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
3343 - DRM_DEBUG("IH: D6 vblank\n");
3344 + if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
3345 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3346 +
3347 + if (rdev->irq.crtc_vblank_int[5]) {
3348 + drm_handle_vblank(rdev->ddev, 5);
3349 + rdev->pm.vblank_sync = true;
3350 + wake_up(&rdev->irq.vblank_queue);
3351 }
3352 + if (atomic_read(&rdev->irq.pflip[5]))
3353 + radeon_crtc_handle_vblank(rdev, 5);
3354 + rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
3355 + DRM_DEBUG("IH: D6 vblank\n");
3356 +
3357 break;
3358 case 1: /* D6 vline */
3359 - if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
3360 - rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
3361 - DRM_DEBUG("IH: D6 vline\n");
3362 - }
3363 + if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
3364 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3365 +
3366 + rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
3367 + DRM_DEBUG("IH: D6 vline\n");
3368 +
3369 break;
3370 default:
3371 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3372 @@ -8071,88 +8129,112 @@ restart_ih:
3373 case 42: /* HPD hotplug */
3374 switch (src_data) {
3375 case 0:
3376 - if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
3377 - rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
3378 - queue_hotplug = true;
3379 - DRM_DEBUG("IH: HPD1\n");
3380 - }
3381 + if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
3382 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3383 +
3384 + rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
3385 + queue_hotplug = true;
3386 + DRM_DEBUG("IH: HPD1\n");
3387 +
3388 break;
3389 case 1:
3390 - if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
3391 - rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
3392 - queue_hotplug = true;
3393 - DRM_DEBUG("IH: HPD2\n");
3394 - }
3395 + if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
3396 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3397 +
3398 + rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
3399 + queue_hotplug = true;
3400 + DRM_DEBUG("IH: HPD2\n");
3401 +
3402 break;
3403 case 2:
3404 - if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3405 - rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
3406 - queue_hotplug = true;
3407 - DRM_DEBUG("IH: HPD3\n");
3408 - }
3409 + if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
3410 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3411 +
3412 + rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
3413 + queue_hotplug = true;
3414 + DRM_DEBUG("IH: HPD3\n");
3415 +
3416 break;
3417 case 3:
3418 - if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3419 - rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
3420 - queue_hotplug = true;
3421 - DRM_DEBUG("IH: HPD4\n");
3422 - }
3423 + if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
3424 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3425 +
3426 + rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
3427 + queue_hotplug = true;
3428 + DRM_DEBUG("IH: HPD4\n");
3429 +
3430 break;
3431 case 4:
3432 - if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3433 - rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
3434 - queue_hotplug = true;
3435 - DRM_DEBUG("IH: HPD5\n");
3436 - }
3437 + if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
3438 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3439 +
3440 + rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
3441 + queue_hotplug = true;
3442 + DRM_DEBUG("IH: HPD5\n");
3443 +
3444 break;
3445 case 5:
3446 - if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3447 - rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
3448 - queue_hotplug = true;
3449 - DRM_DEBUG("IH: HPD6\n");
3450 - }
3451 + if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
3452 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3453 +
3454 + rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
3455 + queue_hotplug = true;
3456 + DRM_DEBUG("IH: HPD6\n");
3457 +
3458 break;
3459 case 6:
3460 - if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
3461 - rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
3462 - queue_dp = true;
3463 - DRM_DEBUG("IH: HPD_RX 1\n");
3464 - }
3465 + if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
3466 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3467 +
3468 + rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
3469 + queue_dp = true;
3470 + DRM_DEBUG("IH: HPD_RX 1\n");
3471 +
3472 break;
3473 case 7:
3474 - if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
3475 - rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
3476 - queue_dp = true;
3477 - DRM_DEBUG("IH: HPD_RX 2\n");
3478 - }
3479 + if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
3480 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3481 +
3482 + rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
3483 + queue_dp = true;
3484 + DRM_DEBUG("IH: HPD_RX 2\n");
3485 +
3486 break;
3487 case 8:
3488 - if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
3489 - rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
3490 - queue_dp = true;
3491 - DRM_DEBUG("IH: HPD_RX 3\n");
3492 - }
3493 + if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
3494 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3495 +
3496 + rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
3497 + queue_dp = true;
3498 + DRM_DEBUG("IH: HPD_RX 3\n");
3499 +
3500 break;
3501 case 9:
3502 - if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
3503 - rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
3504 - queue_dp = true;
3505 - DRM_DEBUG("IH: HPD_RX 4\n");
3506 - }
3507 + if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
3508 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3509 +
3510 + rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
3511 + queue_dp = true;
3512 + DRM_DEBUG("IH: HPD_RX 4\n");
3513 +
3514 break;
3515 case 10:
3516 - if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
3517 - rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
3518 - queue_dp = true;
3519 - DRM_DEBUG("IH: HPD_RX 5\n");
3520 - }
3521 + if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
3522 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3523 +
3524 + rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
3525 + queue_dp = true;
3526 + DRM_DEBUG("IH: HPD_RX 5\n");
3527 +
3528 break;
3529 case 11:
3530 - if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
3531 - rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
3532 - queue_dp = true;
3533 - DRM_DEBUG("IH: HPD_RX 6\n");
3534 - }
3535 + if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
3536 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3537 +
3538 + rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
3539 + queue_dp = true;
3540 + DRM_DEBUG("IH: HPD_RX 6\n");
3541 +
3542 break;
3543 default:
3544 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
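
The large cik.c interrupt-handler rework above (and the matching evergreen.c and r600.c hunks further below) all apply the same transformation: instead of silently skipping an IH ring event whose latched status bit is not set, the handler now only logs a debug warning and still acknowledges and services the event. A rough standalone illustration of the before/after control flow, using a fake status word rather than real radeon registers:

#include <stdint.h>
#include <stdio.h>

#define VBLANK_STATUS_BIT	(1u << 0)	/* stand-in for LB_D1_VBLANK_INTERRUPT */

static uint32_t disp_int;			/* fake latched status register */

static void handle_vblank(void)
{
	printf("vblank handled\n");
}

/* Old style: an event arriving with a clear status bit was dropped. */
static void handle_event_old(void)
{
	if (disp_int & VBLANK_STATUS_BIT) {
		handle_vblank();
		disp_int &= ~VBLANK_STATUS_BIT;
	}
}

/* New style: warn about the inconsistency, but still service the event. */
static void handle_event_new(void)
{
	if (!(disp_int & VBLANK_STATUS_BIT))
		printf("IH event w/o asserted irq bit?\n");

	handle_vblank();
	disp_int &= ~VBLANK_STATUS_BIT;
}

int main(void)
{
	disp_int = 0;			/* event arrives with no status bit set */
	handle_event_old();		/* prints nothing: event silently lost */
	handle_event_new();		/* warns, then still handles the vblank */
	return 0;
}
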
3545 diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
3546 index f86eb54e7763..d16f2eebd95e 100644
3547 --- a/drivers/gpu/drm/radeon/cik_sdma.c
3548 +++ b/drivers/gpu/drm/radeon/cik_sdma.c
3549 @@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
3550 }
3551 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
3552 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
3553 +
3554 + /* FIXME use something else than big hammer but after few days can not
3555 + * seem to find good combination so reset SDMA blocks as it seems we
3556 + * do not shut them down properly. This fix hibernation and does not
3557 + * affect suspend to ram.
3558 + */
3559 + WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
3560 + (void)RREG32(SRBM_SOFT_RESET);
3561 + udelay(50);
3562 + WREG32(SRBM_SOFT_RESET, 0);
3563 + (void)RREG32(SRBM_SOFT_RESET);
3564 }
3565
3566 /**
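
The cik_compute_stop() and cik_sdma_gfx_stop() additions above share a hibernation-oriented shutdown pattern: ask the unit to stop, poll a status register with a bounded busy-wait, and for the SDMA engines fall back to a soft reset in which the write is posted with a read-back and held for a short delay before the reset bits are released. The sketch below only mimics that sequencing against a simulated register file; all register names, bit values and helpers are placeholders, not real CIK definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder register map and bits; the real values live in cikd.h. */
enum { REG_DEQUEUE_REQUEST, REG_ACTIVE, REG_SOFT_RESET, NUM_REGS };
#define SOFT_RESET_DMA_BITS	0x3u

static uint32_t regs[NUM_REGS];

static void wreg(unsigned r, uint32_t v) { regs[r] = v; }
static uint32_t rreg(unsigned r)         { return regs[r]; }
static void delay_us(unsigned us)        { (void)us; /* stand-in for udelay() */ }

/* Ask the queue to dequeue, then poll ACTIVE with a bounded timeout. */
static void stop_queue(unsigned timeout_us)
{
	unsigned i;

	wreg(REG_DEQUEUE_REQUEST, 1);
	for (i = 0; i < timeout_us; i++) {
		if (!(rreg(REG_ACTIVE) & 1))
			break;
		delay_us(1);
	}
	wreg(REG_DEQUEUE_REQUEST, 0);
}

/* Big-hammer reset: post the write with a read-back, wait, then release. */
static void soft_reset_dma(void)
{
	wreg(REG_SOFT_RESET, SOFT_RESET_DMA_BITS);
	(void)rreg(REG_SOFT_RESET);	/* read back to post the write */
	delay_us(50);
	wreg(REG_SOFT_RESET, 0);
	(void)rreg(REG_SOFT_RESET);
}

int main(void)
{
	regs[REG_ACTIVE] = 0;		/* pretend the queue drains immediately */
	stop_queue(1000);
	soft_reset_dma();
	printf("queues stopped and DMA engines reset (simulated)\n");
	return 0;
}
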
3567 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
3568 index f848acfd3fc8..feef136cdb55 100644
3569 --- a/drivers/gpu/drm/radeon/evergreen.c
3570 +++ b/drivers/gpu/drm/radeon/evergreen.c
3571 @@ -4855,7 +4855,7 @@ restart_ih:
3572 return IRQ_NONE;
3573
3574 rptr = rdev->ih.rptr;
3575 - DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3576 + DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3577
3578 /* Order reading of wptr vs. reading of IH ring data */
3579 rmb();
3580 @@ -4873,23 +4873,27 @@ restart_ih:
3581 case 1: /* D1 vblank/vline */
3582 switch (src_data) {
3583 case 0: /* D1 vblank */
3584 - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
3585 - if (rdev->irq.crtc_vblank_int[0]) {
3586 - drm_handle_vblank(rdev->ddev, 0);
3587 - rdev->pm.vblank_sync = true;
3588 - wake_up(&rdev->irq.vblank_queue);
3589 - }
3590 - if (atomic_read(&rdev->irq.pflip[0]))
3591 - radeon_crtc_handle_vblank(rdev, 0);
3592 - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3593 - DRM_DEBUG("IH: D1 vblank\n");
3594 + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
3595 + DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
3596 +
3597 + if (rdev->irq.crtc_vblank_int[0]) {
3598 + drm_handle_vblank(rdev->ddev, 0);
3599 + rdev->pm.vblank_sync = true;
3600 + wake_up(&rdev->irq.vblank_queue);
3601 }
3602 + if (atomic_read(&rdev->irq.pflip[0]))
3603 + radeon_crtc_handle_vblank(rdev, 0);
3604 + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3605 + DRM_DEBUG("IH: D1 vblank\n");
3606 +
3607 break;
3608 case 1: /* D1 vline */
3609 - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
3610 - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3611 - DRM_DEBUG("IH: D1 vline\n");
3612 - }
3613 + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
3614 + DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
3615 +
3616 + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3617 + DRM_DEBUG("IH: D1 vline\n");
3618 +
3619 break;
3620 default:
3621 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3622 @@ -4899,23 +4903,27 @@ restart_ih:
3623 case 2: /* D2 vblank/vline */
3624 switch (src_data) {
3625 case 0: /* D2 vblank */
3626 - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
3627 - if (rdev->irq.crtc_vblank_int[1]) {
3628 - drm_handle_vblank(rdev->ddev, 1);
3629 - rdev->pm.vblank_sync = true;
3630 - wake_up(&rdev->irq.vblank_queue);
3631 - }
3632 - if (atomic_read(&rdev->irq.pflip[1]))
3633 - radeon_crtc_handle_vblank(rdev, 1);
3634 - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
3635 - DRM_DEBUG("IH: D2 vblank\n");
3636 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
3637 + DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
3638 +
3639 + if (rdev->irq.crtc_vblank_int[1]) {
3640 + drm_handle_vblank(rdev->ddev, 1);
3641 + rdev->pm.vblank_sync = true;
3642 + wake_up(&rdev->irq.vblank_queue);
3643 }
3644 + if (atomic_read(&rdev->irq.pflip[1]))
3645 + radeon_crtc_handle_vblank(rdev, 1);
3646 + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
3647 + DRM_DEBUG("IH: D2 vblank\n");
3648 +
3649 break;
3650 case 1: /* D2 vline */
3651 - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
3652 - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
3653 - DRM_DEBUG("IH: D2 vline\n");
3654 - }
3655 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
3656 + DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
3657 +
3658 + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
3659 + DRM_DEBUG("IH: D2 vline\n");
3660 +
3661 break;
3662 default:
3663 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3664 @@ -4925,23 +4933,27 @@ restart_ih:
3665 case 3: /* D3 vblank/vline */
3666 switch (src_data) {
3667 case 0: /* D3 vblank */
3668 - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
3669 - if (rdev->irq.crtc_vblank_int[2]) {
3670 - drm_handle_vblank(rdev->ddev, 2);
3671 - rdev->pm.vblank_sync = true;
3672 - wake_up(&rdev->irq.vblank_queue);
3673 - }
3674 - if (atomic_read(&rdev->irq.pflip[2]))
3675 - radeon_crtc_handle_vblank(rdev, 2);
3676 - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
3677 - DRM_DEBUG("IH: D3 vblank\n");
3678 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
3679 + DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
3680 +
3681 + if (rdev->irq.crtc_vblank_int[2]) {
3682 + drm_handle_vblank(rdev->ddev, 2);
3683 + rdev->pm.vblank_sync = true;
3684 + wake_up(&rdev->irq.vblank_queue);
3685 }
3686 + if (atomic_read(&rdev->irq.pflip[2]))
3687 + radeon_crtc_handle_vblank(rdev, 2);
3688 + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
3689 + DRM_DEBUG("IH: D3 vblank\n");
3690 +
3691 break;
3692 case 1: /* D3 vline */
3693 - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
3694 - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
3695 - DRM_DEBUG("IH: D3 vline\n");
3696 - }
3697 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
3698 + DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
3699 +
3700 + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
3701 + DRM_DEBUG("IH: D3 vline\n");
3702 +
3703 break;
3704 default:
3705 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3706 @@ -4951,23 +4963,27 @@ restart_ih:
3707 case 4: /* D4 vblank/vline */
3708 switch (src_data) {
3709 case 0: /* D4 vblank */
3710 - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
3711 - if (rdev->irq.crtc_vblank_int[3]) {
3712 - drm_handle_vblank(rdev->ddev, 3);
3713 - rdev->pm.vblank_sync = true;
3714 - wake_up(&rdev->irq.vblank_queue);
3715 - }
3716 - if (atomic_read(&rdev->irq.pflip[3]))
3717 - radeon_crtc_handle_vblank(rdev, 3);
3718 - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
3719 - DRM_DEBUG("IH: D4 vblank\n");
3720 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
3721 + DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
3722 +
3723 + if (rdev->irq.crtc_vblank_int[3]) {
3724 + drm_handle_vblank(rdev->ddev, 3);
3725 + rdev->pm.vblank_sync = true;
3726 + wake_up(&rdev->irq.vblank_queue);
3727 }
3728 + if (atomic_read(&rdev->irq.pflip[3]))
3729 + radeon_crtc_handle_vblank(rdev, 3);
3730 + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
3731 + DRM_DEBUG("IH: D4 vblank\n");
3732 +
3733 break;
3734 case 1: /* D4 vline */
3735 - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
3736 - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
3737 - DRM_DEBUG("IH: D4 vline\n");
3738 - }
3739 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
3740 + DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
3741 +
3742 + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
3743 + DRM_DEBUG("IH: D4 vline\n");
3744 +
3745 break;
3746 default:
3747 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3748 @@ -4977,23 +4993,27 @@ restart_ih:
3749 case 5: /* D5 vblank/vline */
3750 switch (src_data) {
3751 case 0: /* D5 vblank */
3752 - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
3753 - if (rdev->irq.crtc_vblank_int[4]) {
3754 - drm_handle_vblank(rdev->ddev, 4);
3755 - rdev->pm.vblank_sync = true;
3756 - wake_up(&rdev->irq.vblank_queue);
3757 - }
3758 - if (atomic_read(&rdev->irq.pflip[4]))
3759 - radeon_crtc_handle_vblank(rdev, 4);
3760 - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
3761 - DRM_DEBUG("IH: D5 vblank\n");
3762 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
3763 + DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
3764 +
3765 + if (rdev->irq.crtc_vblank_int[4]) {
3766 + drm_handle_vblank(rdev->ddev, 4);
3767 + rdev->pm.vblank_sync = true;
3768 + wake_up(&rdev->irq.vblank_queue);
3769 }
3770 + if (atomic_read(&rdev->irq.pflip[4]))
3771 + radeon_crtc_handle_vblank(rdev, 4);
3772 + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
3773 + DRM_DEBUG("IH: D5 vblank\n");
3774 +
3775 break;
3776 case 1: /* D5 vline */
3777 - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
3778 - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
3779 - DRM_DEBUG("IH: D5 vline\n");
3780 - }
3781 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
3782 + DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
3783 +
3784 + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
3785 + DRM_DEBUG("IH: D5 vline\n");
3786 +
3787 break;
3788 default:
3789 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3790 @@ -5003,23 +5023,27 @@ restart_ih:
3791 case 6: /* D6 vblank/vline */
3792 switch (src_data) {
3793 case 0: /* D6 vblank */
3794 - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
3795 - if (rdev->irq.crtc_vblank_int[5]) {
3796 - drm_handle_vblank(rdev->ddev, 5);
3797 - rdev->pm.vblank_sync = true;
3798 - wake_up(&rdev->irq.vblank_queue);
3799 - }
3800 - if (atomic_read(&rdev->irq.pflip[5]))
3801 - radeon_crtc_handle_vblank(rdev, 5);
3802 - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
3803 - DRM_DEBUG("IH: D6 vblank\n");
3804 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
3805 + DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
3806 +
3807 + if (rdev->irq.crtc_vblank_int[5]) {
3808 + drm_handle_vblank(rdev->ddev, 5);
3809 + rdev->pm.vblank_sync = true;
3810 + wake_up(&rdev->irq.vblank_queue);
3811 }
3812 + if (atomic_read(&rdev->irq.pflip[5]))
3813 + radeon_crtc_handle_vblank(rdev, 5);
3814 + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
3815 + DRM_DEBUG("IH: D6 vblank\n");
3816 +
3817 break;
3818 case 1: /* D6 vline */
3819 - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
3820 - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
3821 - DRM_DEBUG("IH: D6 vline\n");
3822 - }
3823 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
3824 + DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
3825 +
3826 + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
3827 + DRM_DEBUG("IH: D6 vline\n");
3828 +
3829 break;
3830 default:
3831 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3832 @@ -5039,88 +5063,100 @@ restart_ih:
3833 case 42: /* HPD hotplug */
3834 switch (src_data) {
3835 case 0:
3836 - if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
3837 - rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
3838 - queue_hotplug = true;
3839 - DRM_DEBUG("IH: HPD1\n");
3840 - }
3841 + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
3842 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3843 +
3844 + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
3845 + queue_hotplug = true;
3846 + DRM_DEBUG("IH: HPD1\n");
3847 break;
3848 case 1:
3849 - if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
3850 - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
3851 - queue_hotplug = true;
3852 - DRM_DEBUG("IH: HPD2\n");
3853 - }
3854 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
3855 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3856 +
3857 + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
3858 + queue_hotplug = true;
3859 + DRM_DEBUG("IH: HPD2\n");
3860 break;
3861 case 2:
3862 - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
3863 - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
3864 - queue_hotplug = true;
3865 - DRM_DEBUG("IH: HPD3\n");
3866 - }
3867 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
3868 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3869 +
3870 + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
3871 + queue_hotplug = true;
3872 + DRM_DEBUG("IH: HPD3\n");
3873 break;
3874 case 3:
3875 - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
3876 - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
3877 - queue_hotplug = true;
3878 - DRM_DEBUG("IH: HPD4\n");
3879 - }
3880 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
3881 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3882 +
3883 + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
3884 + queue_hotplug = true;
3885 + DRM_DEBUG("IH: HPD4\n");
3886 break;
3887 case 4:
3888 - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
3889 - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
3890 - queue_hotplug = true;
3891 - DRM_DEBUG("IH: HPD5\n");
3892 - }
3893 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
3894 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3895 +
3896 + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
3897 + queue_hotplug = true;
3898 + DRM_DEBUG("IH: HPD5\n");
3899 break;
3900 case 5:
3901 - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
3902 - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
3903 - queue_hotplug = true;
3904 - DRM_DEBUG("IH: HPD6\n");
3905 - }
3906 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
3907 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3908 +
3909 + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
3910 + queue_hotplug = true;
3911 + DRM_DEBUG("IH: HPD6\n");
3912 break;
3913 case 6:
3914 - if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
3915 - rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
3916 - queue_dp = true;
3917 - DRM_DEBUG("IH: HPD_RX 1\n");
3918 - }
3919 + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
3920 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3921 +
3922 + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
3923 + queue_dp = true;
3924 + DRM_DEBUG("IH: HPD_RX 1\n");
3925 break;
3926 case 7:
3927 - if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
3928 - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
3929 - queue_dp = true;
3930 - DRM_DEBUG("IH: HPD_RX 2\n");
3931 - }
3932 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
3933 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3934 +
3935 + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
3936 + queue_dp = true;
3937 + DRM_DEBUG("IH: HPD_RX 2\n");
3938 break;
3939 case 8:
3940 - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
3941 - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
3942 - queue_dp = true;
3943 - DRM_DEBUG("IH: HPD_RX 3\n");
3944 - }
3945 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
3946 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3947 +
3948 + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
3949 + queue_dp = true;
3950 + DRM_DEBUG("IH: HPD_RX 3\n");
3951 break;
3952 case 9:
3953 - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
3954 - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
3955 - queue_dp = true;
3956 - DRM_DEBUG("IH: HPD_RX 4\n");
3957 - }
3958 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
3959 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3960 +
3961 + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
3962 + queue_dp = true;
3963 + DRM_DEBUG("IH: HPD_RX 4\n");
3964 break;
3965 case 10:
3966 - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
3967 - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
3968 - queue_dp = true;
3969 - DRM_DEBUG("IH: HPD_RX 5\n");
3970 - }
3971 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
3972 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3973 +
3974 + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
3975 + queue_dp = true;
3976 + DRM_DEBUG("IH: HPD_RX 5\n");
3977 break;
3978 case 11:
3979 - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
3980 - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
3981 - queue_dp = true;
3982 - DRM_DEBUG("IH: HPD_RX 6\n");
3983 - }
3984 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
3985 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3986 +
3987 + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
3988 + queue_dp = true;
3989 + DRM_DEBUG("IH: HPD_RX 6\n");
3990 break;
3991 default:
3992 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3993 @@ -5130,46 +5166,52 @@ restart_ih:
3994 case 44: /* hdmi */
3995 switch (src_data) {
3996 case 0:
3997 - if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
3998 - rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
3999 - queue_hdmi = true;
4000 - DRM_DEBUG("IH: HDMI0\n");
4001 - }
4002 + if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
4003 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4004 +
4005 + rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
4006 + queue_hdmi = true;
4007 + DRM_DEBUG("IH: HDMI0\n");
4008 break;
4009 case 1:
4010 - if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
4011 - rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
4012 - queue_hdmi = true;
4013 - DRM_DEBUG("IH: HDMI1\n");
4014 - }
4015 + if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
4016 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4017 +
4018 + rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
4019 + queue_hdmi = true;
4020 + DRM_DEBUG("IH: HDMI1\n");
4021 break;
4022 case 2:
4023 - if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
4024 - rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
4025 - queue_hdmi = true;
4026 - DRM_DEBUG("IH: HDMI2\n");
4027 - }
4028 + if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
4029 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4030 +
4031 + rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
4032 + queue_hdmi = true;
4033 + DRM_DEBUG("IH: HDMI2\n");
4034 break;
4035 case 3:
4036 - if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
4037 - rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
4038 - queue_hdmi = true;
4039 - DRM_DEBUG("IH: HDMI3\n");
4040 - }
4041 + if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
4042 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4043 +
4044 + rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
4045 + queue_hdmi = true;
4046 + DRM_DEBUG("IH: HDMI3\n");
4047 break;
4048 case 4:
4049 - if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
4050 - rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
4051 - queue_hdmi = true;
4052 - DRM_DEBUG("IH: HDMI4\n");
4053 - }
4054 + if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
4055 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4056 +
4057 + rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
4058 + queue_hdmi = true;
4059 + DRM_DEBUG("IH: HDMI4\n");
4060 break;
4061 case 5:
4062 - if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
4063 - rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
4064 - queue_hdmi = true;
4065 - DRM_DEBUG("IH: HDMI5\n");
4066 - }
4067 + if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
4068 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4069 +
4070 + rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
4071 + queue_hdmi = true;
4072 + DRM_DEBUG("IH: HDMI5\n");
4073 break;
4074 default:
4075 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
4076 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
4077 index 8f6d862a1882..21e479fefcab 100644
4078 --- a/drivers/gpu/drm/radeon/r600.c
4079 +++ b/drivers/gpu/drm/radeon/r600.c
4080 @@ -4039,23 +4039,27 @@ restart_ih:
4081 case 1: /* D1 vblank/vline */
4082 switch (src_data) {
4083 case 0: /* D1 vblank */
4084 - if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
4085 - if (rdev->irq.crtc_vblank_int[0]) {
4086 - drm_handle_vblank(rdev->ddev, 0);
4087 - rdev->pm.vblank_sync = true;
4088 - wake_up(&rdev->irq.vblank_queue);
4089 - }
4090 - if (atomic_read(&rdev->irq.pflip[0]))
4091 - radeon_crtc_handle_vblank(rdev, 0);
4092 - rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4093 - DRM_DEBUG("IH: D1 vblank\n");
4094 + if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
4095 + DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
4096 +
4097 + if (rdev->irq.crtc_vblank_int[0]) {
4098 + drm_handle_vblank(rdev->ddev, 0);
4099 + rdev->pm.vblank_sync = true;
4100 + wake_up(&rdev->irq.vblank_queue);
4101 }
4102 + if (atomic_read(&rdev->irq.pflip[0]))
4103 + radeon_crtc_handle_vblank(rdev, 0);
4104 + rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4105 + DRM_DEBUG("IH: D1 vblank\n");
4106 +
4107 break;
4108 case 1: /* D1 vline */
4109 - if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
4110 - rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4111 - DRM_DEBUG("IH: D1 vline\n");
4112 - }
4113 + if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
4114 + DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
4115 +
4116 + rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4117 + DRM_DEBUG("IH: D1 vline\n");
4118 +
4119 break;
4120 default:
4121 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4122 @@ -4065,23 +4069,27 @@ restart_ih:
4123 case 5: /* D2 vblank/vline */
4124 switch (src_data) {
4125 case 0: /* D2 vblank */
4126 - if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
4127 - if (rdev->irq.crtc_vblank_int[1]) {
4128 - drm_handle_vblank(rdev->ddev, 1);
4129 - rdev->pm.vblank_sync = true;
4130 - wake_up(&rdev->irq.vblank_queue);
4131 - }
4132 - if (atomic_read(&rdev->irq.pflip[1]))
4133 - radeon_crtc_handle_vblank(rdev, 1);
4134 - rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
4135 - DRM_DEBUG("IH: D2 vblank\n");
4136 + if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
4137 + DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
4138 +
4139 + if (rdev->irq.crtc_vblank_int[1]) {
4140 + drm_handle_vblank(rdev->ddev, 1);
4141 + rdev->pm.vblank_sync = true;
4142 + wake_up(&rdev->irq.vblank_queue);
4143 }
4144 + if (atomic_read(&rdev->irq.pflip[1]))
4145 + radeon_crtc_handle_vblank(rdev, 1);
4146 + rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
4147 + DRM_DEBUG("IH: D2 vblank\n");
4148 +
4149 break;
4150 case 1: /* D1 vline */
4151 - if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
4152 - rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
4153 - DRM_DEBUG("IH: D2 vline\n");
4154 - }
4155 + if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
4156 + DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
4157 +
4158 + rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
4159 + DRM_DEBUG("IH: D2 vline\n");
4160 +
4161 break;
4162 default:
4163 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4164 @@ -4101,46 +4109,53 @@ restart_ih:
4165 case 19: /* HPD/DAC hotplug */
4166 switch (src_data) {
4167 case 0:
4168 - if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
4169 - rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
4170 - queue_hotplug = true;
4171 - DRM_DEBUG("IH: HPD1\n");
4172 - }
4173 + if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
4174 + DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
4175 +
4176 + rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
4177 + queue_hotplug = true;
4178 + DRM_DEBUG("IH: HPD1\n");
4179 break;
4180 case 1:
4181 - if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
4182 - rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
4183 - queue_hotplug = true;
4184 - DRM_DEBUG("IH: HPD2\n");
4185 - }
4186 + if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
4187 + DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
4188 +
4189 + rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
4190 + queue_hotplug = true;
4191 + DRM_DEBUG("IH: HPD2\n");
4192 break;
4193 case 4:
4194 - if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
4195 - rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
4196 - queue_hotplug = true;
4197 - DRM_DEBUG("IH: HPD3\n");
4198 - }
4199 + if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
4200 + DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
4201 +
4202 + rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
4203 + queue_hotplug = true;
4204 + DRM_DEBUG("IH: HPD3\n");
4205 break;
4206 case 5:
4207 - if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
4208 - rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
4209 - queue_hotplug = true;
4210 - DRM_DEBUG("IH: HPD4\n");
4211 - }
4212 + if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
4213 + DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
4214 +
4215 + rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
4216 + queue_hotplug = true;
4217 + DRM_DEBUG("IH: HPD4\n");
4218 break;
4219 case 10:
4220 - if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
4221 - rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
4222 - queue_hotplug = true;
4223 - DRM_DEBUG("IH: HPD5\n");
4224 - }
4225 + if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
4226 + DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
4227 +
4228 + rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
4229 + queue_hotplug = true;
4230 + DRM_DEBUG("IH: HPD5\n");
4231 break;
4232 case 12:
4233 - if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
4234 - rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
4235 - queue_hotplug = true;
4236 - DRM_DEBUG("IH: HPD6\n");
4237 - }
4238 + if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
4239 + DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
4240 +
4241 + rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
4242 + queue_hotplug = true;
4243 + DRM_DEBUG("IH: HPD6\n");
4244 +
4245 break;
4246 default:
4247 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4248 @@ -4150,18 +4165,22 @@ restart_ih:
4249 case 21: /* hdmi */
4250 switch (src_data) {
4251 case 4:
4252 - if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
4253 - rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4254 - queue_hdmi = true;
4255 - DRM_DEBUG("IH: HDMI0\n");
4256 - }
4257 + if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
4258 + DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
4259 +
4260 + rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4261 + queue_hdmi = true;
4262 + DRM_DEBUG("IH: HDMI0\n");
4263 +
4264 break;
4265 case 5:
4266 - if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
4267 - rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4268 - queue_hdmi = true;
4269 - DRM_DEBUG("IH: HDMI1\n");
4270 - }
4271 + if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
4272 + DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
4273 +
4274 + rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4275 + queue_hdmi = true;
4276 + DRM_DEBUG("IH: HDMI1\n");
4277 +
4278 break;
4279 default:
4280 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
4281 diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
4282 index 25191f126f3b..fa719c53449b 100644
4283 --- a/drivers/gpu/drm/radeon/radeon_audio.c
4284 +++ b/drivers/gpu/drm/radeon/radeon_audio.c
4285 @@ -242,6 +242,13 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
4286 .dpms = evergreen_dp_enable,
4287 };
4288
4289 +static void radeon_audio_enable(struct radeon_device *rdev,
4290 + struct r600_audio_pin *pin, u8 enable_mask)
4291 +{
4292 + if (rdev->audio.funcs->enable)
4293 + rdev->audio.funcs->enable(rdev, pin, enable_mask);
4294 +}
4295 +
4296 static void radeon_audio_interface_init(struct radeon_device *rdev)
4297 {
4298 if (ASIC_IS_DCE6(rdev)) {
4299 @@ -307,7 +314,7 @@ int radeon_audio_init(struct radeon_device *rdev)
4300
4301 /* disable audio. it will be set up later */
4302 for (i = 0; i < rdev->audio.num_pins; i++)
4303 - radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
4304 + radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
4305
4306 return 0;
4307 }
4308 @@ -443,13 +450,6 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
4309 radeon_encoder->audio->select_pin(encoder);
4310 }
4311
4312 -void radeon_audio_enable(struct radeon_device *rdev,
4313 - struct r600_audio_pin *pin, u8 enable_mask)
4314 -{
4315 - if (rdev->audio.funcs->enable)
4316 - rdev->audio.funcs->enable(rdev, pin, enable_mask);
4317 -}
4318 -
4319 void radeon_audio_detect(struct drm_connector *connector,
4320 enum drm_connector_status status)
4321 {
4322 @@ -469,22 +469,22 @@ void radeon_audio_detect(struct drm_connector *connector,
4323 dig = radeon_encoder->enc_priv;
4324
4325 if (status == connector_status_connected) {
4326 - struct radeon_connector *radeon_connector;
4327 - int sink_type;
4328 -
4329 if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
4330 radeon_encoder->audio = NULL;
4331 return;
4332 }
4333
4334 - radeon_connector = to_radeon_connector(connector);
4335 - sink_type = radeon_dp_getsinktype(radeon_connector);
4336 + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
4337 + struct radeon_connector *radeon_connector = to_radeon_connector(connector);
4338
4339 - if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
4340 - sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
4341 - radeon_encoder->audio = rdev->audio.dp_funcs;
4342 - else
4343 + if (radeon_dp_getsinktype(radeon_connector) ==
4344 + CONNECTOR_OBJECT_ID_DISPLAYPORT)
4345 + radeon_encoder->audio = rdev->audio.dp_funcs;
4346 + else
4347 + radeon_encoder->audio = rdev->audio.hdmi_funcs;
4348 + } else {
4349 radeon_encoder->audio = rdev->audio.hdmi_funcs;
4350 + }
4351
4352 dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
4353 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
4354 @@ -502,7 +502,7 @@ void radeon_audio_fini(struct radeon_device *rdev)
4355 return;
4356
4357 for (i = 0; i < rdev->audio.num_pins; i++)
4358 - radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
4359 + radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
4360
4361 rdev->audio.enabled = false;
4362 }
4363 diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
4364 index c92d059ab204..8438304f7139 100644
4365 --- a/drivers/gpu/drm/radeon/radeon_audio.h
4366 +++ b/drivers/gpu/drm/radeon/radeon_audio.h
4367 @@ -74,8 +74,6 @@ u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
4368 void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
4369 u32 offset, u32 reg, u32 v);
4370 struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder);
4371 -void radeon_audio_enable(struct radeon_device *rdev,
4372 - struct r600_audio_pin *pin, u8 enable_mask);
4373 void radeon_audio_fini(struct radeon_device *rdev);
4374 void radeon_audio_mode_set(struct drm_encoder *encoder,
4375 struct drm_display_mode *mode);
4376 diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
4377 index 45e54060ee97..fa661744a1f5 100644
4378 --- a/drivers/gpu/drm/radeon/radeon_cursor.c
4379 +++ b/drivers/gpu/drm/radeon/radeon_cursor.c
4380 @@ -205,8 +205,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
4381 | (x << 16)
4382 | y));
4383 /* offset is from DISP(2)_BASE_ADDRESS */
4384 - WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
4385 - (yorigin * 256)));
4386 + WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
4387 + radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
4388 + yorigin * 256);
4389 }
4390
4391 radeon_crtc->cursor_x = x;
4392 @@ -227,51 +228,32 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
4393 return ret;
4394 }
4395
4396 -static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
4397 +static void radeon_set_cursor(struct drm_crtc *crtc)
4398 {
4399 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
4400 struct radeon_device *rdev = crtc->dev->dev_private;
4401 - struct radeon_bo *robj = gem_to_radeon_bo(obj);
4402 - uint64_t gpu_addr;
4403 - int ret;
4404 -
4405 - ret = radeon_bo_reserve(robj, false);
4406 - if (unlikely(ret != 0))
4407 - goto fail;
4408 - /* Only 27 bit offset for legacy cursor */
4409 - ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
4410 - ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
4411 - &gpu_addr);
4412 - radeon_bo_unreserve(robj);
4413 - if (ret)
4414 - goto fail;
4415
4416 if (ASIC_IS_DCE4(rdev)) {
4417 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
4418 - upper_32_bits(gpu_addr));
4419 + upper_32_bits(radeon_crtc->cursor_addr));
4420 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
4421 - gpu_addr & 0xffffffff);
4422 + lower_32_bits(radeon_crtc->cursor_addr));
4423 } else if (ASIC_IS_AVIVO(rdev)) {
4424 if (rdev->family >= CHIP_RV770) {
4425 if (radeon_crtc->crtc_id)
4426 - WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
4427 + WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
4428 + upper_32_bits(radeon_crtc->cursor_addr));
4429 else
4430 - WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
4431 + WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
4432 + upper_32_bits(radeon_crtc->cursor_addr));
4433 }
4434 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
4435 - gpu_addr & 0xffffffff);
4436 + lower_32_bits(radeon_crtc->cursor_addr));
4437 } else {
4438 - radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
4439 /* offset is from DISP(2)_BASE_ADDRESS */
4440 - WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
4441 + WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
4442 + radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
4443 }
4444 -
4445 - return 0;
4446 -
4447 -fail:
4448 - drm_gem_object_unreference_unlocked(obj);
4449 -
4450 - return ret;
4451 }
4452
4453 int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
4454 @@ -283,7 +265,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
4455 int32_t hot_y)
4456 {
4457 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
4458 + struct radeon_device *rdev = crtc->dev->dev_private;
4459 struct drm_gem_object *obj;
4460 + struct radeon_bo *robj;
4461 int ret;
4462
4463 if (!handle) {
4464 @@ -305,6 +289,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
4465 return -ENOENT;
4466 }
4467
4468 + robj = gem_to_radeon_bo(obj);
4469 + ret = radeon_bo_reserve(robj, false);
4470 + if (ret != 0) {
4471 + drm_gem_object_unreference_unlocked(obj);
4472 + return ret;
4473 + }
4474 + /* Only 27 bit offset for legacy cursor */
4475 + ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
4476 + ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
4477 + &radeon_crtc->cursor_addr);
4478 + radeon_bo_unreserve(robj);
4479 + if (ret) {
4480 + DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
4481 + drm_gem_object_unreference_unlocked(obj);
4482 + return ret;
4483 + }
4484 +
4485 radeon_crtc->cursor_width = width;
4486 radeon_crtc->cursor_height = height;
4487
4488 @@ -323,13 +324,8 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
4489 radeon_crtc->cursor_hot_y = hot_y;
4490 }
4491
4492 - ret = radeon_set_cursor(crtc, obj);
4493 -
4494 - if (ret)
4495 - DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
4496 - ret);
4497 - else
4498 - radeon_show_cursor(crtc);
4499 + radeon_set_cursor(crtc);
4500 + radeon_show_cursor(crtc);
4501
4502 radeon_lock_cursor(crtc, false);
4503
4504 @@ -341,8 +337,7 @@ unpin:
4505 radeon_bo_unpin(robj);
4506 radeon_bo_unreserve(robj);
4507 }
4508 - if (radeon_crtc->cursor_bo != obj)
4509 - drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
4510 + drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
4511 }
4512
4513 radeon_crtc->cursor_bo = obj;
4514 @@ -360,7 +355,6 @@ unpin:
4515 void radeon_cursor_reset(struct drm_crtc *crtc)
4516 {
4517 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
4518 - int ret;
4519
4520 if (radeon_crtc->cursor_bo) {
4521 radeon_lock_cursor(crtc, true);
4522 @@ -368,12 +362,8 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
4523 radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
4524 radeon_crtc->cursor_y);
4525
4526 - ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
4527 - if (ret)
4528 - DRM_ERROR("radeon_set_cursor returned %d, not showing "
4529 - "cursor\n", ret);
4530 - else
4531 - radeon_show_cursor(crtc);
4532 + radeon_set_cursor(crtc);
4533 + radeon_show_cursor(crtc);
4534
4535 radeon_lock_cursor(crtc, false);
4536 }
4537 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
4538 index a7fdfa4f0857..604c44d88e7a 100644
4539 --- a/drivers/gpu/drm/radeon/radeon_device.c
4540 +++ b/drivers/gpu/drm/radeon/radeon_device.c
4541 @@ -1572,11 +1572,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
4542 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
4543 }
4544
4545 - /* unpin the front buffers */
4546 + /* unpin the front buffers and cursors */
4547 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4548 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
4549 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
4550 struct radeon_bo *robj;
4551
4552 + if (radeon_crtc->cursor_bo) {
4553 + struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
4554 + r = radeon_bo_reserve(robj, false);
4555 + if (r == 0) {
4556 + radeon_bo_unpin(robj);
4557 + radeon_bo_unreserve(robj);
4558 + }
4559 + }
4560 +
4561 if (rfb == NULL || rfb->obj == NULL) {
4562 continue;
4563 }
4564 @@ -1639,6 +1649,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
4565 {
4566 struct drm_connector *connector;
4567 struct radeon_device *rdev = dev->dev_private;
4568 + struct drm_crtc *crtc;
4569 int r;
4570
4571 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4572 @@ -1678,6 +1689,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
4573
4574 radeon_restore_bios_scratch_regs(rdev);
4575
4576 + /* pin cursors */
4577 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4578 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
4579 +
4580 + if (radeon_crtc->cursor_bo) {
4581 + struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
4582 + r = radeon_bo_reserve(robj, false);
4583 + if (r == 0) {
4584 + /* Only 27 bit offset for legacy cursor */
4585 + r = radeon_bo_pin_restricted(robj,
4586 + RADEON_GEM_DOMAIN_VRAM,
4587 + ASIC_IS_AVIVO(rdev) ?
4588 + 0 : 1 << 27,
4589 + &radeon_crtc->cursor_addr);
4590 + if (r != 0)
4591 + DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
4592 + radeon_bo_unreserve(robj);
4593 + }
4594 + }
4595 + }
4596 +
4597 /* init dig PHYs, disp eng pll */
4598 if (rdev->is_atom_bios) {
4599 radeon_atom_encoder_init(rdev);
4600 diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
4601 index aeb676708e60..634793ea8418 100644
4602 --- a/drivers/gpu/drm/radeon/radeon_fb.c
4603 +++ b/drivers/gpu/drm/radeon/radeon_fb.c
4604 @@ -257,7 +257,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
4605 }
4606
4607 info->par = rfbdev;
4608 - info->skip_vt_switch = true;
4609
4610 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
4611 if (ret) {
4612 diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
4613 index 5450fa95a47e..c4777c8d0312 100644
4614 --- a/drivers/gpu/drm/radeon/radeon_gart.c
4615 +++ b/drivers/gpu/drm/radeon/radeon_gart.c
4616 @@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
4617 }
4618 }
4619 }
4620 - mb();
4621 - radeon_gart_tlb_flush(rdev);
4622 + if (rdev->gart.ptr) {
4623 + mb();
4624 + radeon_gart_tlb_flush(rdev);
4625 + }
4626 }
4627
4628 /**
4629 @@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
4630 page_base += RADEON_GPU_PAGE_SIZE;
4631 }
4632 }
4633 - mb();
4634 - radeon_gart_tlb_flush(rdev);
4635 + if (rdev->gart.ptr) {
4636 + mb();
4637 + radeon_gart_tlb_flush(rdev);
4638 + }
4639 return 0;
4640 }
4641
4642 diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
4643 index ac3c1310b953..186d0b792a02 100644
4644 --- a/drivers/gpu/drm/radeon/radeon_gem.c
4645 +++ b/drivers/gpu/drm/radeon/radeon_gem.c
4646 @@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
4647 if (robj) {
4648 if (robj->gem_base.import_attach)
4649 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
4650 + radeon_mn_unregister(robj);
4651 radeon_bo_unref(&robj);
4652 }
4653 }
4654 @@ -471,6 +472,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
4655 r = ret;
4656
4657 /* Flush HDP cache via MMIO if necessary */
4658 + cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
4659 if (rdev->asic->mmio_hdp_flush &&
4660 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
4661 robj->rdev->asic->mmio_hdp_flush(rdev);
4662 diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
4663 index 7162c935371c..f682e5351252 100644
4664 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
4665 +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
4666 @@ -79,10 +79,12 @@ static void radeon_hotplug_work_func(struct work_struct *work)
4667 struct drm_mode_config *mode_config = &dev->mode_config;
4668 struct drm_connector *connector;
4669
4670 + mutex_lock(&mode_config->mutex);
4671 if (mode_config->num_connector) {
4672 list_for_each_entry(connector, &mode_config->connector_list, head)
4673 radeon_connector_hotplug(connector);
4674 }
4675 + mutex_unlock(&mode_config->mutex);
4676 /* Just fire off a uevent and let userspace tell us what to do */
4677 drm_helper_hpd_irq_event(dev);
4678 }
4679 diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
4680 index fa91a17b81b6..f01c797b78cf 100644
4681 --- a/drivers/gpu/drm/radeon/radeon_mode.h
4682 +++ b/drivers/gpu/drm/radeon/radeon_mode.h
4683 @@ -343,7 +343,6 @@ struct radeon_crtc {
4684 int max_cursor_width;
4685 int max_cursor_height;
4686 uint32_t legacy_display_base_addr;
4687 - uint32_t legacy_cursor_offset;
4688 enum radeon_rmx_type rmx_type;
4689 u8 h_border;
4690 u8 v_border;
4691 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
4692 index 318165d4855c..676362769b8d 100644
4693 --- a/drivers/gpu/drm/radeon/radeon_object.c
4694 +++ b/drivers/gpu/drm/radeon/radeon_object.c
4695 @@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
4696 bo = container_of(tbo, struct radeon_bo, tbo);
4697
4698 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
4699 - radeon_mn_unregister(bo);
4700
4701 mutex_lock(&bo->rdev->gem.mutex);
4702 list_del_init(&bo->list);
4703 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
4704 index 4c679b802bc8..e15185b16504 100644
4705 --- a/drivers/gpu/drm/radeon/si.c
4706 +++ b/drivers/gpu/drm/radeon/si.c
4707 @@ -6466,23 +6466,27 @@ restart_ih:
4708 case 1: /* D1 vblank/vline */
4709 switch (src_data) {
4710 case 0: /* D1 vblank */
4711 - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
4712 - if (rdev->irq.crtc_vblank_int[0]) {
4713 - drm_handle_vblank(rdev->ddev, 0);
4714 - rdev->pm.vblank_sync = true;
4715 - wake_up(&rdev->irq.vblank_queue);
4716 - }
4717 - if (atomic_read(&rdev->irq.pflip[0]))
4718 - radeon_crtc_handle_vblank(rdev, 0);
4719 - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4720 - DRM_DEBUG("IH: D1 vblank\n");
4721 + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
4722 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4723 +
4724 + if (rdev->irq.crtc_vblank_int[0]) {
4725 + drm_handle_vblank(rdev->ddev, 0);
4726 + rdev->pm.vblank_sync = true;
4727 + wake_up(&rdev->irq.vblank_queue);
4728 }
4729 + if (atomic_read(&rdev->irq.pflip[0]))
4730 + radeon_crtc_handle_vblank(rdev, 0);
4731 + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4732 + DRM_DEBUG("IH: D1 vblank\n");
4733 +
4734 break;
4735 case 1: /* D1 vline */
4736 - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
4737 - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4738 - DRM_DEBUG("IH: D1 vline\n");
4739 - }
4740 + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
4741 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4742 +
4743 + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4744 + DRM_DEBUG("IH: D1 vline\n");
4745 +
4746 break;
4747 default:
4748 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4749 @@ -6492,23 +6496,27 @@ restart_ih:
4750 case 2: /* D2 vblank/vline */
4751 switch (src_data) {
4752 case 0: /* D2 vblank */
4753 - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
4754 - if (rdev->irq.crtc_vblank_int[1]) {
4755 - drm_handle_vblank(rdev->ddev, 1);
4756 - rdev->pm.vblank_sync = true;
4757 - wake_up(&rdev->irq.vblank_queue);
4758 - }
4759 - if (atomic_read(&rdev->irq.pflip[1]))
4760 - radeon_crtc_handle_vblank(rdev, 1);
4761 - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
4762 - DRM_DEBUG("IH: D2 vblank\n");
4763 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
4764 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4765 +
4766 + if (rdev->irq.crtc_vblank_int[1]) {
4767 + drm_handle_vblank(rdev->ddev, 1);
4768 + rdev->pm.vblank_sync = true;
4769 + wake_up(&rdev->irq.vblank_queue);
4770 }
4771 + if (atomic_read(&rdev->irq.pflip[1]))
4772 + radeon_crtc_handle_vblank(rdev, 1);
4773 + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
4774 + DRM_DEBUG("IH: D2 vblank\n");
4775 +
4776 break;
4777 case 1: /* D2 vline */
4778 - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
4779 - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
4780 - DRM_DEBUG("IH: D2 vline\n");
4781 - }
4782 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
4783 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4784 +
4785 + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
4786 + DRM_DEBUG("IH: D2 vline\n");
4787 +
4788 break;
4789 default:
4790 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4791 @@ -6518,23 +6526,27 @@ restart_ih:
4792 case 3: /* D3 vblank/vline */
4793 switch (src_data) {
4794 case 0: /* D3 vblank */
4795 - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
4796 - if (rdev->irq.crtc_vblank_int[2]) {
4797 - drm_handle_vblank(rdev->ddev, 2);
4798 - rdev->pm.vblank_sync = true;
4799 - wake_up(&rdev->irq.vblank_queue);
4800 - }
4801 - if (atomic_read(&rdev->irq.pflip[2]))
4802 - radeon_crtc_handle_vblank(rdev, 2);
4803 - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
4804 - DRM_DEBUG("IH: D3 vblank\n");
4805 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
4806 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4807 +
4808 + if (rdev->irq.crtc_vblank_int[2]) {
4809 + drm_handle_vblank(rdev->ddev, 2);
4810 + rdev->pm.vblank_sync = true;
4811 + wake_up(&rdev->irq.vblank_queue);
4812 }
4813 + if (atomic_read(&rdev->irq.pflip[2]))
4814 + radeon_crtc_handle_vblank(rdev, 2);
4815 + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
4816 + DRM_DEBUG("IH: D3 vblank\n");
4817 +
4818 break;
4819 case 1: /* D3 vline */
4820 - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
4821 - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
4822 - DRM_DEBUG("IH: D3 vline\n");
4823 - }
4824 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
4825 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4826 +
4827 + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
4828 + DRM_DEBUG("IH: D3 vline\n");
4829 +
4830 break;
4831 default:
4832 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4833 @@ -6544,23 +6556,27 @@ restart_ih:
4834 case 4: /* D4 vblank/vline */
4835 switch (src_data) {
4836 case 0: /* D4 vblank */
4837 - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
4838 - if (rdev->irq.crtc_vblank_int[3]) {
4839 - drm_handle_vblank(rdev->ddev, 3);
4840 - rdev->pm.vblank_sync = true;
4841 - wake_up(&rdev->irq.vblank_queue);
4842 - }
4843 - if (atomic_read(&rdev->irq.pflip[3]))
4844 - radeon_crtc_handle_vblank(rdev, 3);
4845 - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
4846 - DRM_DEBUG("IH: D4 vblank\n");
4847 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
4848 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4849 +
4850 + if (rdev->irq.crtc_vblank_int[3]) {
4851 + drm_handle_vblank(rdev->ddev, 3);
4852 + rdev->pm.vblank_sync = true;
4853 + wake_up(&rdev->irq.vblank_queue);
4854 }
4855 + if (atomic_read(&rdev->irq.pflip[3]))
4856 + radeon_crtc_handle_vblank(rdev, 3);
4857 + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
4858 + DRM_DEBUG("IH: D4 vblank\n");
4859 +
4860 break;
4861 case 1: /* D4 vline */
4862 - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
4863 - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
4864 - DRM_DEBUG("IH: D4 vline\n");
4865 - }
4866 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
4867 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4868 +
4869 + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
4870 + DRM_DEBUG("IH: D4 vline\n");
4871 +
4872 break;
4873 default:
4874 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4875 @@ -6570,23 +6586,27 @@ restart_ih:
4876 case 5: /* D5 vblank/vline */
4877 switch (src_data) {
4878 case 0: /* D5 vblank */
4879 - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
4880 - if (rdev->irq.crtc_vblank_int[4]) {
4881 - drm_handle_vblank(rdev->ddev, 4);
4882 - rdev->pm.vblank_sync = true;
4883 - wake_up(&rdev->irq.vblank_queue);
4884 - }
4885 - if (atomic_read(&rdev->irq.pflip[4]))
4886 - radeon_crtc_handle_vblank(rdev, 4);
4887 - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
4888 - DRM_DEBUG("IH: D5 vblank\n");
4889 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
4890 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4891 +
4892 + if (rdev->irq.crtc_vblank_int[4]) {
4893 + drm_handle_vblank(rdev->ddev, 4);
4894 + rdev->pm.vblank_sync = true;
4895 + wake_up(&rdev->irq.vblank_queue);
4896 }
4897 + if (atomic_read(&rdev->irq.pflip[4]))
4898 + radeon_crtc_handle_vblank(rdev, 4);
4899 + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
4900 + DRM_DEBUG("IH: D5 vblank\n");
4901 +
4902 break;
4903 case 1: /* D5 vline */
4904 - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
4905 - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
4906 - DRM_DEBUG("IH: D5 vline\n");
4907 - }
4908 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
4909 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4910 +
4911 + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
4912 + DRM_DEBUG("IH: D5 vline\n");
4913 +
4914 break;
4915 default:
4916 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4917 @@ -6596,23 +6616,27 @@ restart_ih:
4918 case 6: /* D6 vblank/vline */
4919 switch (src_data) {
4920 case 0: /* D6 vblank */
4921 - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
4922 - if (rdev->irq.crtc_vblank_int[5]) {
4923 - drm_handle_vblank(rdev->ddev, 5);
4924 - rdev->pm.vblank_sync = true;
4925 - wake_up(&rdev->irq.vblank_queue);
4926 - }
4927 - if (atomic_read(&rdev->irq.pflip[5]))
4928 - radeon_crtc_handle_vblank(rdev, 5);
4929 - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
4930 - DRM_DEBUG("IH: D6 vblank\n");
4931 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
4932 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4933 +
4934 + if (rdev->irq.crtc_vblank_int[5]) {
4935 + drm_handle_vblank(rdev->ddev, 5);
4936 + rdev->pm.vblank_sync = true;
4937 + wake_up(&rdev->irq.vblank_queue);
4938 }
4939 + if (atomic_read(&rdev->irq.pflip[5]))
4940 + radeon_crtc_handle_vblank(rdev, 5);
4941 + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
4942 + DRM_DEBUG("IH: D6 vblank\n");
4943 +
4944 break;
4945 case 1: /* D6 vline */
4946 - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
4947 - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
4948 - DRM_DEBUG("IH: D6 vline\n");
4949 - }
4950 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
4951 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4952 +
4953 + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
4954 + DRM_DEBUG("IH: D6 vline\n");
4955 +
4956 break;
4957 default:
4958 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4959 @@ -6632,88 +6656,112 @@ restart_ih:
4960 case 42: /* HPD hotplug */
4961 switch (src_data) {
4962 case 0:
4963 - if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
4964 - rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
4965 - queue_hotplug = true;
4966 - DRM_DEBUG("IH: HPD1\n");
4967 - }
4968 + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
4969 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4970 +
4971 + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
4972 + queue_hotplug = true;
4973 + DRM_DEBUG("IH: HPD1\n");
4974 +
4975 break;
4976 case 1:
4977 - if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
4978 - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
4979 - queue_hotplug = true;
4980 - DRM_DEBUG("IH: HPD2\n");
4981 - }
4982 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
4983 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4984 +
4985 + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
4986 + queue_hotplug = true;
4987 + DRM_DEBUG("IH: HPD2\n");
4988 +
4989 break;
4990 case 2:
4991 - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
4992 - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
4993 - queue_hotplug = true;
4994 - DRM_DEBUG("IH: HPD3\n");
4995 - }
4996 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
4997 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
4998 +
4999 + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
5000 + queue_hotplug = true;
5001 + DRM_DEBUG("IH: HPD3\n");
5002 +
5003 break;
5004 case 3:
5005 - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
5006 - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
5007 - queue_hotplug = true;
5008 - DRM_DEBUG("IH: HPD4\n");
5009 - }
5010 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
5011 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5012 +
5013 + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
5014 + queue_hotplug = true;
5015 + DRM_DEBUG("IH: HPD4\n");
5016 +
5017 break;
5018 case 4:
5019 - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
5020 - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
5021 - queue_hotplug = true;
5022 - DRM_DEBUG("IH: HPD5\n");
5023 - }
5024 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
5025 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5026 +
5027 + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
5028 + queue_hotplug = true;
5029 + DRM_DEBUG("IH: HPD5\n");
5030 +
5031 break;
5032 case 5:
5033 - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
5034 - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
5035 - queue_hotplug = true;
5036 - DRM_DEBUG("IH: HPD6\n");
5037 - }
5038 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
5039 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5040 +
5041 + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
5042 + queue_hotplug = true;
5043 + DRM_DEBUG("IH: HPD6\n");
5044 +
5045 break;
5046 case 6:
5047 - if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
5048 - rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
5049 - queue_dp = true;
5050 - DRM_DEBUG("IH: HPD_RX 1\n");
5051 - }
5052 + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
5053 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5054 +
5055 + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
5056 + queue_dp = true;
5057 + DRM_DEBUG("IH: HPD_RX 1\n");
5058 +
5059 break;
5060 case 7:
5061 - if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
5062 - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
5063 - queue_dp = true;
5064 - DRM_DEBUG("IH: HPD_RX 2\n");
5065 - }
5066 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
5067 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5068 +
5069 + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
5070 + queue_dp = true;
5071 + DRM_DEBUG("IH: HPD_RX 2\n");
5072 +
5073 break;
5074 case 8:
5075 - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
5076 - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
5077 - queue_dp = true;
5078 - DRM_DEBUG("IH: HPD_RX 3\n");
5079 - }
5080 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
5081 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5082 +
5083 + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
5084 + queue_dp = true;
5085 + DRM_DEBUG("IH: HPD_RX 3\n");
5086 +
5087 break;
5088 case 9:
5089 - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
5090 - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
5091 - queue_dp = true;
5092 - DRM_DEBUG("IH: HPD_RX 4\n");
5093 - }
5094 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
5095 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5096 +
5097 + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
5098 + queue_dp = true;
5099 + DRM_DEBUG("IH: HPD_RX 4\n");
5100 +
5101 break;
5102 case 10:
5103 - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
5104 - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
5105 - queue_dp = true;
5106 - DRM_DEBUG("IH: HPD_RX 5\n");
5107 - }
5108 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
5109 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5110 +
5111 + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
5112 + queue_dp = true;
5113 + DRM_DEBUG("IH: HPD_RX 5\n");
5114 +
5115 break;
5116 case 11:
5117 - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
5118 - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
5119 - queue_dp = true;
5120 - DRM_DEBUG("IH: HPD_RX 6\n");
5121 - }
5122 + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
5123 + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5124 +
5125 + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
5126 + queue_dp = true;
5127 + DRM_DEBUG("IH: HPD_RX 6\n");
5128 +
5129 break;
5130 default:
5131 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
5132 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
5133 index ff8b83f5e929..9dfcedec05a6 100644
5134 --- a/drivers/gpu/drm/radeon/si_dpm.c
5135 +++ b/drivers/gpu/drm/radeon/si_dpm.c
5136 @@ -2925,6 +2925,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
5137 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
5138 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
5139 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
5140 + { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
5141 { 0, 0, 0, 0 },
5142 };
5143
5144 diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
5145 index eb2282cc4a56..eba5f8a52fbd 100644
5146 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
5147 +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
5148 @@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
5149 &rk_obj->dma_attrs);
5150 }
5151
5152 -int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
5153 - struct vm_area_struct *vma)
5154 +static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
5155 + struct vm_area_struct *vma)
5156 +
5157 {
5158 + int ret;
5159 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
5160 struct drm_device *drm = obj->dev;
5161 - unsigned long vm_size;
5162
5163 - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
5164 - vm_size = vma->vm_end - vma->vm_start;
5165 -
5166 - if (vm_size > obj->size)
5167 - return -EINVAL;
5168 + /*
5169 + * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
5170 + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
5171 + */
5172 + vma->vm_flags &= ~VM_PFNMAP;
5173
5174 - return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
5175 + ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
5176 obj->size, &rk_obj->dma_attrs);
5177 + if (ret)
5178 + drm_gem_vm_close(vma);
5179 +
5180 + return ret;
5181 }
5182
5183 -/* drm driver mmap file operations */
5184 -int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
5185 +int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
5186 + struct vm_area_struct *vma)
5187 {
5188 - struct drm_file *priv = filp->private_data;
5189 - struct drm_device *dev = priv->minor->dev;
5190 - struct drm_gem_object *obj;
5191 - struct drm_vma_offset_node *node;
5192 + struct drm_device *drm = obj->dev;
5193 int ret;
5194
5195 - if (drm_device_is_unplugged(dev))
5196 - return -ENODEV;
5197 + mutex_lock(&drm->struct_mutex);
5198 + ret = drm_gem_mmap_obj(obj, obj->size, vma);
5199 + mutex_unlock(&drm->struct_mutex);
5200 + if (ret)
5201 + return ret;
5202
5203 - mutex_lock(&dev->struct_mutex);
5204 + return rockchip_drm_gem_object_mmap(obj, vma);
5205 +}
5206
5207 - node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
5208 - vma->vm_pgoff,
5209 - vma_pages(vma));
5210 - if (!node) {
5211 - mutex_unlock(&dev->struct_mutex);
5212 - DRM_ERROR("failed to find vma node.\n");
5213 - return -EINVAL;
5214 - } else if (!drm_vma_node_is_allowed(node, filp)) {
5215 - mutex_unlock(&dev->struct_mutex);
5216 - return -EACCES;
5217 - }
5218 +/* drm driver mmap file operations */
5219 +int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
5220 +{
5221 + struct drm_gem_object *obj;
5222 + int ret;
5223
5224 - obj = container_of(node, struct drm_gem_object, vma_node);
5225 - ret = rockchip_gem_mmap_buf(obj, vma);
5226 + ret = drm_gem_mmap(filp, vma);
5227 + if (ret)
5228 + return ret;
5229
5230 - mutex_unlock(&dev->struct_mutex);
5231 + obj = vma->vm_private_data;
5232
5233 - return ret;
5234 + return rockchip_drm_gem_object_mmap(obj, vma);
5235 }
5236
5237 struct rockchip_gem_object *
5238 diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
5239 index d6b55e3e3716..a43a836e6f88 100644
5240 --- a/drivers/gpu/drm/tegra/dpaux.c
5241 +++ b/drivers/gpu/drm/tegra/dpaux.c
5242 @@ -72,34 +72,32 @@ static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
5243 static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
5244 size_t size)
5245 {
5246 - unsigned long offset = DPAUX_DP_AUXDATA_WRITE(0);
5247 size_t i, j;
5248
5249 - for (i = 0; i < size; i += 4) {
5250 - size_t num = min_t(size_t, size - i, 4);
5251 + for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
5252 + size_t num = min_t(size_t, size - i * 4, 4);
5253 unsigned long value = 0;
5254
5255 for (j = 0; j < num; j++)
5256 - value |= buffer[i + j] << (j * 8);
5257 + value |= buffer[i * 4 + j] << (j * 8);
5258
5259 - tegra_dpaux_writel(dpaux, value, offset++);
5260 + tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXDATA_WRITE(i));
5261 }
5262 }
5263
5264 static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
5265 size_t size)
5266 {
5267 - unsigned long offset = DPAUX_DP_AUXDATA_READ(0);
5268 size_t i, j;
5269
5270 - for (i = 0; i < size; i += 4) {
5271 - size_t num = min_t(size_t, size - i, 4);
5272 + for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
5273 + size_t num = min_t(size_t, size - i * 4, 4);
5274 unsigned long value;
5275
5276 - value = tegra_dpaux_readl(dpaux, offset++);
5277 + value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXDATA_READ(i));
5278
5279 for (j = 0; j < num; j++)
5280 - buffer[i + j] = value >> (j * 8);
5281 + buffer[i * 4 + j] = value >> (j * 8);
5282 }
5283 }
5284
5285 diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
5286 index 7a207ca547be..6394547cf67a 100644
5287 --- a/drivers/gpu/drm/vgem/vgem_drv.c
5288 +++ b/drivers/gpu/drm/vgem/vgem_drv.c
5289 @@ -328,6 +328,8 @@ static int __init vgem_init(void)
5290 goto out;
5291 }
5292
5293 + drm_dev_set_unique(vgem_device, "vgem");
5294 +
5295 ret = drm_dev_register(vgem_device, 0);
5296
5297 if (ret)
5298 diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
5299 index d219c06a857b..972444a14cca 100644
5300 --- a/drivers/hwmon/mcp3021.c
5301 +++ b/drivers/hwmon/mcp3021.c
5302 @@ -31,14 +31,11 @@
5303 /* output format */
5304 #define MCP3021_SAR_SHIFT 2
5305 #define MCP3021_SAR_MASK 0x3ff
5306 -
5307 #define MCP3021_OUTPUT_RES 10 /* 10-bit resolution */
5308 -#define MCP3021_OUTPUT_SCALE 4
5309
5310 #define MCP3221_SAR_SHIFT 0
5311 #define MCP3221_SAR_MASK 0xfff
5312 #define MCP3221_OUTPUT_RES 12 /* 12-bit resolution */
5313 -#define MCP3221_OUTPUT_SCALE 1
5314
5315 enum chips {
5316 mcp3021,
5317 @@ -54,7 +51,6 @@ struct mcp3021_data {
5318 u16 sar_shift;
5319 u16 sar_mask;
5320 u8 output_res;
5321 - u8 output_scale;
5322 };
5323
5324 static int mcp3021_read16(struct i2c_client *client)
5325 @@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_client *client)
5326
5327 static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
5328 {
5329 - if (val == 0)
5330 - return 0;
5331 -
5332 - val = val * data->output_scale - data->output_scale / 2;
5333 -
5334 - return val * DIV_ROUND_CLOSEST(data->vdd,
5335 - (1 << data->output_res) * data->output_scale);
5336 + return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
5337 }
5338
5339 static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
5340 @@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_client *client,
5341 data->sar_shift = MCP3021_SAR_SHIFT;
5342 data->sar_mask = MCP3021_SAR_MASK;
5343 data->output_res = MCP3021_OUTPUT_RES;
5344 - data->output_scale = MCP3021_OUTPUT_SCALE;
5345 break;
5346
5347 case mcp3221:
5348 data->sar_shift = MCP3221_SAR_SHIFT;
5349 data->sar_mask = MCP3221_SAR_MASK;
5350 data->output_res = MCP3221_OUTPUT_RES;
5351 - data->output_scale = MCP3221_OUTPUT_SCALE;
5352 break;
5353 }
5354
5355 diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
5356 index 55765790907b..28fcb2e246d5 100644
5357 --- a/drivers/hwmon/nct7802.c
5358 +++ b/drivers/hwmon/nct7802.c
5359 @@ -547,7 +547,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
5360 if (index >= 9 && index < 18 &&
5361 (reg & 0x0c) != 0x04 && (reg & 0x0c) != 0x08) /* RD2 */
5362 return 0;
5363 - if (index >= 18 && index < 27 && (reg & 0x30) != 0x10) /* RD3 */
5364 + if (index >= 18 && index < 27 && (reg & 0x30) != 0x20) /* RD3 */
5365 return 0;
5366 if (index >= 27 && index < 35) /* local */
5367 return attr->mode;
5368 diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
5369 index ff23d1bdd230..9bd10a9b4b50 100644
5370 --- a/drivers/i2c/busses/i2c-at91.c
5371 +++ b/drivers/i2c/busses/i2c-at91.c
5372 @@ -65,6 +65,9 @@
5373 #define AT91_TWI_UNRE 0x0080 /* Underrun Error */
5374 #define AT91_TWI_NACK 0x0100 /* Not Acknowledged */
5375
5376 +#define AT91_TWI_INT_MASK \
5377 + (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
5378 +
5379 #define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
5380 #define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
5381 #define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
5382 @@ -119,13 +122,12 @@ static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
5383
5384 static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
5385 {
5386 - at91_twi_write(dev, AT91_TWI_IDR,
5387 - AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
5388 + at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
5389 }
5390
5391 static void at91_twi_irq_save(struct at91_twi_dev *dev)
5392 {
5393 - dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
5394 + dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
5395 at91_disable_twi_interrupts(dev);
5396 }
5397
5398 @@ -215,6 +217,14 @@ static void at91_twi_write_data_dma_callback(void *data)
5399 dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
5400 dev->buf_len, DMA_TO_DEVICE);
5401
5402 + /*
5403 + * When this callback is called, THR/TX FIFO is likely not to be empty
5404 + * yet. So we have to wait for TXCOMP or NACK bits to be set into the
5405 + * Status Register to be sure that the STOP bit has been sent and the
5406 + * transfer is completed. The NACK interrupt has already been enabled,
5407 + * we just have to enable TXCOMP one.
5408 + */
5409 + at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
5410 at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
5411 }
5412
5413 @@ -309,7 +319,7 @@ static void at91_twi_read_data_dma_callback(void *data)
5414 /* The last two bytes have to be read without using dma */
5415 dev->buf += dev->buf_len - 2;
5416 dev->buf_len = 2;
5417 - at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
5418 + at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP);
5419 }
5420
5421 static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
5422 @@ -370,7 +380,7 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
5423 /* catch error flags */
5424 dev->transfer_status |= status;
5425
5426 - if (irqstatus & AT91_TWI_TXCOMP) {
5427 + if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
5428 at91_disable_twi_interrupts(dev);
5429 complete(&dev->cmd_complete);
5430 }
5431 @@ -384,6 +394,34 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
5432 unsigned long time_left;
5433 bool has_unre_flag = dev->pdata->has_unre_flag;
5434
5435 + /*
5436 + * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
5437 + * read flag but shows the state of the transmission at the time the
5438 + * Status Register is read. According to the programmer datasheet,
5439 + * TXCOMP is set when both holding register and internal shifter are
5440 + * empty and STOP condition has been sent.
5441 + * Consequently, we should enable NACK interrupt rather than TXCOMP to
5442 + * detect transmission failure.
5443 + *
5444 + * Besides, the TXCOMP bit is already set before the i2c transaction
5445 + * has been started. For read transactions, this bit is cleared when
5446 + * writing the START bit into the Control Register. So the
5447 + * corresponding interrupt can safely be enabled just after.
5448 + * However for write transactions managed by the CPU, we first write
5449 + * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
5450 + * interrupt. If TXCOMP interrupt were enabled before writing into THR,
5451 + * the interrupt handler would be called immediately and the i2c command
5452 + * would be reported as completed.
5453 + * Also when a write transaction is managed by the DMA controller,
5454 + * enabling the TXCOMP interrupt in this function may lead to a race
5455 + * condition since we don't know whether the TXCOMP interrupt is enabled
5456 + * before or after the DMA has started to write into THR. So the TXCOMP
5457 + * interrupt is enabled later by at91_twi_write_data_dma_callback().
5458 + * Immediately after in that DMA callback, we still need to send the
5459 + * STOP condition manually writing the corresponding bit into the
5460 + * Control Register.
5461 + */
5462 +
5463 dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
5464 (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
5465
5466 @@ -414,26 +452,24 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
5467 * seems to be the best solution.
5468 */
5469 if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
5470 + at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
5471 at91_twi_read_data_dma(dev);
5472 - /*
5473 - * It is important to enable TXCOMP irq here because
5474 - * doing it only when transferring the last two bytes
5475 - * will mask NACK errors since TXCOMP is set when a
5476 - * NACK occurs.
5477 - */
5478 - at91_twi_write(dev, AT91_TWI_IER,
5479 - AT91_TWI_TXCOMP);
5480 - } else
5481 + } else {
5482 at91_twi_write(dev, AT91_TWI_IER,
5483 - AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
5484 + AT91_TWI_TXCOMP |
5485 + AT91_TWI_NACK |
5486 + AT91_TWI_RXRDY);
5487 + }
5488 } else {
5489 if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
5490 + at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
5491 at91_twi_write_data_dma(dev);
5492 - at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
5493 } else {
5494 at91_twi_write_next_byte(dev);
5495 at91_twi_write(dev, AT91_TWI_IER,
5496 - AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
5497 + AT91_TWI_TXCOMP |
5498 + AT91_TWI_NACK |
5499 + AT91_TWI_TXRDY);
5500 }
5501 }
5502
5503 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
5504 index 06cc1ff088f1..2ba7c0fbc615 100644
5505 --- a/drivers/i2c/i2c-mux.c
5506 +++ b/drivers/i2c/i2c-mux.c
5507 @@ -51,7 +51,7 @@ static int i2c_mux_master_xfer(struct i2c_adapter *adap,
5508
5509 ret = priv->select(parent, priv->mux_priv, priv->chan_id);
5510 if (ret >= 0)
5511 - ret = parent->algo->master_xfer(parent, msgs, num);
5512 + ret = __i2c_transfer(parent, msgs, num);
5513 if (priv->deselect)
5514 priv->deselect(parent, priv->mux_priv, priv->chan_id);
5515
5516 @@ -144,6 +144,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
5517 priv->adap.dev.parent = &parent->dev;
5518 priv->adap.retries = parent->retries;
5519 priv->adap.timeout = parent->timeout;
5520 + priv->adap.quirks = parent->quirks;
5521
5522 /* Sanity check on class */
5523 if (i2c_mux_parent_classes(parent) & class)
5524 diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
5525 index cb772775da43..0c8d4d2cbdaf 100644
5526 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c
5527 +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
5528 @@ -104,7 +104,7 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
5529 buf[0] = command;
5530 buf[1] = val;
5531 msg.buf = buf;
5532 - ret = adap->algo->master_xfer(adap, &msg, 1);
5533 + ret = __i2c_transfer(adap, &msg, 1);
5534 } else {
5535 union i2c_smbus_data data;
5536
5537 @@ -144,7 +144,7 @@ static int pca9541_reg_read(struct i2c_client *client, u8 command)
5538 .buf = &val
5539 }
5540 };
5541 - ret = adap->algo->master_xfer(adap, msg, 2);
5542 + ret = __i2c_transfer(adap, msg, 2);
5543 if (ret == 2)
5544 ret = val;
5545 else if (ret >= 0)
5546 diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
5547 index bea0d2de2993..ea4aa9dfcea9 100644
5548 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c
5549 +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
5550 @@ -134,7 +134,7 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
5551 msg.len = 1;
5552 buf[0] = val;
5553 msg.buf = buf;
5554 - ret = adap->algo->master_xfer(adap, &msg, 1);
5555 + ret = __i2c_transfer(adap, &msg, 1);
5556 } else {
5557 union i2c_smbus_data data;
5558 ret = adap->algo->smbus_xfer(adap, client->addr,
5559 diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
5560 index 73e87739d219..bf827d012a71 100644
5561 --- a/drivers/iio/accel/bmc150-accel.c
5562 +++ b/drivers/iio/accel/bmc150-accel.c
5563 @@ -1465,7 +1465,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
5564 {
5565 int i;
5566
5567 - for (i = from; i >= 0; i++) {
5568 + for (i = from; i >= 0; i--) {
5569 if (data->triggers[i].indio_trig) {
5570 iio_trigger_unregister(data->triggers[i].indio_trig);
5571 data->triggers[i].indio_trig = NULL;
5572 diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
5573 index e36a73e7c3a8..1bcb65b8d4a1 100644
5574 --- a/drivers/iio/adc/Kconfig
5575 +++ b/drivers/iio/adc/Kconfig
5576 @@ -146,8 +146,7 @@ config DA9150_GPADC
5577
5578 config CC10001_ADC
5579 tristate "Cosmic Circuits 10001 ADC driver"
5580 - depends on HAVE_CLK || REGULATOR
5581 - depends on HAS_IOMEM
5582 + depends on HAS_IOMEM && HAVE_CLK && REGULATOR
5583 select IIO_BUFFER
5584 select IIO_TRIGGERED_BUFFER
5585 help
5586 diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
5587 index 8a0eb4a04fb5..7b40925dd4ff 100644
5588 --- a/drivers/iio/adc/at91_adc.c
5589 +++ b/drivers/iio/adc/at91_adc.c
5590 @@ -182,7 +182,7 @@ struct at91_adc_caps {
5591 u8 ts_pen_detect_sensitivity;
5592
5593 /* startup time calculate function */
5594 - u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
5595 + u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);
5596
5597 u8 num_channels;
5598 struct at91_adc_reg_desc registers;
5599 @@ -201,7 +201,7 @@ struct at91_adc_state {
5600 u8 num_channels;
5601 void __iomem *reg_base;
5602 struct at91_adc_reg_desc *registers;
5603 - u8 startup_time;
5604 + u32 startup_time;
5605 u8 sample_hold_time;
5606 bool sleep_mode;
5607 struct iio_trigger **trig;
5608 @@ -779,7 +779,7 @@ ret:
5609 return ret;
5610 }
5611
5612 -static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
5613 +static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
5614 {
5615 /*
5616 * Number of ticks needed to cover the startup time of the ADC
5617 @@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
5618 return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
5619 }
5620
5621 -static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
5622 +static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
5623 {
5624 /*
5625 * For sama5d3x and at91sam9x5, the formula changes to:
5626 diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
5627 index 8d4e019ea4ca..9c311c1e1ac7 100644
5628 --- a/drivers/iio/adc/rockchip_saradc.c
5629 +++ b/drivers/iio/adc/rockchip_saradc.c
5630 @@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = {
5631 };
5632
5633 module_platform_driver(rockchip_saradc_driver);
5634 +
5635 +MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
5636 +MODULE_DESCRIPTION("Rockchip SARADC driver");
5637 +MODULE_LICENSE("GPL v2");
5638 diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
5639 index 94c5f05b4bc1..4caecbea4c97 100644
5640 --- a/drivers/iio/adc/twl4030-madc.c
5641 +++ b/drivers/iio/adc/twl4030-madc.c
5642 @@ -835,7 +835,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
5643 irq = platform_get_irq(pdev, 0);
5644 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
5645 twl4030_madc_threaded_irq_handler,
5646 - IRQF_TRIGGER_RISING, "twl4030_madc", madc);
5647 + IRQF_TRIGGER_RISING | IRQF_ONESHOT,
5648 + "twl4030_madc", madc);
5649 if (ret) {
5650 dev_err(&pdev->dev, "could not request irq\n");
5651 goto err_i2c;
5652 diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
5653 index 610fc98f88ef..595511022795 100644
5654 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
5655 +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
5656 @@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
5657 s32 poll_value = 0;
5658
5659 if (state) {
5660 + if (!atomic_read(&st->user_requested_state))
5661 + return 0;
5662 if (sensor_hub_device_open(st->hsdev))
5663 return -EIO;
5664
5665 @@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
5666
5667 poll_value = hid_sensor_read_poll_value(st);
5668 } else {
5669 - if (!atomic_dec_and_test(&st->data_ready))
5670 + int val;
5671 +
5672 + val = atomic_dec_if_positive(&st->data_ready);
5673 + if (val < 0)
5674 return 0;
5675 +
5676 sensor_hub_device_close(st->hsdev);
5677 state_val = hid_sensor_get_usage_index(st->hsdev,
5678 st->power_state.report_id,
5679 @@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state);
5680
5681 int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
5682 {
5683 +
5684 #ifdef CONFIG_PM
5685 int ret;
5686
5687 + atomic_set(&st->user_requested_state, state);
5688 if (state)
5689 ret = pm_runtime_get_sync(&st->pdev->dev);
5690 else {
5691 @@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
5692
5693 return 0;
5694 #else
5695 + atomic_set(&st->user_requested_state, state);
5696 return _hid_sensor_power_state(st, state);
5697 #endif
5698 }
5699 diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
5700 index 61bb9d4239ea..e98428df0d44 100644
5701 --- a/drivers/iio/dac/ad5624r_spi.c
5702 +++ b/drivers/iio/dac/ad5624r_spi.c
5703 @@ -22,7 +22,7 @@
5704 #include "ad5624r.h"
5705
5706 static int ad5624r_spi_write(struct spi_device *spi,
5707 - u8 cmd, u8 addr, u16 val, u8 len)
5708 + u8 cmd, u8 addr, u16 val, u8 shift)
5709 {
5710 u32 data;
5711 u8 msg[3];
5712 @@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
5713 * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
5714 * for the AD5664R, AD5644R, and AD5624R, respectively.
5715 */
5716 - data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
5717 + data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
5718 msg[0] = data >> 16;
5719 msg[1] = data >> 8;
5720 msg[2] = data;
5721 diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
5722 index 17d4bb15be4d..65ce86837177 100644
5723 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
5724 +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
5725 @@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
5726 return -EINVAL;
5727 }
5728
5729 +static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
5730 + struct iio_chan_spec const *chan, long mask)
5731 +{
5732 + switch (mask) {
5733 + case IIO_CHAN_INFO_SCALE:
5734 + switch (chan->type) {
5735 + case IIO_ANGL_VEL:
5736 + return IIO_VAL_INT_PLUS_NANO;
5737 + default:
5738 + return IIO_VAL_INT_PLUS_MICRO;
5739 + }
5740 + default:
5741 + return IIO_VAL_INT_PLUS_MICRO;
5742 + }
5743 +
5744 + return -EINVAL;
5745 +}
5746 static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
5747 {
5748 int result, i;
5749 @@ -696,6 +713,7 @@ static const struct iio_info mpu_info = {
5750 .driver_module = THIS_MODULE,
5751 .read_raw = &inv_mpu6050_read_raw,
5752 .write_raw = &inv_mpu6050_write_raw,
5753 + .write_raw_get_fmt = &inv_write_raw_get_fmt,
5754 .attrs = &inv_attribute_group,
5755 .validate_trigger = inv_mpu6050_validate_trigger,
5756 };
5757 diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
5758 index 869033e48a1f..a1d4905cc9d2 100644
5759 --- a/drivers/iio/light/cm3323.c
5760 +++ b/drivers/iio/light/cm3323.c
5761 @@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
5762 for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) {
5763 if (val == cm3323_int_time[i].val &&
5764 val2 == cm3323_int_time[i].val2) {
5765 - reg_conf = data->reg_conf;
5766 + reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK;
5767 reg_conf |= i << CM3323_CONF_IT_SHIFT;
5768
5769 ret = i2c_smbus_write_word_data(data->client,
5770 diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
5771 index 71c2bde275aa..f8b1df018abe 100644
5772 --- a/drivers/iio/light/tcs3414.c
5773 +++ b/drivers/iio/light/tcs3414.c
5774 @@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev,
5775 if (val != 0)
5776 return -EINVAL;
5777 for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) {
5778 - if (val == tcs3414_times[i] * 1000) {
5779 + if (val2 == tcs3414_times[i] * 1000) {
5780 data->timing &= ~TCS3414_INTEG_MASK;
5781 data->timing |= i;
5782 return i2c_smbus_write_byte_data(
5783 diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
5784 index fa40f6d0ca39..bd26a484abcc 100644
5785 --- a/drivers/iio/proximity/sx9500.c
5786 +++ b/drivers/iio/proximity/sx9500.c
5787 @@ -206,7 +206,7 @@ static int sx9500_read_proximity(struct sx9500_data *data,
5788 if (ret < 0)
5789 return ret;
5790
5791 - *val = 32767 - (s16)be16_to_cpu(regval);
5792 + *val = be16_to_cpu(regval);
5793
5794 return IIO_VAL_INT;
5795 }
5796 diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
5797 index 84a0789c3d96..7a8050996b4e 100644
5798 --- a/drivers/iio/temperature/tmp006.c
5799 +++ b/drivers/iio/temperature/tmp006.c
5800 @@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
5801 struct tmp006_data *data = iio_priv(indio_dev);
5802 int i;
5803
5804 + if (mask != IIO_CHAN_INFO_SAMP_FREQ)
5805 + return -EINVAL;
5806 +
5807 for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
5808 if ((val == tmp006_freqs[i][0]) &&
5809 (val2 == tmp006_freqs[i][1])) {
5810 diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
5811 index 9dcb66077d6c..219f2122f9b9 100644
5812 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
5813 +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
5814 @@ -679,7 +679,6 @@ err:
5815 ocrdma_release_ucontext_pd(uctx);
5816 } else {
5817 status = _ocrdma_dealloc_pd(dev, pd);
5818 - kfree(pd);
5819 }
5820 exit:
5821 return ERR_PTR(status);
5822 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
5823 index 135a0907e9de..c90118e90708 100644
5824 --- a/drivers/md/bitmap.c
5825 +++ b/drivers/md/bitmap.c
5826 @@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
5827 bitmap_super_t *sb;
5828 unsigned long chunksize, daemon_sleep, write_behind;
5829
5830 - bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
5831 + bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
5832 if (bitmap->storage.sb_page == NULL)
5833 return -ENOMEM;
5834 bitmap->storage.sb_page->index = 0;
5835 @@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
5836 sb->state = cpu_to_le32(bitmap->flags);
5837 bitmap->events_cleared = bitmap->mddev->events;
5838 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
5839 + bitmap->mddev->bitmap_info.nodes = 0;
5840
5841 kunmap_atomic(sb);
5842
5843 @@ -611,8 +612,16 @@ re_read:
5844 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
5845 write_behind = le32_to_cpu(sb->write_behind);
5846 sectors_reserved = le32_to_cpu(sb->sectors_reserved);
5847 - nodes = le32_to_cpu(sb->nodes);
5848 - strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
5849 + /* XXX: This is a hack to ensure that we don't use clustering
5850 + * in case:
5851 + * - dm-raid is in use and
5852 + * - the nodes written in bitmap_sb is erroneous.
5853 + */
5854 + if (!bitmap->mddev->sync_super) {
5855 + nodes = le32_to_cpu(sb->nodes);
5856 + strlcpy(bitmap->mddev->bitmap_info.cluster_name,
5857 + sb->cluster_name, 64);
5858 + }
5859
5860 /* verify that the bitmap-specific fields are valid */
5861 if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
5862 diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
5863 index b04d1f904d07..004e463c9423 100644
5864 --- a/drivers/md/dm-cache-policy-cleaner.c
5865 +++ b/drivers/md/dm-cache-policy-cleaner.c
5866 @@ -171,7 +171,8 @@ static void remove_cache_hash_entry(struct wb_cache_entry *e)
5867 /* Public interface (see dm-cache-policy.h */
5868 static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
5869 bool can_block, bool can_migrate, bool discarded_oblock,
5870 - struct bio *bio, struct policy_result *result)
5871 + struct bio *bio, struct policy_locker *locker,
5872 + struct policy_result *result)
5873 {
5874 struct policy *p = to_policy(pe);
5875 struct wb_cache_entry *e;
5876 diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
5877 index 2256a1f24f73..c198e6defb9c 100644
5878 --- a/drivers/md/dm-cache-policy-internal.h
5879 +++ b/drivers/md/dm-cache-policy-internal.h
5880 @@ -16,9 +16,10 @@
5881 */
5882 static inline int policy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
5883 bool can_block, bool can_migrate, bool discarded_oblock,
5884 - struct bio *bio, struct policy_result *result)
5885 + struct bio *bio, struct policy_locker *locker,
5886 + struct policy_result *result)
5887 {
5888 - return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, result);
5889 + return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, locker, result);
5890 }
5891
5892 static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
5893 diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
5894 index 3ddd1162334d..515d44bf24d3 100644
5895 --- a/drivers/md/dm-cache-policy-mq.c
5896 +++ b/drivers/md/dm-cache-policy-mq.c
5897 @@ -693,9 +693,10 @@ static void requeue(struct mq_policy *mq, struct entry *e)
5898 * - set the hit count to a hard coded value other than 1, eg, is it better
5899 * if it goes in at level 2?
5900 */
5901 -static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
5902 +static int demote_cblock(struct mq_policy *mq,
5903 + struct policy_locker *locker, dm_oblock_t *oblock)
5904 {
5905 - struct entry *demoted = pop(mq, &mq->cache_clean);
5906 + struct entry *demoted = peek(&mq->cache_clean);
5907
5908 if (!demoted)
5909 /*
5910 @@ -707,6 +708,13 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
5911 */
5912 return -ENOSPC;
5913
5914 + if (locker->fn(locker, demoted->oblock))
5915 + /*
5916 + * We couldn't lock the demoted block.
5917 + */
5918 + return -EBUSY;
5919 +
5920 + del(mq, demoted);
5921 *oblock = demoted->oblock;
5922 free_entry(&mq->cache_pool, demoted);
5923
5924 @@ -795,6 +803,7 @@ static int cache_entry_found(struct mq_policy *mq,
5925 * finding which cache block to use.
5926 */
5927 static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
5928 + struct policy_locker *locker,
5929 struct policy_result *result)
5930 {
5931 int r;
5932 @@ -803,11 +812,12 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
5933 /* Ensure there's a free cblock in the cache */
5934 if (epool_empty(&mq->cache_pool)) {
5935 result->op = POLICY_REPLACE;
5936 - r = demote_cblock(mq, &result->old_oblock);
5937 + r = demote_cblock(mq, locker, &result->old_oblock);
5938 if (r) {
5939 result->op = POLICY_MISS;
5940 return 0;
5941 }
5942 +
5943 } else
5944 result->op = POLICY_NEW;
5945
5946 @@ -829,7 +839,8 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
5947
5948 static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
5949 bool can_migrate, bool discarded_oblock,
5950 - int data_dir, struct policy_result *result)
5951 + int data_dir, struct policy_locker *locker,
5952 + struct policy_result *result)
5953 {
5954 int r = 0;
5955
5956 @@ -842,7 +853,7 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
5957
5958 else {
5959 requeue(mq, e);
5960 - r = pre_cache_to_cache(mq, e, result);
5961 + r = pre_cache_to_cache(mq, e, locker, result);
5962 }
5963
5964 return r;
5965 @@ -872,6 +883,7 @@ static void insert_in_pre_cache(struct mq_policy *mq,
5966 }
5967
5968 static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
5969 + struct policy_locker *locker,
5970 struct policy_result *result)
5971 {
5972 int r;
5973 @@ -879,7 +891,7 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
5974
5975 if (epool_empty(&mq->cache_pool)) {
5976 result->op = POLICY_REPLACE;
5977 - r = demote_cblock(mq, &result->old_oblock);
5978 + r = demote_cblock(mq, locker, &result->old_oblock);
5979 if (unlikely(r)) {
5980 result->op = POLICY_MISS;
5981 insert_in_pre_cache(mq, oblock);
5982 @@ -907,11 +919,12 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
5983
5984 static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
5985 bool can_migrate, bool discarded_oblock,
5986 - int data_dir, struct policy_result *result)
5987 + int data_dir, struct policy_locker *locker,
5988 + struct policy_result *result)
5989 {
5990 if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
5991 if (can_migrate)
5992 - insert_in_cache(mq, oblock, result);
5993 + insert_in_cache(mq, oblock, locker, result);
5994 else
5995 return -EWOULDBLOCK;
5996 } else {
5997 @@ -928,7 +941,8 @@ static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
5998 */
5999 static int map(struct mq_policy *mq, dm_oblock_t oblock,
6000 bool can_migrate, bool discarded_oblock,
6001 - int data_dir, struct policy_result *result)
6002 + int data_dir, struct policy_locker *locker,
6003 + struct policy_result *result)
6004 {
6005 int r = 0;
6006 struct entry *e = hash_lookup(mq, oblock);
6007 @@ -942,11 +956,11 @@ static int map(struct mq_policy *mq, dm_oblock_t oblock,
6008
6009 else if (e)
6010 r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
6011 - data_dir, result);
6012 + data_dir, locker, result);
6013
6014 else
6015 r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
6016 - data_dir, result);
6017 + data_dir, locker, result);
6018
6019 if (r == -EWOULDBLOCK)
6020 result->op = POLICY_MISS;
6021 @@ -1012,7 +1026,8 @@ static void copy_tick(struct mq_policy *mq)
6022
6023 static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
6024 bool can_block, bool can_migrate, bool discarded_oblock,
6025 - struct bio *bio, struct policy_result *result)
6026 + struct bio *bio, struct policy_locker *locker,
6027 + struct policy_result *result)
6028 {
6029 int r;
6030 struct mq_policy *mq = to_mq_policy(p);
6031 @@ -1028,7 +1043,7 @@ static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
6032
6033 iot_examine_bio(&mq->tracker, bio);
6034 r = map(mq, oblock, can_migrate, discarded_oblock,
6035 - bio_data_dir(bio), result);
6036 + bio_data_dir(bio), locker, result);
6037
6038 mutex_unlock(&mq->lock);
6039
6040 diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
6041 index f50fe360c546..5524e21e4836 100644
6042 --- a/drivers/md/dm-cache-policy.h
6043 +++ b/drivers/md/dm-cache-policy.h
6044 @@ -70,6 +70,18 @@ enum policy_operation {
6045 };
6046
6047 /*
6048 + * When issuing a POLICY_REPLACE the policy needs to make a callback to
6049 + * lock the block being demoted. This doesn't need to occur during a
6050 + * writeback operation since the block remains in the cache.
6051 + */
6052 +struct policy_locker;
6053 +typedef int (*policy_lock_fn)(struct policy_locker *l, dm_oblock_t oblock);
6054 +
6055 +struct policy_locker {
6056 + policy_lock_fn fn;
6057 +};
6058 +
6059 +/*
6060 * This is the instruction passed back to the core target.
6061 */
6062 struct policy_result {
6063 @@ -122,7 +134,8 @@ struct dm_cache_policy {
6064 */
6065 int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
6066 bool can_block, bool can_migrate, bool discarded_oblock,
6067 - struct bio *bio, struct policy_result *result);
6068 + struct bio *bio, struct policy_locker *locker,
6069 + struct policy_result *result);
6070
6071 /*
6072 * Sometimes we want to see if a block is in the cache, without
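The dm-cache-policy.h hunk above introduces the policy_locker callback that a policy uses to ask the core target to lock the block it wants to demote on a POLICY_REPLACE. The interface relies on the caller embedding the locker in its own state and recovering it with container_of, as dm-cache-target.c does below with old_oblock_lock and cell_locker. A minimal user-space sketch of that embedding pattern; everything apart from policy_locker/policy_lock_fn is a made-up name for the example.

#include <stddef.h>
#include <stdio.h>

typedef unsigned long long dm_oblock_t;          /* stand-in for the kernel type */

struct policy_locker;
typedef int (*policy_lock_fn)(struct policy_locker *l, dm_oblock_t oblock);

struct policy_locker {
	policy_lock_fn fn;
};

/* Caller-side state wrapping the locker (hypothetical example struct). */
struct demo_lock_ctx {
	struct policy_locker locker;
	dm_oblock_t busy_block;                  /* pretend this block is already in use */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Non-zero means "could not lock", mirroring how demote_cblock() treats the callback. */
static int demo_lock(struct policy_locker *l, dm_oblock_t b)
{
	struct demo_lock_ctx *ctx = container_of(l, struct demo_lock_ctx, locker);

	return b == ctx->busy_block ? -1 : 0;
}

int main(void)
{
	struct demo_lock_ctx ctx = { .locker.fn = demo_lock, .busy_block = 42 };

	/* A policy would call ctx.locker.fn(&ctx.locker, oblock) before demoting oblock. */
	printf("lock 41 -> %d, lock 42 -> %d\n",
	       ctx.locker.fn(&ctx.locker, 41), ctx.locker.fn(&ctx.locker, 42));
	return 0;
}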
6073 diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
6074 index 7755af351867..e049becaaf2d 100644
6075 --- a/drivers/md/dm-cache-target.c
6076 +++ b/drivers/md/dm-cache-target.c
6077 @@ -1445,16 +1445,43 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
6078 &cache->stats.read_miss : &cache->stats.write_miss);
6079 }
6080
6081 +/*----------------------------------------------------------------*/
6082 +
6083 +struct old_oblock_lock {
6084 + struct policy_locker locker;
6085 + struct cache *cache;
6086 + struct prealloc *structs;
6087 + struct dm_bio_prison_cell *cell;
6088 +};
6089 +
6090 +static int null_locker(struct policy_locker *locker, dm_oblock_t b)
6091 +{
6092 + /* This should never be called */
6093 + BUG();
6094 + return 0;
6095 +}
6096 +
6097 +static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
6098 +{
6099 + struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker);
6100 + struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);
6101 +
6102 + return bio_detain(l->cache, b, NULL, cell_prealloc,
6103 + (cell_free_fn) prealloc_put_cell,
6104 + l->structs, &l->cell);
6105 +}
6106 +
6107 static void process_bio(struct cache *cache, struct prealloc *structs,
6108 struct bio *bio)
6109 {
6110 int r;
6111 bool release_cell = true;
6112 dm_oblock_t block = get_bio_block(cache, bio);
6113 - struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
6114 + struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
6115 struct policy_result lookup_result;
6116 bool passthrough = passthrough_mode(&cache->features);
6117 bool discarded_block, can_migrate;
6118 + struct old_oblock_lock ool;
6119
6120 /*
6121 * Check to see if that block is currently migrating.
6122 @@ -1469,8 +1496,12 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
6123 discarded_block = is_discarded_oblock(cache, block);
6124 can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
6125
6126 + ool.locker.fn = cell_locker;
6127 + ool.cache = cache;
6128 + ool.structs = structs;
6129 + ool.cell = NULL;
6130 r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
6131 - bio, &lookup_result);
6132 + bio, &ool.locker, &lookup_result);
6133
6134 if (r == -EWOULDBLOCK)
6135 /* migration has been denied */
6136 @@ -1527,27 +1558,11 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
6137 break;
6138
6139 case POLICY_REPLACE:
6140 - cell_prealloc = prealloc_get_cell(structs);
6141 - r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
6142 - (cell_free_fn) prealloc_put_cell,
6143 - structs, &old_ocell);
6144 - if (r > 0) {
6145 - /*
6146 - * We have to be careful to avoid lock inversion of
6147 - * the cells. So we back off, and wait for the
6148 - * old_ocell to become free.
6149 - */
6150 - policy_force_mapping(cache->policy, block,
6151 - lookup_result.old_oblock);
6152 - atomic_inc(&cache->stats.cache_cell_clash);
6153 - break;
6154 - }
6155 atomic_inc(&cache->stats.demotion);
6156 atomic_inc(&cache->stats.promotion);
6157 -
6158 demote_then_promote(cache, structs, lookup_result.old_oblock,
6159 block, lookup_result.cblock,
6160 - old_ocell, new_ocell);
6161 + ool.cell, new_ocell);
6162 release_cell = false;
6163 break;
6164
6165 @@ -2595,6 +2610,9 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
6166 bool discarded_block;
6167 struct policy_result lookup_result;
6168 struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
6169 + struct old_oblock_lock ool;
6170 +
6171 + ool.locker.fn = null_locker;
6172
6173 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
6174 /*
6175 @@ -2633,7 +2651,7 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
6176 discarded_block = is_discarded_oblock(cache, block);
6177
6178 r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
6179 - bio, &lookup_result);
6180 + bio, &ool.locker, &lookup_result);
6181 if (r == -EWOULDBLOCK) {
6182 cell_defer(cache, *cell, true);
6183 return DM_MAPIO_SUBMITTED;
6184 diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
6185 index f478a4c96d2f..419bdd4fc8b8 100644
6186 --- a/drivers/md/dm-stats.c
6187 +++ b/drivers/md/dm-stats.c
6188 @@ -795,6 +795,8 @@ static int message_stats_create(struct mapped_device *md,
6189 return -EINVAL;
6190
6191 if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
6192 + if (!divisor)
6193 + return -EINVAL;
6194 step = end - start;
6195 if (do_div(step, divisor))
6196 step++;
6197 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
6198 index 921aafd12aee..e22e6c892b8a 100644
6199 --- a/drivers/md/dm-thin.c
6200 +++ b/drivers/md/dm-thin.c
6201 @@ -18,6 +18,7 @@
6202 #include <linux/init.h>
6203 #include <linux/module.h>
6204 #include <linux/slab.h>
6205 +#include <linux/vmalloc.h>
6206 #include <linux/sort.h>
6207 #include <linux/rbtree.h>
6208
6209 @@ -260,7 +261,7 @@ struct pool {
6210 process_mapping_fn process_prepared_mapping;
6211 process_mapping_fn process_prepared_discard;
6212
6213 - struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
6214 + struct dm_bio_prison_cell **cell_sort_array;
6215 };
6216
6217 static enum pool_mode get_pool_mode(struct pool *pool);
6218 @@ -2499,6 +2500,7 @@ static void __pool_destroy(struct pool *pool)
6219 {
6220 __pool_table_remove(pool);
6221
6222 + vfree(pool->cell_sort_array);
6223 if (dm_pool_metadata_close(pool->pmd) < 0)
6224 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
6225
6226 @@ -2611,6 +2613,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,
6227 goto bad_mapping_pool;
6228 }
6229
6230 + pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
6231 + if (!pool->cell_sort_array) {
6232 + *error = "Error allocating cell sort array";
6233 + err_p = ERR_PTR(-ENOMEM);
6234 + goto bad_sort_array;
6235 + }
6236 +
6237 pool->ref_count = 1;
6238 pool->last_commit_jiffies = jiffies;
6239 pool->pool_md = pool_md;
6240 @@ -2619,6 +2628,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
6241
6242 return pool;
6243
6244 +bad_sort_array:
6245 + mempool_destroy(pool->mapping_pool);
6246 bad_mapping_pool:
6247 dm_deferred_set_destroy(pool->all_io_ds);
6248 bad_all_io_ds:
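The dm-thin hunks above replace a large array embedded in struct pool with a vmalloc'd pointer array, free it in __pool_destroy(), and unwind the allocation on the new bad_sort_array error path. A small user-space sketch of the same allocate/check/unwind shape, with malloc/free standing in for vmalloc/vfree and made-up names throughout.

#include <stdlib.h>
#include <stdio.h>

#define SORT_ARRAY_SIZE 8192              /* placeholder for CELL_SORT_ARRAY_SIZE */

struct demo_pool {
	void **cell_sort_array;           /* was: void *cell_sort_array[SORT_ARRAY_SIZE]; */
};

static struct demo_pool *demo_pool_create(void)
{
	struct demo_pool *pool = malloc(sizeof(*pool));

	if (!pool)
		return NULL;

	/* sizeof(*array) * count stays correct even if the element type changes */
	pool->cell_sort_array = malloc(sizeof(*pool->cell_sort_array) * SORT_ARRAY_SIZE);
	if (!pool->cell_sort_array) {
		free(pool);               /* unwind, like the bad_sort_array label */
		return NULL;
	}
	return pool;
}

static void demo_pool_destroy(struct demo_pool *pool)
{
	free(pool->cell_sort_array);      /* mirrors the vfree() added to __pool_destroy() */
	free(pool);
}

int main(void)
{
	struct demo_pool *p = demo_pool_create();

	printf("created: %s\n", p ? "yes" : "no");
	if (p)
		demo_pool_destroy(p);
	return 0;
}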
6249 diff --git a/drivers/md/md.c b/drivers/md/md.c
6250 index 4dbed4a67aaf..b9200282fd77 100644
6251 --- a/drivers/md/md.c
6252 +++ b/drivers/md/md.c
6253 @@ -4005,8 +4005,10 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
6254 else
6255 rdev = md_import_device(dev, -1, -1);
6256
6257 - if (IS_ERR(rdev))
6258 + if (IS_ERR(rdev)) {
6259 + mddev_unlock(mddev);
6260 return PTR_ERR(rdev);
6261 + }
6262 err = bind_rdev_to_array(rdev, mddev);
6263 out:
6264 if (err)
6265 @@ -5159,6 +5161,7 @@ int md_run(struct mddev *mddev)
6266 mddev_detach(mddev);
6267 if (mddev->private)
6268 pers->free(mddev, mddev->private);
6269 + mddev->private = NULL;
6270 module_put(pers->owner);
6271 bitmap_destroy(mddev);
6272 return err;
6273 @@ -5294,6 +5297,7 @@ static void md_clean(struct mddev *mddev)
6274 mddev->changed = 0;
6275 mddev->degraded = 0;
6276 mddev->safemode = 0;
6277 + mddev->private = NULL;
6278 mddev->merge_check_needed = 0;
6279 mddev->bitmap_info.offset = 0;
6280 mddev->bitmap_info.default_offset = 0;
6281 @@ -5366,6 +5370,7 @@ static void __md_stop(struct mddev *mddev)
6282 mddev->pers = NULL;
6283 spin_unlock(&mddev->lock);
6284 pers->free(mddev, mddev->private);
6285 + mddev->private = NULL;
6286 if (pers->sync_request && mddev->to_remove == NULL)
6287 mddev->to_remove = &md_redundancy_group;
6288 module_put(pers->owner);
6289 @@ -6375,7 +6380,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6290 mddev->ctime != info->ctime ||
6291 mddev->level != info->level ||
6292 /* mddev->layout != info->layout || */
6293 - !mddev->persistent != info->not_persistent||
6294 + mddev->persistent != !info->not_persistent ||
6295 mddev->chunk_sectors != info->chunk_size >> 9 ||
6296 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6297 ((state^info->state) & 0xfffffe00)
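The update_array_info() hunk above rewrites !mddev->persistent != info->not_persistent as mddev->persistent != !info->not_persistent. The point is where the logical negation lands: !x collapses a value to 0 or 1, so it has to be applied to the side that may hold values other than 0 or 1 for the comparison to behave as a boolean test. A tiny stand-alone illustration with hypothetical values:

#include <stdio.h>

int main(void)
{
	int persistent = 0;       /* flag that is already normalised to 0 or 1 */
	int not_persistent = 2;   /* caller-supplied, any non-zero value means "true" */

	/* old form: !persistent (1) != not_persistent (2) -> spurious mismatch */
	printf("old: %d\n", !persistent != not_persistent);

	/* new form: persistent (0) != !not_persistent (0) -> agreement detected */
	printf("new: %d\n", persistent != !not_persistent);
	return 0;
}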
6298 diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
6299 index b88757cd0d1d..a03178e91a79 100644
6300 --- a/drivers/md/persistent-data/dm-btree-remove.c
6301 +++ b/drivers/md/persistent-data/dm-btree-remove.c
6302 @@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
6303
6304 if (s < 0 && nr_center < -s) {
6305 /* not enough in central node */
6306 - shift(left, center, nr_center);
6307 - s = nr_center - target;
6308 + shift(left, center, -nr_center);
6309 + s += nr_center;
6310 shift(left, right, s);
6311 nr_right += s;
6312 } else
6313 @@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
6314 if (s > 0 && nr_center < s) {
6315 /* not enough in central node */
6316 shift(center, right, nr_center);
6317 - s = target - nr_center;
6318 + s -= nr_center;
6319 shift(left, right, s);
6320 nr_left -= s;
6321 } else
6322 diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
6323 index 200ac12a1d40..fdd3793e22f9 100644
6324 --- a/drivers/md/persistent-data/dm-btree.c
6325 +++ b/drivers/md/persistent-data/dm-btree.c
6326 @@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
6327 int r;
6328 struct del_stack *s;
6329
6330 - s = kmalloc(sizeof(*s), GFP_KERNEL);
6331 + s = kmalloc(sizeof(*s), GFP_NOIO);
6332 if (!s)
6333 return -ENOMEM;
6334 s->info = info;
6335 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
6336 index e8a904298887..53091295fce9 100644
6337 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
6338 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
6339 @@ -204,6 +204,27 @@ static void in(struct sm_metadata *smm)
6340 smm->recursion_count++;
6341 }
6342
6343 +static int apply_bops(struct sm_metadata *smm)
6344 +{
6345 + int r = 0;
6346 +
6347 + while (!brb_empty(&smm->uncommitted)) {
6348 + struct block_op bop;
6349 +
6350 + r = brb_pop(&smm->uncommitted, &bop);
6351 + if (r) {
6352 + DMERR("bug in bop ring buffer");
6353 + break;
6354 + }
6355 +
6356 + r = commit_bop(smm, &bop);
6357 + if (r)
6358 + break;
6359 + }
6360 +
6361 + return r;
6362 +}
6363 +
6364 static int out(struct sm_metadata *smm)
6365 {
6366 int r = 0;
6367 @@ -216,21 +237,8 @@ static int out(struct sm_metadata *smm)
6368 return -ENOMEM;
6369 }
6370
6371 - if (smm->recursion_count == 1) {
6372 - while (!brb_empty(&smm->uncommitted)) {
6373 - struct block_op bop;
6374 -
6375 - r = brb_pop(&smm->uncommitted, &bop);
6376 - if (r) {
6377 - DMERR("bug in bop ring buffer");
6378 - break;
6379 - }
6380 -
6381 - r = commit_bop(smm, &bop);
6382 - if (r)
6383 - break;
6384 - }
6385 - }
6386 + if (smm->recursion_count == 1)
6387 + apply_bops(smm);
6388
6389 smm->recursion_count--;
6390
6391 @@ -704,6 +712,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
6392 }
6393 old_len = smm->begin;
6394
6395 + r = apply_bops(smm);
6396 + if (r) {
6397 + DMERR("%s: apply_bops failed", __func__);
6398 + goto out;
6399 + }
6400 +
6401 r = sm_ll_commit(&smm->ll);
6402 if (r)
6403 goto out;
6404 @@ -773,6 +787,12 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
6405 if (r)
6406 return r;
6407
6408 + r = apply_bops(smm);
6409 + if (r) {
6410 + DMERR("%s: apply_bops failed", __func__);
6411 + return r;
6412 + }
6413 +
6414 return sm_metadata_commit(sm);
6415 }
6416
6417 diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
6418 index 8001690d7576..ba6c8f6c42a1 100644
6419 --- a/drivers/media/dvb-frontends/af9013.c
6420 +++ b/drivers/media/dvb-frontends/af9013.c
6421 @@ -605,6 +605,10 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
6422 }
6423 }
6424
6425 + /* Return an error if can't find bandwidth or the right clock */
6426 + if (i == ARRAY_SIZE(coeff_lut))
6427 + return -EINVAL;
6428 +
6429 ret = af9013_wr_regs(state, 0xae00, coeff_lut[i].val,
6430 sizeof(coeff_lut[i].val));
6431 }
6432 diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
6433 index 2916d7c74a1d..7bc68b355c0b 100644
6434 --- a/drivers/media/dvb-frontends/cx24116.c
6435 +++ b/drivers/media/dvb-frontends/cx24116.c
6436 @@ -963,6 +963,10 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
6437 struct cx24116_state *state = fe->demodulator_priv;
6438 int i, ret;
6439
6440 + /* Validate length */
6441 + if (d->msg_len > sizeof(d->msg))
6442 + return -EINVAL;
6443 +
6444 /* Dump DiSEqC message */
6445 if (debug) {
6446 printk(KERN_INFO "cx24116: %s(", __func__);
6447 @@ -974,10 +978,6 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
6448 printk(") toneburst=%d\n", toneburst);
6449 }
6450
6451 - /* Validate length */
6452 - if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
6453 - return -EINVAL;
6454 -
6455 /* DiSEqC message */
6456 for (i = 0; i < d->msg_len; i++)
6457 state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
6458 diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
6459 index acb965ce0358..af6363573efd 100644
6460 --- a/drivers/media/dvb-frontends/cx24117.c
6461 +++ b/drivers/media/dvb-frontends/cx24117.c
6462 @@ -1043,7 +1043,7 @@ static int cx24117_send_diseqc_msg(struct dvb_frontend *fe,
6463 dev_dbg(&state->priv->i2c->dev, ")\n");
6464
6465 /* Validate length */
6466 - if (d->msg_len > 15)
6467 + if (d->msg_len > sizeof(d->msg))
6468 return -EINVAL;
6469
6470 /* DiSEqC message */
6471 diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
6472 index 93eeaf7118fd..0b4f8fe6bf99 100644
6473 --- a/drivers/media/dvb-frontends/s5h1420.c
6474 +++ b/drivers/media/dvb-frontends/s5h1420.c
6475 @@ -180,7 +180,7 @@ static int s5h1420_send_master_cmd (struct dvb_frontend* fe,
6476 int result = 0;
6477
6478 dprintk("enter %s\n", __func__);
6479 - if (cmd->msg_len > 8)
6480 + if (cmd->msg_len > sizeof(cmd->msg))
6481 return -EINVAL;
6482
6483 /* setup for DISEQC */
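The three frontend fixes above bound the DiSEqC message length by sizeof(...->msg) and, in cx24116, perform that check before the message is dumped or copied. The underlying pattern is simply validating a caller-supplied length against the buffers involved before touching them; a small sketch with a hypothetical command struct loosely shaped like dvb_diseqc_master_cmd:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct demo_cmd {
	unsigned char msg[6];
	int msg_len;
};

static int demo_send(unsigned char *hw_buf, size_t hw_len, const struct demo_cmd *d)
{
	/* Validate the length against both buffers before any copy or logging. */
	if (d->msg_len < 0 || (size_t)d->msg_len > sizeof(d->msg) ||
	    (size_t)d->msg_len > hw_len)
		return -EINVAL;

	memcpy(hw_buf, d->msg, d->msg_len);
	return 0;
}

int main(void)
{
	unsigned char buf[8];
	struct demo_cmd ok = { .msg = { 0xe0, 0x10, 0x38, 0xf0 }, .msg_len = 4 };
	struct demo_cmd bad = { .msg_len = 40 };        /* larger than msg[] */

	printf("ok: %d, bad: %d\n", demo_send(buf, sizeof(buf), &ok),
	       demo_send(buf, sizeof(buf), &bad));
	return 0;
}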
6484 diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
6485 index c82d25d53341..c9860845264f 100644
6486 --- a/drivers/media/pci/cx18/cx18-streams.c
6487 +++ b/drivers/media/pci/cx18/cx18-streams.c
6488 @@ -90,6 +90,7 @@ static struct {
6489 "encoder PCM audio",
6490 VFL_TYPE_GRABBER, CX18_V4L2_ENC_PCM_OFFSET,
6491 PCI_DMA_FROMDEVICE,
6492 + V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
6493 },
6494 { /* CX18_ENC_STREAM_TYPE_IDX */
6495 "encoder IDX",
6496 diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
6497 index 9266965412c3..7a0a65146723 100644
6498 --- a/drivers/media/pci/saa7164/saa7164-encoder.c
6499 +++ b/drivers/media/pci/saa7164/saa7164-encoder.c
6500 @@ -721,13 +721,14 @@ static int vidioc_querycap(struct file *file, void *priv,
6501 sizeof(cap->card));
6502 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
6503
6504 - cap->capabilities =
6505 + cap->device_caps =
6506 V4L2_CAP_VIDEO_CAPTURE |
6507 - V4L2_CAP_READWRITE |
6508 - 0;
6509 + V4L2_CAP_READWRITE |
6510 + V4L2_CAP_TUNER;
6511
6512 - cap->capabilities |= V4L2_CAP_TUNER;
6513 - cap->version = 0;
6514 + cap->capabilities = cap->device_caps |
6515 + V4L2_CAP_VBI_CAPTURE |
6516 + V4L2_CAP_DEVICE_CAPS;
6517
6518 return 0;
6519 }
6520 diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
6521 index 6e025fea2542..06117e6c0596 100644
6522 --- a/drivers/media/pci/saa7164/saa7164-vbi.c
6523 +++ b/drivers/media/pci/saa7164/saa7164-vbi.c
6524 @@ -660,13 +660,14 @@ static int vidioc_querycap(struct file *file, void *priv,
6525 sizeof(cap->card));
6526 sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
6527
6528 - cap->capabilities =
6529 + cap->device_caps =
6530 V4L2_CAP_VBI_CAPTURE |
6531 - V4L2_CAP_READWRITE |
6532 - 0;
6533 + V4L2_CAP_READWRITE |
6534 + V4L2_CAP_TUNER;
6535
6536 - cap->capabilities |= V4L2_CAP_TUNER;
6537 - cap->version = 0;
6538 + cap->capabilities = cap->device_caps |
6539 + V4L2_CAP_VIDEO_CAPTURE |
6540 + V4L2_CAP_DEVICE_CAPS;
6541
6542 return 0;
6543 }
6544 diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
6545 index 2b40393836ff..0d248ce02a9b 100644
6546 --- a/drivers/media/usb/dvb-usb/dib0700_core.c
6547 +++ b/drivers/media/usb/dvb-usb/dib0700_core.c
6548 @@ -655,10 +655,20 @@ out:
6549 struct dib0700_rc_response {
6550 u8 report_id;
6551 u8 data_state;
6552 - u8 system;
6553 - u8 not_system;
6554 - u8 data;
6555 - u8 not_data;
6556 + union {
6557 + struct {
6558 + u8 system;
6559 + u8 not_system;
6560 + u8 data;
6561 + u8 not_data;
6562 + } nec;
6563 + struct {
6564 + u8 not_used;
6565 + u8 system;
6566 + u8 data;
6567 + u8 not_data;
6568 + } rc5;
6569 + };
6570 };
6571 #define RC_MSG_SIZE_V1_20 6
6572
6573 @@ -694,8 +704,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
6574
6575 deb_data("IR ID = %02X state = %02X System = %02X %02X Cmd = %02X %02X (len %d)\n",
6576 poll_reply->report_id, poll_reply->data_state,
6577 - poll_reply->system, poll_reply->not_system,
6578 - poll_reply->data, poll_reply->not_data,
6579 + poll_reply->nec.system, poll_reply->nec.not_system,
6580 + poll_reply->nec.data, poll_reply->nec.not_data,
6581 purb->actual_length);
6582
6583 switch (d->props.rc.core.protocol) {
6584 @@ -704,30 +714,30 @@ static void dib0700_rc_urb_completion(struct urb *purb)
6585 toggle = 0;
6586
6587 /* NEC protocol sends repeat code as 0 0 0 FF */
6588 - if (poll_reply->system == 0x00 &&
6589 - poll_reply->not_system == 0x00 &&
6590 - poll_reply->data == 0x00 &&
6591 - poll_reply->not_data == 0xff) {
6592 + if (poll_reply->nec.system == 0x00 &&
6593 + poll_reply->nec.not_system == 0x00 &&
6594 + poll_reply->nec.data == 0x00 &&
6595 + poll_reply->nec.not_data == 0xff) {
6596 poll_reply->data_state = 2;
6597 break;
6598 }
6599
6600 - if ((poll_reply->data ^ poll_reply->not_data) != 0xff) {
6601 + if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
6602 deb_data("NEC32 protocol\n");
6603 - keycode = RC_SCANCODE_NEC32(poll_reply->system << 24 |
6604 - poll_reply->not_system << 16 |
6605 - poll_reply->data << 8 |
6606 - poll_reply->not_data);
6607 - } else if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
6608 + keycode = RC_SCANCODE_NEC32(poll_reply->nec.system << 24 |
6609 + poll_reply->nec.not_system << 16 |
6610 + poll_reply->nec.data << 8 |
6611 + poll_reply->nec.not_data);
6612 + } else if ((poll_reply->nec.system ^ poll_reply->nec.not_system) != 0xff) {
6613 deb_data("NEC extended protocol\n");
6614 - keycode = RC_SCANCODE_NECX(poll_reply->system << 8 |
6615 - poll_reply->not_system,
6616 - poll_reply->data);
6617 + keycode = RC_SCANCODE_NECX(poll_reply->nec.system << 8 |
6618 + poll_reply->nec.not_system,
6619 + poll_reply->nec.data);
6620
6621 } else {
6622 deb_data("NEC normal protocol\n");
6623 - keycode = RC_SCANCODE_NEC(poll_reply->system,
6624 - poll_reply->data);
6625 + keycode = RC_SCANCODE_NEC(poll_reply->nec.system,
6626 + poll_reply->nec.data);
6627 }
6628
6629 break;
6630 @@ -735,19 +745,19 @@ static void dib0700_rc_urb_completion(struct urb *purb)
6631 deb_data("RC5 protocol\n");
6632 protocol = RC_TYPE_RC5;
6633 toggle = poll_reply->report_id;
6634 - keycode = RC_SCANCODE_RC5(poll_reply->system, poll_reply->data);
6635 + keycode = RC_SCANCODE_RC5(poll_reply->rc5.system, poll_reply->rc5.data);
6636 +
6637 + if ((poll_reply->rc5.data ^ poll_reply->rc5.not_data) != 0xff) {
6638 + /* Key failed integrity check */
6639 + err("key failed integrity check: %02x %02x %02x %02x",
6640 + poll_reply->rc5.not_used, poll_reply->rc5.system,
6641 + poll_reply->rc5.data, poll_reply->rc5.not_data);
6642 + goto resubmit;
6643 + }
6644
6645 break;
6646 }
6647
6648 - if ((poll_reply->data + poll_reply->not_data) != 0xff) {
6649 - /* Key failed integrity check */
6650 - err("key failed integrity check: %02x %02x %02x %02x",
6651 - poll_reply->system, poll_reply->not_system,
6652 - poll_reply->data, poll_reply->not_data);
6653 - goto resubmit;
6654 - }
6655 -
6656 rc_keydown(d->rc_dev, protocol, keycode, toggle);
6657
6658 resubmit:
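The dib0700 hunks above split the single report layout into a union so the NEC and RC5 decoders read the same bytes under their own field names, and move the data/not_data integrity check into the RC5 branch where those bytes actually sit. A compilable sketch of the union idea, with made-up field contents and names:

#include <stdio.h>
#include <stdint.h>

/* One 6-byte report interpreted differently per protocol (illustrative only). */
struct demo_rc_response {
	uint8_t report_id;
	uint8_t data_state;
	union {
		struct { uint8_t system, not_system, data, not_data; } nec;
		struct { uint8_t not_used, system, data, not_data; } rc5;
	};
};

int main(void)
{
	struct demo_rc_response r = { .report_id = 1,
				      .nec = { 0x00, 0xff, 0x12, 0xed } };

	/* NEC view: data and not_data must be bitwise complements. */
	printf("nec ok: %d\n", (r.nec.data ^ r.nec.not_data) == 0xff);

	/* RC5 view of the same bytes shifts every field down by one position. */
	printf("rc5 system=0x%02x data=0x%02x\n", r.rc5.system, r.rc5.data);
	return 0;
}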
6659 diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
6660 index d7d55a20e959..c170523226aa 100644
6661 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c
6662 +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
6663 @@ -3944,6 +3944,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
6664
6665 DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
6666 }},
6667 + .size_of_priv = sizeof(struct
6668 + dib0700_adapter_state),
6669 }, {
6670 .num_frontends = 1,
6671 .fe = {{
6672 @@ -3956,6 +3958,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
6673
6674 DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
6675 }},
6676 + .size_of_priv = sizeof(struct
6677 + dib0700_adapter_state),
6678 }
6679 },
6680
6681 @@ -4009,6 +4013,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
6682
6683 DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
6684 }},
6685 + .size_of_priv = sizeof(struct
6686 + dib0700_adapter_state),
6687 },
6688 },
6689
6690 diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
6691 index 66ada01c796c..cf9d644a8aff 100644
6692 --- a/drivers/media/v4l2-core/videobuf2-core.c
6693 +++ b/drivers/media/v4l2-core/videobuf2-core.c
6694 @@ -1237,6 +1237,23 @@ void vb2_discard_done(struct vb2_queue *q)
6695 }
6696 EXPORT_SYMBOL_GPL(vb2_discard_done);
6697
6698 +static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
6699 +{
6700 + static bool __check_once __read_mostly;
6701 +
6702 + if (__check_once)
6703 + return;
6704 +
6705 + __check_once = true;
6706 + __WARN();
6707 +
6708 + pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
6709 + if (vb->vb2_queue->allow_zero_bytesused)
6710 + pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
6711 + else
6712 + pr_warn_once("use the actual size instead.\n");
6713 +}
6714 +
6715 /**
6716 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
6717 * v4l2_buffer by the userspace. The caller has already verified that struct
6718 @@ -1247,16 +1264,6 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
6719 {
6720 unsigned int plane;
6721
6722 - if (V4L2_TYPE_IS_OUTPUT(b->type)) {
6723 - if (WARN_ON_ONCE(b->bytesused == 0)) {
6724 - pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
6725 - if (vb->vb2_queue->allow_zero_bytesused)
6726 - pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
6727 - else
6728 - pr_warn_once("use the actual size instead.\n");
6729 - }
6730 - }
6731 -
6732 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
6733 if (b->memory == V4L2_MEMORY_USERPTR) {
6734 for (plane = 0; plane < vb->num_planes; ++plane) {
6735 @@ -1297,6 +1304,9 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
6736 struct v4l2_plane *pdst = &v4l2_planes[plane];
6737 struct v4l2_plane *psrc = &b->m.planes[plane];
6738
6739 + if (psrc->bytesused == 0)
6740 + vb2_warn_zero_bytesused(vb);
6741 +
6742 if (vb->vb2_queue->allow_zero_bytesused)
6743 pdst->bytesused = psrc->bytesused;
6744 else
6745 @@ -1331,6 +1341,9 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
6746 }
6747
6748 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
6749 + if (b->bytesused == 0)
6750 + vb2_warn_zero_bytesused(vb);
6751 +
6752 if (vb->vb2_queue->allow_zero_bytesused)
6753 v4l2_planes[0].bytesused = b->bytesused;
6754 else
6755 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
6756 index 60f7141a6b02..31d2627d9d4d 100644
6757 --- a/drivers/mmc/card/block.c
6758 +++ b/drivers/mmc/card/block.c
6759 @@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
6760
6761 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
6762
6763 + mmc_blk_put(md);
6764 +
6765 return ret;
6766 }
6767
6768 @@ -1910,9 +1912,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
6769 break;
6770 case MMC_BLK_CMD_ERR:
6771 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
6772 - if (!mmc_blk_reset(md, card->host, type))
6773 - break;
6774 - goto cmd_abort;
6775 + if (mmc_blk_reset(md, card->host, type))
6776 + goto cmd_abort;
6777 + if (!ret)
6778 + goto start_new_req;
6779 + break;
6780 case MMC_BLK_RETRY:
6781 if (retry++ < 5)
6782 break;
6783 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
6784 index 9231cdfe2757..d3dbb28057e9 100644
6785 --- a/drivers/mmc/host/sdhci.c
6786 +++ b/drivers/mmc/host/sdhci.c
6787 @@ -3315,13 +3315,14 @@ int sdhci_add_host(struct sdhci_host *host)
6788 SDHCI_MAX_CURRENT_MULTIPLIER;
6789 }
6790
6791 - /* If OCR set by external regulators, use it instead */
6792 + /* If OCR set by host, use it instead. */
6793 + if (host->ocr_mask)
6794 + ocr_avail = host->ocr_mask;
6795 +
6796 + /* If OCR set by external regulators, give it highest prio. */
6797 if (mmc->ocr_avail)
6798 ocr_avail = mmc->ocr_avail;
6799
6800 - if (host->ocr_mask)
6801 - ocr_avail &= host->ocr_mask;
6802 -
6803 mmc->ocr_avail = ocr_avail;
6804 mmc->ocr_avail_sdio = ocr_avail;
6805 if (host->ocr_avail_sdio)
6806 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
6807 index dc79ed85030b..32e77755a9c6 100644
6808 --- a/drivers/net/ethernet/intel/e1000e/82571.c
6809 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
6810 @@ -2010,7 +2010,7 @@ const struct e1000_info e1000_82573_info = {
6811 .flags2 = FLAG2_DISABLE_ASPM_L1
6812 | FLAG2_DISABLE_ASPM_L0S,
6813 .pba = 20,
6814 - .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
6815 + .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
6816 .get_variants = e1000_get_variants_82571,
6817 .mac_ops = &e82571_mac_ops,
6818 .phy_ops = &e82_phy_ops_m88,
6819 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
6820 index 9d81c0317433..e2498dbf3c3b 100644
6821 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
6822 +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
6823 @@ -1563,7 +1563,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
6824 ((adapter->hw.mac.type >= e1000_pch2lan) &&
6825 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
6826 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
6827 - adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
6828 + adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
6829
6830 hw->mac.ops.blink_led = NULL;
6831 }
6832 @@ -5681,7 +5681,7 @@ const struct e1000_info e1000_ich8_info = {
6833 | FLAG_HAS_FLASH
6834 | FLAG_APME_IN_WUC,
6835 .pba = 8,
6836 - .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
6837 + .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
6838 .get_variants = e1000_get_variants_ich8lan,
6839 .mac_ops = &ich8_mac_ops,
6840 .phy_ops = &ich8_phy_ops,
6841 @@ -5754,7 +5754,7 @@ const struct e1000_info e1000_pch2_info = {
6842 .flags2 = FLAG2_HAS_PHY_STATS
6843 | FLAG2_HAS_EEE,
6844 .pba = 26,
6845 - .max_hw_frame_size = 9018,
6846 + .max_hw_frame_size = 9022,
6847 .get_variants = e1000_get_variants_ich8lan,
6848 .mac_ops = &ich8_mac_ops,
6849 .phy_ops = &ich8_phy_ops,
6850 @@ -5774,7 +5774,7 @@ const struct e1000_info e1000_pch_lpt_info = {
6851 .flags2 = FLAG2_HAS_PHY_STATS
6852 | FLAG2_HAS_EEE,
6853 .pba = 26,
6854 - .max_hw_frame_size = 9018,
6855 + .max_hw_frame_size = 9022,
6856 .get_variants = e1000_get_variants_ich8lan,
6857 .mac_ops = &ich8_mac_ops,
6858 .phy_ops = &ich8_phy_ops,
6859 @@ -5794,7 +5794,7 @@ const struct e1000_info e1000_pch_spt_info = {
6860 .flags2 = FLAG2_HAS_PHY_STATS
6861 | FLAG2_HAS_EEE,
6862 .pba = 26,
6863 - .max_hw_frame_size = 9018,
6864 + .max_hw_frame_size = 9022,
6865 .get_variants = e1000_get_variants_ich8lan,
6866 .mac_ops = &ich8_mac_ops,
6867 .phy_ops = &ich8_phy_ops,
6868 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
6869 index c509a5c900f5..68913d103542 100644
6870 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
6871 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
6872 @@ -3807,7 +3807,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
6873 /* reset Packet Buffer Allocation to default */
6874 ew32(PBA, pba);
6875
6876 - if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
6877 + if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) {
6878 /* To maintain wire speed transmits, the Tx FIFO should be
6879 * large enough to accommodate two full transmit packets,
6880 * rounded up to the next 1KB and expressed in KB. Likewise,
6881 @@ -4196,9 +4196,9 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
6882 {
6883 struct net_device *netdev = adapter->netdev;
6884
6885 - adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
6886 + adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
6887 adapter->rx_ps_bsize0 = 128;
6888 - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
6889 + adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
6890 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
6891 adapter->tx_ring_count = E1000_DEFAULT_TXD;
6892 adapter->rx_ring_count = E1000_DEFAULT_RXD;
6893 @@ -5781,17 +5781,17 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
6894 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
6895 {
6896 struct e1000_adapter *adapter = netdev_priv(netdev);
6897 - int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
6898 + int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
6899
6900 /* Jumbo frame support */
6901 - if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
6902 + if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
6903 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
6904 e_err("Jumbo Frames not supported.\n");
6905 return -EINVAL;
6906 }
6907
6908 /* Supported frame sizes */
6909 - if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
6910 + if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) ||
6911 (max_frame > adapter->max_hw_frame_size)) {
6912 e_err("Unsupported MTU setting\n");
6913 return -EINVAL;
6914 @@ -5831,10 +5831,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
6915 adapter->rx_buffer_len = 4096;
6916
6917 /* adjust allocation if LPE protects us, and we aren't using SBP */
6918 - if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
6919 - (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
6920 - adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
6921 - + ETH_FCS_LEN;
6922 + if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
6923 + adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
6924
6925 if (netif_running(netdev))
6926 e1000e_up(adapter);
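The e1000e hunks above consistently switch the frame-size math from ETH_FRAME_LEN plus an ad-hoc VLAN_HLEN to VLAN_ETH_FRAME_LEN, so the non-jumbo limits account for a VLAN tag. Assuming the usual if_ether.h/if_vlan.h values (1514 for ETH_FRAME_LEN, 4 for VLAN_HLEN and ETH_FCS_LEN), the effective standard-MTU maximum on the wire goes from 1518 to 1522 bytes; a tiny check of that arithmetic:

#include <stdio.h>

/* Values as commonly defined in include/uapi/linux/if_ether.h and if_vlan.h. */
#define ETH_FRAME_LEN      1514   /* header + max payload, no FCS */
#define ETH_FCS_LEN        4
#define VLAN_HLEN          4
#define VLAN_ETH_FRAME_LEN (ETH_FRAME_LEN + VLAN_HLEN)

int main(void)
{
	printf("old limit: %d\n", ETH_FRAME_LEN + ETH_FCS_LEN);       /* 1518 */
	printf("new limit: %d\n", VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);  /* 1522 */
	return 0;
}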
6927 diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
6928 index e82a0d4ce23f..5dbc617ecf8a 100644
6929 --- a/drivers/net/wireless/ath/ath9k/htc.h
6930 +++ b/drivers/net/wireless/ath/ath9k/htc.h
6931 @@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
6932 }
6933 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
6934
6935 -#define OP_BT_PRIORITY_DETECTED BIT(3)
6936 -#define OP_BT_SCAN BIT(4)
6937 -#define OP_TSF_RESET BIT(6)
6938 +#define OP_BT_PRIORITY_DETECTED 3
6939 +#define OP_BT_SCAN 4
6940 +#define OP_TSF_RESET 6
6941
6942 enum htc_op_flags {
6943 HTC_FWFLAG_NO_RMW,
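The htc.h hunk above turns OP_BT_PRIORITY_DETECTED and friends from BIT(n) masks into plain bit numbers. That distinction matters wherever the values are consumed by bit-number APIs such as set_bit()/test_bit(): passing BIT(n) there addresses bit 2^n instead of bit n. A user-space sketch of the difference, with simplified stand-ins for the kernel bitops:

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Minimal stand-ins for the kernel's set_bit()/test_bit() bit-number helpers. */
static void demo_set_bit(unsigned int nr, unsigned long *word)  { *word |= 1UL << nr; }
static int  demo_test_bit(unsigned int nr, unsigned long *word) { return (*word >> nr) & 1UL; }

int main(void)
{
	unsigned long flags = 0;

	demo_set_bit(3, &flags);               /* correct: operate on bit number 3 */
	printf("bit 3 set: %d\n", demo_test_bit(3, &flags));

	flags = 0;
	demo_set_bit(BIT(3), &flags);          /* wrong: sets bit number 8 instead */
	printf("bit 3 set: %d, bit 8 set: %d\n",
	       demo_test_bit(3, &flags), demo_test_bit(BIT(3), &flags));
	return 0;
}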
6944 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
6945 index b0badef71ce7..d5f2fbf62d72 100644
6946 --- a/drivers/net/wireless/ath/ath9k/main.c
6947 +++ b/drivers/net/wireless/ath/ath9k/main.c
6948 @@ -216,11 +216,13 @@ static bool ath_prepare_reset(struct ath_softc *sc)
6949 ath_stop_ani(sc);
6950 ath9k_hw_disable_interrupts(ah);
6951
6952 - if (!ath_drain_all_txq(sc))
6953 - ret = false;
6954 -
6955 - if (!ath_stoprecv(sc))
6956 - ret = false;
6957 + if (AR_SREV_9300_20_OR_LATER(ah)) {
6958 + ret &= ath_stoprecv(sc);
6959 + ret &= ath_drain_all_txq(sc);
6960 + } else {
6961 + ret &= ath_drain_all_txq(sc);
6962 + ret &= ath_stoprecv(sc);
6963 + }
6964
6965 return ret;
6966 }
6967 diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
6968 index 9ac04c1ea706..8c17b943cc6f 100644
6969 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
6970 +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
6971 @@ -6,7 +6,7 @@
6972 * GPL LICENSE SUMMARY
6973 *
6974 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
6975 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
6976 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6977 *
6978 * This program is free software; you can redistribute it and/or modify
6979 * it under the terms of version 2 of the GNU General Public License as
6980 @@ -32,7 +32,7 @@
6981 * BSD LICENSE
6982 *
6983 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
6984 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
6985 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6986 * All rights reserved.
6987 *
6988 * Redistribution and use in source and binary forms, with or without
6989 @@ -1356,6 +1356,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
6990 PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
6991 PRINT_MVM_REF(IWL_MVM_REF_SCAN);
6992 PRINT_MVM_REF(IWL_MVM_REF_ROC);
6993 + PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
6994 PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
6995 PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
6996 PRINT_MVM_REF(IWL_MVM_REF_USER);
6997 diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
6998 index dda9f7b5f342..60c138a9bf4f 100644
6999 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
7000 +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
7001 @@ -1404,7 +1404,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
7002 * The work item could be running or queued if the
7003 * ROC time event stops just as we get here.
7004 */
7005 - cancel_work_sync(&mvm->roc_done_wk);
7006 + flush_work(&mvm->roc_done_wk);
7007
7008 iwl_trans_stop_device(mvm->trans);
7009
7010 diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
7011 index cf70f681d1ac..6af21daaaaef 100644
7012 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
7013 +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
7014 @@ -275,6 +275,7 @@ enum iwl_mvm_ref_type {
7015 IWL_MVM_REF_UCODE_DOWN,
7016 IWL_MVM_REF_SCAN,
7017 IWL_MVM_REF_ROC,
7018 + IWL_MVM_REF_ROC_AUX,
7019 IWL_MVM_REF_P2P_CLIENT,
7020 IWL_MVM_REF_AP_IBSS,
7021 IWL_MVM_REF_USER,
7022 diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
7023 index fd7b0d36f9a6..a7448cf01688 100644
7024 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
7025 +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
7026 @@ -6,7 +6,7 @@
7027 * GPL LICENSE SUMMARY
7028 *
7029 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
7030 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
7031 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
7032 *
7033 * This program is free software; you can redistribute it and/or modify
7034 * it under the terms of version 2 of the GNU General Public License as
7035 @@ -32,7 +32,7 @@
7036 * BSD LICENSE
7037 *
7038 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
7039 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
7040 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
7041 * All rights reserved.
7042 *
7043 * Redistribution and use in source and binary forms, with or without
7044 @@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
7045 * in the case that the time event actually completed in the firmware
7046 * (which is handled in iwl_mvm_te_handle_notif).
7047 */
7048 - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
7049 + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
7050 queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
7051 - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
7052 + iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
7053 + }
7054 + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
7055 queues |= BIT(mvm->aux_queue);
7056 -
7057 - iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
7058 + iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
7059 + }
7060
7061 synchronize_net();
7062
7063 @@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
7064 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
7065 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
7066 te_data->running = true;
7067 + iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
7068 ieee80211_ready_on_channel(mvm->hw); /* Start TE */
7069 } else {
7070 IWL_DEBUG_TE(mvm,
7071 diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
7072 index 86ce5b1930e6..e5d8108f1987 100644
7073 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
7074 +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
7075 @@ -1354,27 +1354,11 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
7076 }
7077 }
7078
7079 -static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw)
7080 -{
7081 - struct rtl_priv *rtlpriv = rtl_priv(hw);
7082 - u32 tmp;
7083 -
7084 - tmp = rtl_read_dword(rtlpriv, REG_HISR);
7085 - rtl_write_dword(rtlpriv, REG_HISR, tmp);
7086 -
7087 - tmp = rtl_read_dword(rtlpriv, REG_HISRE);
7088 - rtl_write_dword(rtlpriv, REG_HISRE, tmp);
7089 -
7090 - tmp = rtl_read_dword(rtlpriv, REG_HSISR);
7091 - rtl_write_dword(rtlpriv, REG_HSISR, tmp);
7092 -}
7093 -
7094 void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
7095 {
7096 struct rtl_priv *rtlpriv = rtl_priv(hw);
7097 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
7098
7099 - rtl88ee_clear_interrupt(hw);/*clear it here first*/
7100 rtl_write_dword(rtlpriv, REG_HIMR,
7101 rtlpci->irq_mask[0] & 0xFFFFFFFF);
7102 rtl_write_dword(rtlpriv, REG_HIMRE,
7103 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
7104 index da0a6125f314..cbf2ca7c7c6d 100644
7105 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
7106 +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
7107 @@ -1584,28 +1584,11 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci)
7108 }
7109 }
7110
7111 -static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw)
7112 -{
7113 - struct rtl_priv *rtlpriv = rtl_priv(hw);
7114 - u32 tmp;
7115 -
7116 - tmp = rtl_read_dword(rtlpriv, REG_HISR);
7117 - rtl_write_dword(rtlpriv, REG_HISR, tmp);
7118 -
7119 - tmp = rtl_read_dword(rtlpriv, REG_HISRE);
7120 - rtl_write_dword(rtlpriv, REG_HISRE, tmp);
7121 -
7122 - tmp = rtl_read_dword(rtlpriv, REG_HSISR);
7123 - rtl_write_dword(rtlpriv, REG_HSISR, tmp);
7124 -}
7125 -
7126 void rtl92ee_enable_interrupt(struct ieee80211_hw *hw)
7127 {
7128 struct rtl_priv *rtlpriv = rtl_priv(hw);
7129 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
7130
7131 - rtl92ee_clear_interrupt(hw);/*clear it here first*/
7132 -
7133 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
7134 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
7135 rtlpci->irq_enabled = true;
7136 diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
7137 index 67bb47d77b68..a4b7eac6856f 100644
7138 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
7139 +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
7140 @@ -1258,18 +1258,6 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci)
7141 }
7142 }
7143
7144 -static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw)
7145 -{
7146 - struct rtl_priv *rtlpriv = rtl_priv(hw);
7147 - u32 tmp;
7148 -
7149 - tmp = rtl_read_dword(rtlpriv, REG_HISR);
7150 - rtl_write_dword(rtlpriv, REG_HISR, tmp);
7151 -
7152 - tmp = rtl_read_dword(rtlpriv, REG_HISRE);
7153 - rtl_write_dword(rtlpriv, REG_HISRE, tmp);
7154 -}
7155 -
7156 void rtl8723e_enable_interrupt(struct ieee80211_hw *hw)
7157 {
7158 struct rtl_priv *rtlpriv = rtl_priv(hw);
7159 @@ -1284,7 +1272,6 @@ void rtl8723e_disable_interrupt(struct ieee80211_hw *hw)
7160 {
7161 struct rtl_priv *rtlpriv = rtl_priv(hw);
7162 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
7163 - rtl8723e_clear_interrupt(hw);/*clear it here first*/
7164 rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED);
7165 rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED);
7166 rtlpci->irq_enabled = false;
7167 diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
7168 index b681af3c7a35..b9417268427e 100644
7169 --- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
7170 +++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
7171 @@ -1634,28 +1634,11 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
7172 }
7173 }
7174
7175 -static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw)
7176 -{
7177 - struct rtl_priv *rtlpriv = rtl_priv(hw);
7178 - u32 tmp;
7179 -
7180 - tmp = rtl_read_dword(rtlpriv, REG_HISR);
7181 - rtl_write_dword(rtlpriv, REG_HISR, tmp);
7182 -
7183 - tmp = rtl_read_dword(rtlpriv, REG_HISRE);
7184 - rtl_write_dword(rtlpriv, REG_HISRE, tmp);
7185 -
7186 - tmp = rtl_read_dword(rtlpriv, REG_HSISR);
7187 - rtl_write_dword(rtlpriv, REG_HSISR, tmp);
7188 -}
7189 -
7190 void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
7191 {
7192 struct rtl_priv *rtlpriv = rtl_priv(hw);
7193 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
7194
7195 - rtl8723be_clear_interrupt(hw);/*clear it here first*/
7196 -
7197 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
7198 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
7199 rtlpci->irq_enabled = true;
7200 diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
7201 index 8704eee9f3a4..57966e3c8e8d 100644
7202 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
7203 +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
7204 @@ -2253,31 +2253,11 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
7205 }
7206 }
7207
7208 -static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
7209 -{
7210 - struct rtl_priv *rtlpriv = rtl_priv(hw);
7211 - u32 tmp;
7212 - tmp = rtl_read_dword(rtlpriv, REG_HISR);
7213 - /*printk("clear interrupt first:\n");
7214 - printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/
7215 - rtl_write_dword(rtlpriv, REG_HISR, tmp);
7216 -
7217 - tmp = rtl_read_dword(rtlpriv, REG_HISRE);
7218 - /*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/
7219 - rtl_write_dword(rtlpriv, REG_HISRE, tmp);
7220 -
7221 - tmp = rtl_read_dword(rtlpriv, REG_HSISR);
7222 - /*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/
7223 - rtl_write_dword(rtlpriv, REG_HSISR, tmp);
7224 -}
7225 -
7226 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
7227 {
7228 struct rtl_priv *rtlpriv = rtl_priv(hw);
7229 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
7230
7231 - rtl8821ae_clear_interrupt(hw);/*clear it here first*/
7232 -
7233 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
7234 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
7235 rtlpci->irq_enabled = true;
7236 diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
7237 index 76a4cad41cec..c44f8cf5391a 100644
7238 --- a/drivers/nfc/st21nfcb/i2c.c
7239 +++ b/drivers/nfc/st21nfcb/i2c.c
7240 @@ -87,11 +87,6 @@ static void st21nfcb_nci_i2c_disable(void *phy_id)
7241 gpio_set_value(phy->gpio_reset, 1);
7242 }
7243
7244 -static void st21nfcb_nci_remove_header(struct sk_buff *skb)
7245 -{
7246 - skb_pull(skb, ST21NFCB_FRAME_HEADROOM);
7247 -}
7248 -
7249 /*
7250 * Writing a frame must not return the number of written bytes.
7251 * It must return either zero for success, or <0 for error.
7252 @@ -121,8 +116,6 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
7253 r = 0;
7254 }
7255
7256 - st21nfcb_nci_remove_header(skb);
7257 -
7258 return r;
7259 }
7260
7261 @@ -366,9 +359,6 @@ static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
7262
7263 ndlc_remove(phy->ndlc);
7264
7265 - if (phy->powered)
7266 - st21nfcb_nci_i2c_disable(phy);
7267 -
7268 return 0;
7269 }
7270
7271 diff --git a/drivers/nfc/st21nfcb/st21nfcb.c b/drivers/nfc/st21nfcb/st21nfcb.c
7272 index ca9871ab3fb3..c7dc282d5c3b 100644
7273 --- a/drivers/nfc/st21nfcb/st21nfcb.c
7274 +++ b/drivers/nfc/st21nfcb/st21nfcb.c
7275 @@ -131,11 +131,8 @@ EXPORT_SYMBOL_GPL(st21nfcb_nci_probe);
7276
7277 void st21nfcb_nci_remove(struct nci_dev *ndev)
7278 {
7279 - struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
7280 -
7281 nci_unregister_device(ndev);
7282 nci_free_device(ndev);
7283 - kfree(info);
7284 }
7285 EXPORT_SYMBOL_GPL(st21nfcb_nci_remove);
7286
7287 diff --git a/drivers/of/address.c b/drivers/of/address.c
7288 index 6906a3f61bd8..8bfda6ade2c0 100644
7289 --- a/drivers/of/address.c
7290 +++ b/drivers/of/address.c
7291 @@ -712,7 +712,7 @@ int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
7292 }
7293
7294 /* add the range to the list */
7295 - range = kzalloc(sizeof(*range), GFP_KERNEL);
7296 + range = kzalloc(sizeof(*range), GFP_ATOMIC);
7297 if (!range) {
7298 err = -ENOMEM;
7299 goto end_register;
7300 diff --git a/drivers/of/base.c b/drivers/of/base.c
7301 index f0650265febf..5ed97246c2e7 100644
7302 --- a/drivers/of/base.c
7303 +++ b/drivers/of/base.c
7304 @@ -89,7 +89,7 @@ EXPORT_SYMBOL(of_n_size_cells);
7305 #ifdef CONFIG_NUMA
7306 int __weak of_node_to_nid(struct device_node *np)
7307 {
7308 - return numa_node_id();
7309 + return NUMA_NO_NODE;
7310 }
7311 #endif
7312
7313 diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
7314 index c6fc95b53083..ab54f2864451 100644
7315 --- a/drivers/phy/phy-berlin-usb.c
7316 +++ b/drivers/phy/phy-berlin-usb.c
7317 @@ -106,8 +106,8 @@
7318 static const u32 phy_berlin_pll_dividers[] = {
7319 /* Berlin 2 */
7320 CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
7321 - /* Berlin 2CD */
7322 - CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
7323 + /* Berlin 2CD/Q */
7324 + CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
7325 };
7326
7327 struct phy_berlin_usb_priv {
7328 diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
7329 index bc42d6a8939f..8882afbef688 100644
7330 --- a/drivers/phy/phy-twl4030-usb.c
7331 +++ b/drivers/phy/phy-twl4030-usb.c
7332 @@ -711,7 +711,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
7333 pm_runtime_use_autosuspend(&pdev->dev);
7334 pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
7335 pm_runtime_enable(&pdev->dev);
7336 - pm_runtime_get_sync(&pdev->dev);
7337
7338 /* Our job is to use irqs and status from the power module
7339 * to keep the transceiver disabled when nothing's connected.
7340 diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
7341 index 03aa58c4cb85..1eb084c3b0c9 100644
7342 --- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
7343 +++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
7344 @@ -370,11 +370,11 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
7345 MPP_MODE(64,
7346 MPP_FUNCTION(0x0, "gpio", NULL),
7347 MPP_FUNCTION(0x1, "spi0", "miso"),
7348 - MPP_FUNCTION(0x2, "spi0-1", "cs1")),
7349 + MPP_FUNCTION(0x2, "spi0", "cs1")),
7350 MPP_MODE(65,
7351 MPP_FUNCTION(0x0, "gpio", NULL),
7352 MPP_FUNCTION(0x1, "spi0", "mosi"),
7353 - MPP_FUNCTION(0x2, "spi0-1", "cs2")),
7354 + MPP_FUNCTION(0x2, "spi0", "cs2")),
7355 };
7356
7357 static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;
7358 diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
7359 index ca1e7571fedb..203291bde608 100644
7360 --- a/drivers/pinctrl/mvebu/pinctrl-armada-375.c
7361 +++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
7362 @@ -92,19 +92,17 @@ static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
7363 MPP_FUNCTION(0x5, "nand", "io1")),
7364 MPP_MODE(8,
7365 MPP_FUNCTION(0x0, "gpio", NULL),
7366 - MPP_FUNCTION(0x1, "dev ", "bootcs"),
7367 + MPP_FUNCTION(0x1, "dev", "bootcs"),
7368 MPP_FUNCTION(0x2, "spi0", "cs0"),
7369 MPP_FUNCTION(0x3, "spi1", "cs0"),
7370 MPP_FUNCTION(0x5, "nand", "ce")),
7371 MPP_MODE(9,
7372 MPP_FUNCTION(0x0, "gpio", NULL),
7373 - MPP_FUNCTION(0x1, "nf", "wen"),
7374 MPP_FUNCTION(0x2, "spi0", "sck"),
7375 MPP_FUNCTION(0x3, "spi1", "sck"),
7376 MPP_FUNCTION(0x5, "nand", "we")),
7377 MPP_MODE(10,
7378 MPP_FUNCTION(0x0, "gpio", NULL),
7379 - MPP_FUNCTION(0x1, "nf", "ren"),
7380 MPP_FUNCTION(0x2, "dram", "vttctrl"),
7381 MPP_FUNCTION(0x3, "led", "c1"),
7382 MPP_FUNCTION(0x5, "nand", "re"),
7383 diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
7384 index 83bbcc72be1f..ff411a53b5a4 100644
7385 --- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
7386 +++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
7387 @@ -94,37 +94,39 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
7388 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7389 MPP_VAR_FUNCTION(1, "ge0", "rxd0", V_88F6810_PLUS),
7390 MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS),
7391 - MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
7392 MPP_VAR_FUNCTION(4, "spi0", "cs1", V_88F6810_PLUS),
7393 - MPP_VAR_FUNCTION(5, "dev", "ad14", V_88F6810_PLUS)),
7394 + MPP_VAR_FUNCTION(5, "dev", "ad14", V_88F6810_PLUS),
7395 + MPP_VAR_FUNCTION(6, "pcie3", "clkreq", V_88F6810_PLUS)),
7396 MPP_MODE(13,
7397 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7398 MPP_VAR_FUNCTION(1, "ge0", "rxd1", V_88F6810_PLUS),
7399 MPP_VAR_FUNCTION(2, "pcie0", "clkreq", V_88F6810_PLUS),
7400 MPP_VAR_FUNCTION(3, "pcie1", "clkreq", V_88F6820_PLUS),
7401 MPP_VAR_FUNCTION(4, "spi0", "cs2", V_88F6810_PLUS),
7402 - MPP_VAR_FUNCTION(5, "dev", "ad15", V_88F6810_PLUS)),
7403 + MPP_VAR_FUNCTION(5, "dev", "ad15", V_88F6810_PLUS),
7404 + MPP_VAR_FUNCTION(6, "pcie2", "clkreq", V_88F6810_PLUS)),
7405 MPP_MODE(14,
7406 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7407 MPP_VAR_FUNCTION(1, "ge0", "rxd2", V_88F6810_PLUS),
7408 MPP_VAR_FUNCTION(2, "ptp", "clk", V_88F6810_PLUS),
7409 MPP_VAR_FUNCTION(3, "m", "vtt_ctrl", V_88F6810_PLUS),
7410 MPP_VAR_FUNCTION(4, "spi0", "cs3", V_88F6810_PLUS),
7411 - MPP_VAR_FUNCTION(5, "dev", "wen1", V_88F6810_PLUS)),
7412 + MPP_VAR_FUNCTION(5, "dev", "wen1", V_88F6810_PLUS),
7413 + MPP_VAR_FUNCTION(6, "pcie3", "clkreq", V_88F6810_PLUS)),
7414 MPP_MODE(15,
7415 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7416 MPP_VAR_FUNCTION(1, "ge0", "rxd3", V_88F6810_PLUS),
7417 MPP_VAR_FUNCTION(2, "ge", "mdc slave", V_88F6810_PLUS),
7418 MPP_VAR_FUNCTION(3, "pcie0", "rstout", V_88F6810_PLUS),
7419 - MPP_VAR_FUNCTION(4, "spi0", "mosi", V_88F6810_PLUS),
7420 - MPP_VAR_FUNCTION(5, "pcie1", "rstout", V_88F6820_PLUS)),
7421 + MPP_VAR_FUNCTION(4, "spi0", "mosi", V_88F6810_PLUS)),
7422 MPP_MODE(16,
7423 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7424 MPP_VAR_FUNCTION(1, "ge0", "rxctl", V_88F6810_PLUS),
7425 MPP_VAR_FUNCTION(2, "ge", "mdio slave", V_88F6810_PLUS),
7426 MPP_VAR_FUNCTION(3, "m", "decc_err", V_88F6810_PLUS),
7427 MPP_VAR_FUNCTION(4, "spi0", "miso", V_88F6810_PLUS),
7428 - MPP_VAR_FUNCTION(5, "pcie0", "clkreq", V_88F6810_PLUS)),
7429 + MPP_VAR_FUNCTION(5, "pcie0", "clkreq", V_88F6810_PLUS),
7430 + MPP_VAR_FUNCTION(6, "pcie1", "clkreq", V_88F6820_PLUS)),
7431 MPP_MODE(17,
7432 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7433 MPP_VAR_FUNCTION(1, "ge0", "rxclk", V_88F6810_PLUS),
7434 @@ -137,13 +139,12 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
7435 MPP_VAR_FUNCTION(1, "ge0", "rxerr", V_88F6810_PLUS),
7436 MPP_VAR_FUNCTION(2, "ptp", "trig_gen", V_88F6810_PLUS),
7437 MPP_VAR_FUNCTION(3, "ua1", "txd", V_88F6810_PLUS),
7438 - MPP_VAR_FUNCTION(4, "spi0", "cs0", V_88F6810_PLUS),
7439 - MPP_VAR_FUNCTION(5, "pcie1", "rstout", V_88F6820_PLUS)),
7440 + MPP_VAR_FUNCTION(4, "spi0", "cs0", V_88F6810_PLUS)),
7441 MPP_MODE(19,
7442 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7443 MPP_VAR_FUNCTION(1, "ge0", "col", V_88F6810_PLUS),
7444 MPP_VAR_FUNCTION(2, "ptp", "event_req", V_88F6810_PLUS),
7445 - MPP_VAR_FUNCTION(3, "pcie0", "clkreq", V_88F6810_PLUS),
7446 + MPP_VAR_FUNCTION(3, "ge0", "txerr", V_88F6810_PLUS),
7447 MPP_VAR_FUNCTION(4, "sata1", "prsnt", V_88F6810_PLUS),
7448 MPP_VAR_FUNCTION(5, "ua0", "cts", V_88F6810_PLUS),
7449 MPP_VAR_FUNCTION(6, "ua1", "rxd", V_88F6810_PLUS)),
7450 @@ -151,7 +152,6 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
7451 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7452 MPP_VAR_FUNCTION(1, "ge0", "txclk", V_88F6810_PLUS),
7453 MPP_VAR_FUNCTION(2, "ptp", "clk", V_88F6810_PLUS),
7454 - MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
7455 MPP_VAR_FUNCTION(4, "sata0", "prsnt", V_88F6810_PLUS),
7456 MPP_VAR_FUNCTION(5, "ua0", "rts", V_88F6810_PLUS),
7457 MPP_VAR_FUNCTION(6, "ua1", "txd", V_88F6810_PLUS)),
7458 @@ -277,35 +277,27 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
7459 MPP_VAR_FUNCTION(1, "pcie0", "clkreq", V_88F6810_PLUS),
7460 MPP_VAR_FUNCTION(2, "m", "vtt_ctrl", V_88F6810_PLUS),
7461 MPP_VAR_FUNCTION(3, "m", "decc_err", V_88F6810_PLUS),
7462 - MPP_VAR_FUNCTION(4, "pcie0", "rstout", V_88F6810_PLUS),
7463 + MPP_VAR_FUNCTION(4, "spi1", "cs2", V_88F6810_PLUS),
7464 MPP_VAR_FUNCTION(5, "dev", "clkout", V_88F6810_PLUS)),
7465 MPP_MODE(44,
7466 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7467 MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
7468 MPP_VAR_FUNCTION(2, "sata1", "prsnt", V_88F6810_PLUS),
7469 MPP_VAR_FUNCTION(3, "sata2", "prsnt", V_88F6828),
7470 - MPP_VAR_FUNCTION(4, "sata3", "prsnt", V_88F6828),
7471 - MPP_VAR_FUNCTION(5, "pcie0", "rstout", V_88F6810_PLUS)),
7472 + MPP_VAR_FUNCTION(4, "sata3", "prsnt", V_88F6828)),
7473 MPP_MODE(45,
7474 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7475 MPP_VAR_FUNCTION(1, "ref", "clk_out0", V_88F6810_PLUS),
7476 - MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS),
7477 - MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
7478 - MPP_VAR_FUNCTION(4, "pcie2", "rstout", V_88F6810_PLUS),
7479 - MPP_VAR_FUNCTION(5, "pcie3", "rstout", V_88F6810_PLUS)),
7480 + MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS)),
7481 MPP_MODE(46,
7482 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7483 MPP_VAR_FUNCTION(1, "ref", "clk_out1", V_88F6810_PLUS),
7484 - MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS),
7485 - MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
7486 - MPP_VAR_FUNCTION(4, "pcie2", "rstout", V_88F6810_PLUS),
7487 - MPP_VAR_FUNCTION(5, "pcie3", "rstout", V_88F6810_PLUS)),
7488 + MPP_VAR_FUNCTION(2, "pcie0", "rstout", V_88F6810_PLUS)),
7489 MPP_MODE(47,
7490 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7491 MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
7492 MPP_VAR_FUNCTION(2, "sata1", "prsnt", V_88F6810_PLUS),
7493 MPP_VAR_FUNCTION(3, "sata2", "prsnt", V_88F6828),
7494 - MPP_VAR_FUNCTION(4, "spi1", "cs2", V_88F6810_PLUS),
7495 MPP_VAR_FUNCTION(5, "sata3", "prsnt", V_88F6828)),
7496 MPP_MODE(48,
7497 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7498 @@ -313,18 +305,19 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
7499 MPP_VAR_FUNCTION(2, "m", "vtt_ctrl", V_88F6810_PLUS),
7500 MPP_VAR_FUNCTION(3, "tdm2c", "pclk", V_88F6810_PLUS),
7501 MPP_VAR_FUNCTION(4, "audio", "mclk", V_88F6810_PLUS),
7502 - MPP_VAR_FUNCTION(5, "sd0", "d4", V_88F6810_PLUS)),
7503 + MPP_VAR_FUNCTION(5, "sd0", "d4", V_88F6810_PLUS),
7504 + MPP_VAR_FUNCTION(6, "pcie0", "clkreq", V_88F6810_PLUS)),
7505 MPP_MODE(49,
7506 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7507 MPP_VAR_FUNCTION(1, "sata2", "prsnt", V_88F6828),
7508 MPP_VAR_FUNCTION(2, "sata3", "prsnt", V_88F6828),
7509 MPP_VAR_FUNCTION(3, "tdm2c", "fsync", V_88F6810_PLUS),
7510 MPP_VAR_FUNCTION(4, "audio", "lrclk", V_88F6810_PLUS),
7511 - MPP_VAR_FUNCTION(5, "sd0", "d5", V_88F6810_PLUS)),
7512 + MPP_VAR_FUNCTION(5, "sd0", "d5", V_88F6810_PLUS),
7513 + MPP_VAR_FUNCTION(6, "pcie1", "clkreq", V_88F6820_PLUS)),
7514 MPP_MODE(50,
7515 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7516 MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6810_PLUS),
7517 - MPP_VAR_FUNCTION(2, "pcie1", "rstout", V_88F6820_PLUS),
7518 MPP_VAR_FUNCTION(3, "tdm2c", "drx", V_88F6810_PLUS),
7519 MPP_VAR_FUNCTION(4, "audio", "extclk", V_88F6810_PLUS),
7520 MPP_VAR_FUNCTION(5, "sd0", "cmd", V_88F6810_PLUS)),
7521 @@ -336,7 +329,6 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
7522 MPP_MODE(52,
7523 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7524 MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6810_PLUS),
7525 - MPP_VAR_FUNCTION(2, "pcie1", "rstout", V_88F6820_PLUS),
7526 MPP_VAR_FUNCTION(3, "tdm2c", "intn", V_88F6810_PLUS),
7527 MPP_VAR_FUNCTION(4, "audio", "sdi", V_88F6810_PLUS),
7528 MPP_VAR_FUNCTION(5, "sd0", "d6", V_88F6810_PLUS)),
7529 @@ -352,7 +344,7 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
7530 MPP_VAR_FUNCTION(1, "sata0", "prsnt", V_88F6810_PLUS),
7531 MPP_VAR_FUNCTION(2, "sata1", "prsnt", V_88F6810_PLUS),
7532 MPP_VAR_FUNCTION(3, "pcie0", "rstout", V_88F6810_PLUS),
7533 - MPP_VAR_FUNCTION(4, "pcie1", "rstout", V_88F6820_PLUS),
7534 + MPP_VAR_FUNCTION(4, "ge0", "txerr", V_88F6810_PLUS),
7535 MPP_VAR_FUNCTION(5, "sd0", "d3", V_88F6810_PLUS)),
7536 MPP_MODE(55,
7537 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7538 @@ -382,7 +374,6 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
7539 MPP_VAR_FUNCTION(0, "gpio", NULL, V_88F6810_PLUS),
7540 MPP_VAR_FUNCTION(1, "pcie0", "rstout", V_88F6810_PLUS),
7541 MPP_VAR_FUNCTION(2, "i2c1", "sda", V_88F6810_PLUS),
7542 - MPP_VAR_FUNCTION(3, "pcie1", "rstout", V_88F6820_PLUS),
7543 MPP_VAR_FUNCTION(4, "spi1", "cs0", V_88F6810_PLUS),
7544 MPP_VAR_FUNCTION(5, "sd0", "d2", V_88F6810_PLUS)),
7545 };
7546 @@ -411,7 +402,7 @@ static struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
7547
7548 static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
7549 MPP_GPIO_RANGE(0, 0, 0, 32),
7550 - MPP_GPIO_RANGE(1, 32, 32, 27),
7551 + MPP_GPIO_RANGE(1, 32, 32, 28),
7552 };
7553
7554 static int armada_38x_pinctrl_probe(struct platform_device *pdev)
7555 diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
7556 index 42491624d660..2dcf9b41e01e 100644
7557 --- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
7558 +++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
7559 @@ -380,7 +380,7 @@ static struct mvebu_mpp_ctrl armada_39x_mpp_controls[] = {
7560
7561 static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = {
7562 MPP_GPIO_RANGE(0, 0, 0, 32),
7563 - MPP_GPIO_RANGE(1, 32, 32, 27),
7564 + MPP_GPIO_RANGE(1, 32, 32, 28),
7565 };
7566
7567 static int armada_39x_pinctrl_probe(struct platform_device *pdev)
7568 diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
7569 index 578db9f033b2..d7cdb146f44d 100644
7570 --- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
7571 +++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
7572 @@ -14,10 +14,7 @@
7573 * available: mv78230, mv78260 and mv78460. From a pin muxing
7574 * perspective, the mv78230 has 49 MPP pins. The mv78260 and mv78460
7575 * both have 67 MPP pins (more GPIOs and address lines for the memory
7576 - * bus mainly). The only difference between the mv78260 and the
7577 - * mv78460 in terms of pin muxing is the addition of two functions on
7578 - * pins 43 and 56 to access the VDD of the CPU2 and 3 (mv78260 has two
7579 - * cores, mv78460 has four cores).
7580 + * bus mainly).
7581 */
7582
7583 #include <linux/err.h>
7584 @@ -172,20 +169,17 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
7585 MPP_MODE(24,
7586 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7587 MPP_VAR_FUNCTION(0x1, "sata1", "prsnt", V_MV78230_PLUS),
7588 - MPP_VAR_FUNCTION(0x2, "nf", "bootcs-re", V_MV78230_PLUS),
7589 MPP_VAR_FUNCTION(0x3, "tdm", "rst", V_MV78230_PLUS),
7590 MPP_VAR_FUNCTION(0x4, "lcd", "hsync", V_MV78230_PLUS)),
7591 MPP_MODE(25,
7592 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7593 MPP_VAR_FUNCTION(0x1, "sata0", "prsnt", V_MV78230_PLUS),
7594 - MPP_VAR_FUNCTION(0x2, "nf", "bootcs-we", V_MV78230_PLUS),
7595 MPP_VAR_FUNCTION(0x3, "tdm", "pclk", V_MV78230_PLUS),
7596 MPP_VAR_FUNCTION(0x4, "lcd", "vsync", V_MV78230_PLUS)),
7597 MPP_MODE(26,
7598 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7599 MPP_VAR_FUNCTION(0x3, "tdm", "fsync", V_MV78230_PLUS),
7600 - MPP_VAR_FUNCTION(0x4, "lcd", "clk", V_MV78230_PLUS),
7601 - MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd", V_MV78230_PLUS)),
7602 + MPP_VAR_FUNCTION(0x4, "lcd", "clk", V_MV78230_PLUS)),
7603 MPP_MODE(27,
7604 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7605 MPP_VAR_FUNCTION(0x1, "ptp", "trig", V_MV78230_PLUS),
7606 @@ -200,8 +194,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
7607 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7608 MPP_VAR_FUNCTION(0x1, "ptp", "clk", V_MV78230_PLUS),
7609 MPP_VAR_FUNCTION(0x3, "tdm", "int0", V_MV78230_PLUS),
7610 - MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk", V_MV78230_PLUS),
7611 - MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)),
7612 + MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk", V_MV78230_PLUS)),
7613 MPP_MODE(30,
7614 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7615 MPP_VAR_FUNCTION(0x1, "sd0", "clk", V_MV78230_PLUS),
7616 @@ -209,13 +202,11 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
7617 MPP_MODE(31,
7618 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7619 MPP_VAR_FUNCTION(0x1, "sd0", "cmd", V_MV78230_PLUS),
7620 - MPP_VAR_FUNCTION(0x3, "tdm", "int2", V_MV78230_PLUS),
7621 - MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)),
7622 + MPP_VAR_FUNCTION(0x3, "tdm", "int2", V_MV78230_PLUS)),
7623 MPP_MODE(32,
7624 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7625 MPP_VAR_FUNCTION(0x1, "sd0", "d0", V_MV78230_PLUS),
7626 - MPP_VAR_FUNCTION(0x3, "tdm", "int3", V_MV78230_PLUS),
7627 - MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd", V_MV78230_PLUS)),
7628 + MPP_VAR_FUNCTION(0x3, "tdm", "int3", V_MV78230_PLUS)),
7629 MPP_MODE(33,
7630 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7631 MPP_VAR_FUNCTION(0x1, "sd0", "d1", V_MV78230_PLUS),
7632 @@ -247,7 +238,6 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
7633 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7634 MPP_VAR_FUNCTION(0x1, "spi", "cs1", V_MV78230_PLUS),
7635 MPP_VAR_FUNCTION(0x2, "uart2", "cts", V_MV78230_PLUS),
7636 - MPP_VAR_FUNCTION(0x3, "vdd", "cpu1-pd", V_MV78230_PLUS),
7637 MPP_VAR_FUNCTION(0x4, "lcd", "vga-hsync", V_MV78230_PLUS),
7638 MPP_VAR_FUNCTION(0x5, "pcie", "clkreq0", V_MV78230_PLUS)),
7639 MPP_MODE(41,
7640 @@ -262,15 +252,13 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
7641 MPP_VAR_FUNCTION(0x1, "uart2", "rxd", V_MV78230_PLUS),
7642 MPP_VAR_FUNCTION(0x2, "uart0", "cts", V_MV78230_PLUS),
7643 MPP_VAR_FUNCTION(0x3, "tdm", "int7", V_MV78230_PLUS),
7644 - MPP_VAR_FUNCTION(0x4, "tdm-1", "timer", V_MV78230_PLUS),
7645 - MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)),
7646 + MPP_VAR_FUNCTION(0x4, "tdm-1", "timer", V_MV78230_PLUS)),
7647 MPP_MODE(43,
7648 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7649 MPP_VAR_FUNCTION(0x1, "uart2", "txd", V_MV78230_PLUS),
7650 MPP_VAR_FUNCTION(0x2, "uart0", "rts", V_MV78230_PLUS),
7651 MPP_VAR_FUNCTION(0x3, "spi", "cs3", V_MV78230_PLUS),
7652 - MPP_VAR_FUNCTION(0x4, "pcie", "rstout", V_MV78230_PLUS),
7653 - MPP_VAR_FUNCTION(0x5, "vdd", "cpu2-3-pd", V_MV78460)),
7654 + MPP_VAR_FUNCTION(0x4, "pcie", "rstout", V_MV78230_PLUS)),
7655 MPP_MODE(44,
7656 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7657 MPP_VAR_FUNCTION(0x1, "uart2", "cts", V_MV78230_PLUS),
7658 @@ -299,7 +287,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
7659 MPP_VAR_FUNCTION(0x5, "pcie", "clkreq3", V_MV78230_PLUS)),
7660 MPP_MODE(48,
7661 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS),
7662 - MPP_VAR_FUNCTION(0x1, "tclk", NULL, V_MV78230_PLUS),
7663 + MPP_VAR_FUNCTION(0x1, "dev", "clkout", V_MV78230_PLUS),
7664 MPP_VAR_FUNCTION(0x2, "dev", "burst/last", V_MV78230_PLUS)),
7665 MPP_MODE(49,
7666 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
7667 @@ -321,16 +309,13 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
7668 MPP_VAR_FUNCTION(0x1, "dev", "ad19", V_MV78260_PLUS)),
7669 MPP_MODE(55,
7670 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
7671 - MPP_VAR_FUNCTION(0x1, "dev", "ad20", V_MV78260_PLUS),
7672 - MPP_VAR_FUNCTION(0x2, "vdd", "cpu0-pd", V_MV78260_PLUS)),
7673 + MPP_VAR_FUNCTION(0x1, "dev", "ad20", V_MV78260_PLUS)),
7674 MPP_MODE(56,
7675 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
7676 - MPP_VAR_FUNCTION(0x1, "dev", "ad21", V_MV78260_PLUS),
7677 - MPP_VAR_FUNCTION(0x2, "vdd", "cpu1-pd", V_MV78260_PLUS)),
7678 + MPP_VAR_FUNCTION(0x1, "dev", "ad21", V_MV78260_PLUS)),
7679 MPP_MODE(57,
7680 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
7681 - MPP_VAR_FUNCTION(0x1, "dev", "ad22", V_MV78260_PLUS),
7682 - MPP_VAR_FUNCTION(0x2, "vdd", "cpu2-3-pd", V_MV78460)),
7683 + MPP_VAR_FUNCTION(0x1, "dev", "ad22", V_MV78260_PLUS)),
7684 MPP_MODE(58,
7685 MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS),
7686 MPP_VAR_FUNCTION(0x1, "dev", "ad23", V_MV78260_PLUS)),
7687 diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
7688 index 22280bddb9e2..8c51a3c65513 100644
7689 --- a/drivers/pinctrl/pinctrl-zynq.c
7690 +++ b/drivers/pinctrl/pinctrl-zynq.c
7691 @@ -714,12 +714,13 @@ static const char * const gpio0_groups[] = {"gpio0_0_grp",
7692 .mux_val = mval, \
7693 }
7694
7695 -#define DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(fname, mval, mux, mask, shift) \
7696 +#define DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(fname, mval, offset, mask, shift)\
7697 [ZYNQ_PMUX_##fname] = { \
7698 .name = #fname, \
7699 .groups = fname##_groups, \
7700 .ngroups = ARRAY_SIZE(fname##_groups), \
7701 .mux_val = mval, \
7702 + .mux = offset, \
7703 .mux_mask = mask, \
7704 .mux_shift = shift, \
7705 }
7706 @@ -744,15 +745,15 @@ static const struct zynq_pinmux_function zynq_pmux_functions[] = {
7707 DEFINE_ZYNQ_PINMUX_FUNCTION(spi1, 0x50),
7708 DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0, 0x40),
7709 DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0_pc, 0xc),
7710 - DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_wp, 0, 130, ZYNQ_SDIO_WP_MASK,
7711 + DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_wp, 0, 0x130, ZYNQ_SDIO_WP_MASK,
7712 ZYNQ_SDIO_WP_SHIFT),
7713 - DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_cd, 0, 130, ZYNQ_SDIO_CD_MASK,
7714 + DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_cd, 0, 0x130, ZYNQ_SDIO_CD_MASK,
7715 ZYNQ_SDIO_CD_SHIFT),
7716 DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1, 0x40),
7717 DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1_pc, 0xc),
7718 - DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_wp, 0, 134, ZYNQ_SDIO_WP_MASK,
7719 + DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_wp, 0, 0x134, ZYNQ_SDIO_WP_MASK,
7720 ZYNQ_SDIO_WP_SHIFT),
7721 - DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_cd, 0, 134, ZYNQ_SDIO_CD_MASK,
7722 + DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_cd, 0, 0x134, ZYNQ_SDIO_CD_MASK,
7723 ZYNQ_SDIO_CD_SHIFT),
7724 DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor, 4),
7725 DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor_cs1, 8),
7726 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
7727 index d688d806a8a5..2c1d5f5432a9 100644
7728 --- a/drivers/platform/x86/dell-laptop.c
7729 +++ b/drivers/platform/x86/dell-laptop.c
7730 @@ -305,7 +305,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
7731 };
7732
7733 static struct calling_interface_buffer *buffer;
7734 -static struct page *bufferpage;
7735 static DEFINE_MUTEX(buffer_mutex);
7736
7737 static int hwswitch_state;
7738 @@ -1896,12 +1895,11 @@ static int __init dell_init(void)
7739 * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
7740 * is passed to SMI handler.
7741 */
7742 - bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32);
7743 - if (!bufferpage) {
7744 + buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
7745 + if (!buffer) {
7746 ret = -ENOMEM;
7747 goto fail_buffer;
7748 }
7749 - buffer = page_address(bufferpage);
7750
7751 ret = dell_setup_rfkill();
7752
7753 @@ -1965,7 +1963,7 @@ fail_backlight:
7754 cancel_delayed_work_sync(&dell_rfkill_work);
7755 dell_cleanup_rfkill();
7756 fail_rfkill:
7757 - free_page((unsigned long)bufferpage);
7758 + free_page((unsigned long)buffer);
7759 fail_buffer:
7760 platform_device_del(platform_device);
7761 fail_platform_device2:
7762 diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
7763 index b496db87bc05..cb7cd8d79329 100644
7764 --- a/drivers/platform/x86/ideapad-laptop.c
7765 +++ b/drivers/platform/x86/ideapad-laptop.c
7766 @@ -464,8 +464,9 @@ static const struct ideapad_rfk_data ideapad_rfk_data[] = {
7767 static int ideapad_rfk_set(void *data, bool blocked)
7768 {
7769 struct ideapad_rfk_priv *priv = data;
7770 + int opcode = ideapad_rfk_data[priv->dev].opcode;
7771
7772 - return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
7773 + return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked);
7774 }
7775
7776 static struct rfkill_ops ideapad_rfk_ops = {
7777 @@ -837,6 +838,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
7778 },
7779 },
7780 {
7781 + .ident = "Lenovo G50-30",
7782 + .matches = {
7783 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7784 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"),
7785 + },
7786 + },
7787 + {
7788 .ident = "Lenovo Yoga 2 11 / 13 / Pro",
7789 .matches = {
7790 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7791 diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
7792 index 515f33882ab8..49c1720df59a 100644
7793 --- a/drivers/pnp/system.c
7794 +++ b/drivers/pnp/system.c
7795 @@ -7,7 +7,6 @@
7796 * Bjorn Helgaas <bjorn.helgaas@hp.com>
7797 */
7798
7799 -#include <linux/acpi.h>
7800 #include <linux/pnp.h>
7801 #include <linux/device.h>
7802 #include <linux/init.h>
7803 @@ -23,41 +22,25 @@ static const struct pnp_device_id pnp_dev_table[] = {
7804 {"", 0}
7805 };
7806
7807 -#ifdef CONFIG_ACPI
7808 -static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
7809 -{
7810 - u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
7811 - return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
7812 -}
7813 -#else
7814 -static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
7815 -{
7816 - struct resource *res;
7817 -
7818 - res = io ? request_region(start, length, desc) :
7819 - request_mem_region(start, length, desc);
7820 - if (res) {
7821 - res->flags &= ~IORESOURCE_BUSY;
7822 - return true;
7823 - }
7824 - return false;
7825 -}
7826 -#endif
7827 -
7828 static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
7829 {
7830 char *regionid;
7831 const char *pnpid = dev_name(&dev->dev);
7832 resource_size_t start = r->start, end = r->end;
7833 - bool reserved;
7834 + struct resource *res;
7835
7836 regionid = kmalloc(16, GFP_KERNEL);
7837 if (!regionid)
7838 return;
7839
7840 snprintf(regionid, 16, "pnp %s", pnpid);
7841 - reserved = __reserve_range(start, end - start + 1, !!port, regionid);
7842 - if (!reserved)
7843 + if (port)
7844 + res = request_region(start, end - start + 1, regionid);
7845 + else
7846 + res = request_mem_region(start, end - start + 1, regionid);
7847 + if (res)
7848 + res->flags &= ~IORESOURCE_BUSY;
7849 + else
7850 kfree(regionid);
7851
7852 /*
7853 @@ -66,7 +49,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
7854 * have double reservations.
7855 */
7856 dev_info(&dev->dev, "%pR %s reserved\n", r,
7857 - reserved ? "has been" : "could not be");
7858 + res ? "has been" : "could not be");
7859 }
7860
7861 static void reserve_resources_of_dev(struct pnp_dev *dev)
7862 diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
7863 index 0479e807a776..d87a85cefb66 100644
7864 --- a/drivers/rtc/rtc-snvs.c
7865 +++ b/drivers/rtc/rtc-snvs.c
7866 @@ -322,6 +322,13 @@ static int snvs_rtc_suspend(struct device *dev)
7867 if (device_may_wakeup(dev))
7868 enable_irq_wake(data->irq);
7869
7870 + return 0;
7871 +}
7872 +
7873 +static int snvs_rtc_suspend_noirq(struct device *dev)
7874 +{
7875 + struct snvs_rtc_data *data = dev_get_drvdata(dev);
7876 +
7877 if (data->clk)
7878 clk_disable_unprepare(data->clk);
7879
7880 @@ -331,23 +338,28 @@ static int snvs_rtc_suspend(struct device *dev)
7881 static int snvs_rtc_resume(struct device *dev)
7882 {
7883 struct snvs_rtc_data *data = dev_get_drvdata(dev);
7884 - int ret;
7885
7886 if (device_may_wakeup(dev))
7887 - disable_irq_wake(data->irq);
7888 + return disable_irq_wake(data->irq);
7889
7890 - if (data->clk) {
7891 - ret = clk_prepare_enable(data->clk);
7892 - if (ret)
7893 - return ret;
7894 - }
7895 + return 0;
7896 +}
7897 +
7898 +static int snvs_rtc_resume_noirq(struct device *dev)
7899 +{
7900 + struct snvs_rtc_data *data = dev_get_drvdata(dev);
7901 +
7902 + if (data->clk)
7903 + return clk_prepare_enable(data->clk);
7904
7905 return 0;
7906 }
7907
7908 static const struct dev_pm_ops snvs_rtc_pm_ops = {
7909 - .suspend_noirq = snvs_rtc_suspend,
7910 - .resume_noirq = snvs_rtc_resume,
7911 + .suspend = snvs_rtc_suspend,
7912 + .suspend_noirq = snvs_rtc_suspend_noirq,
7913 + .resume = snvs_rtc_resume,
7914 + .resume_noirq = snvs_rtc_resume_noirq,
7915 };
7916
7917 #define SNVS_RTC_PM_OPS (&snvs_rtc_pm_ops)
7918 diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
7919 index c458e5010a74..4ebf5aae5019 100644
7920 --- a/drivers/staging/comedi/drivers/cb_pcimdas.c
7921 +++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
7922 @@ -243,7 +243,7 @@ static int cb_pcimdas_ao_insn_write(struct comedi_device *dev,
7923 return insn->n;
7924 }
7925
7926 -static int cb_pcimdas_di_insn_read(struct comedi_device *dev,
7927 +static int cb_pcimdas_di_insn_bits(struct comedi_device *dev,
7928 struct comedi_subdevice *s,
7929 struct comedi_insn *insn,
7930 unsigned int *data)
7931 @@ -258,7 +258,7 @@ static int cb_pcimdas_di_insn_read(struct comedi_device *dev,
7932 return insn->n;
7933 }
7934
7935 -static int cb_pcimdas_do_insn_write(struct comedi_device *dev,
7936 +static int cb_pcimdas_do_insn_bits(struct comedi_device *dev,
7937 struct comedi_subdevice *s,
7938 struct comedi_insn *insn,
7939 unsigned int *data)
7940 @@ -424,7 +424,7 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
7941 s->n_chan = 4;
7942 s->maxdata = 1;
7943 s->range_table = &range_digital;
7944 - s->insn_read = cb_pcimdas_di_insn_read;
7945 + s->insn_bits = cb_pcimdas_di_insn_bits;
7946
7947 /* Digital Output subdevice (main connector) */
7948 s = &dev->subdevices[4];
7949 @@ -433,7 +433,7 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
7950 s->n_chan = 4;
7951 s->maxdata = 1;
7952 s->range_table = &range_digital;
7953 - s->insn_write = cb_pcimdas_do_insn_write;
7954 + s->insn_bits = cb_pcimdas_do_insn_bits;
7955
7956 /* Counter subdevice (8254) */
7957 s = &dev->subdevices[5];
7958 diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
7959 index 50227b598e0c..fcb8c61b2884 100644
7960 --- a/drivers/staging/rtl8712/rtl8712_recv.c
7961 +++ b/drivers/staging/rtl8712/rtl8712_recv.c
7962 @@ -1056,7 +1056,8 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
7963 /* for first fragment packet, driver need allocate 1536 +
7964 * drvinfo_sz + RXDESC_SIZE to defrag packet. */
7965 if ((mf == 1) && (frag == 0))
7966 - alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/
7967 + /*1658+6=1664, 1664 is 128 alignment.*/
7968 + alloc_sz = max_t(u16, tmp_len, 1658);
7969 else
7970 alloc_sz = tmp_len;
7971 /* 2 is for IP header 4 bytes alignment in QoS packet case.
7972 diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
7973 index 0343ae386f03..15baacb126ad 100644
7974 --- a/drivers/staging/vt6655/device_main.c
7975 +++ b/drivers/staging/vt6655/device_main.c
7976 @@ -807,6 +807,10 @@ static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx)
7977 pRD = pRD->next) {
7978 if (works++ > 15)
7979 break;
7980 +
7981 + if (!pRD->pRDInfo->skb)
7982 + break;
7983 +
7984 if (vnt_receive_frame(pDevice, pRD)) {
7985 if (!device_alloc_rx_buf(pDevice, pRD)) {
7986 dev_err(&pDevice->pcid->dev,
7987 @@ -1417,7 +1421,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
7988
7989 priv->current_aid = conf->aid;
7990
7991 - if (changed & BSS_CHANGED_BSSID) {
7992 + if (changed & BSS_CHANGED_BSSID && conf->bssid) {
7993 unsigned long flags;
7994
7995 spin_lock_irqsave(&priv->lock, flags);
7996 diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
7997 index ab3ab84cb0a7..766fdcece074 100644
7998 --- a/drivers/staging/vt6656/main_usb.c
7999 +++ b/drivers/staging/vt6656/main_usb.c
8000 @@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
8001
8002 priv->current_aid = conf->aid;
8003
8004 - if (changed & BSS_CHANGED_BSSID)
8005 + if (changed & BSS_CHANGED_BSSID && conf->bssid)
8006 vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
8007
8008
8009 diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
8010 index f8120c1bde14..8cd35348fc19 100644
8011 --- a/drivers/tty/serial/Kconfig
8012 +++ b/drivers/tty/serial/Kconfig
8013 @@ -241,7 +241,6 @@ config SERIAL_SAMSUNG
8014 tristate "Samsung SoC serial support"
8015 depends on PLAT_SAMSUNG || ARCH_EXYNOS
8016 select SERIAL_CORE
8017 - select SERIAL_EARLYCON
8018 help
8019 Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
8020 providing /dev/ttySAC0, 1 and 2 (note, some machines may not
8021 @@ -277,6 +276,7 @@ config SERIAL_SAMSUNG_CONSOLE
8022 bool "Support for console on Samsung SoC serial port"
8023 depends on SERIAL_SAMSUNG=y
8024 select SERIAL_CORE_CONSOLE
8025 + select SERIAL_EARLYCON
8026 help
8027 Allow selection of the S3C24XX on-board serial ports for use as
8028 an virtual console.
8029 diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
8030 index 27dade29646b..5ca1dfb0561c 100644
8031 --- a/drivers/tty/serial/atmel_serial.c
8032 +++ b/drivers/tty/serial/atmel_serial.c
8033 @@ -315,8 +315,7 @@ static int atmel_config_rs485(struct uart_port *port,
8034 if (rs485conf->flags & SER_RS485_ENABLED) {
8035 dev_dbg(port->dev, "Setting UART to RS485\n");
8036 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
8037 - if ((rs485conf->delay_rts_after_send) > 0)
8038 - UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
8039 + UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
8040 mode |= ATMEL_US_USMODE_RS485;
8041 } else {
8042 dev_dbg(port->dev, "Setting UART to RS232\n");
8043 @@ -354,8 +353,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
8044
8045 /* override mode to RS485 if needed, otherwise keep the current mode */
8046 if (port->rs485.flags & SER_RS485_ENABLED) {
8047 - if ((port->rs485.delay_rts_after_send) > 0)
8048 - UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
8049 + UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
8050 mode &= ~ATMEL_US_USMODE;
8051 mode |= ATMEL_US_USMODE_RS485;
8052 }
8053 @@ -2061,8 +2059,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
8054
8055 /* mode */
8056 if (port->rs485.flags & SER_RS485_ENABLED) {
8057 - if ((port->rs485.delay_rts_after_send) > 0)
8058 - UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
8059 + UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
8060 mode |= ATMEL_US_USMODE_RS485;
8061 } else if (termios->c_cflag & CRTSCTS) {
8062 /* RS232 with hardware handshake (RTS/CTS) */
8063 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
8064 index 843f2cdc280b..9ffdfcf2ec6e 100644
8065 --- a/drivers/tty/sysrq.c
8066 +++ b/drivers/tty/sysrq.c
8067 @@ -55,9 +55,6 @@
8068 static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
8069 static bool __read_mostly sysrq_always_enabled;
8070
8071 -unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
8072 -int sysrq_reset_downtime_ms __weak;
8073 -
8074 static bool sysrq_on(void)
8075 {
8076 return sysrq_enabled || sysrq_always_enabled;
8077 @@ -569,6 +566,7 @@ void handle_sysrq(int key)
8078 EXPORT_SYMBOL(handle_sysrq);
8079
8080 #ifdef CONFIG_INPUT
8081 +static int sysrq_reset_downtime_ms;
8082
8083 /* Simple translation table for the SysRq keys */
8084 static const unsigned char sysrq_xlate[KEY_CNT] =
8085 @@ -949,23 +947,8 @@ static bool sysrq_handler_registered;
8086
8087 static inline void sysrq_register_handler(void)
8088 {
8089 - unsigned short key;
8090 int error;
8091 - int i;
8092 -
8093 - /* First check if a __weak interface was instantiated. */
8094 - for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
8095 - key = platform_sysrq_reset_seq[i];
8096 - if (key == KEY_RESERVED || key > KEY_MAX)
8097 - break;
8098 -
8099 - sysrq_reset_seq[sysrq_reset_seq_len++] = key;
8100 - }
8101
8102 - /*
8103 - * DT configuration takes precedence over anything that would
8104 - * have been defined via the __weak interface.
8105 - */
8106 sysrq_of_get_keyreset_config();
8107
8108 error = input_register_handler(&sysrq_handler);
8109 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
8110 index 4b0448c26810..986abde07683 100644
8111 --- a/drivers/usb/core/devio.c
8112 +++ b/drivers/usb/core/devio.c
8113 @@ -513,7 +513,7 @@ static void async_completed(struct urb *urb)
8114 snoop(&urb->dev->dev, "urb complete\n");
8115 snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
8116 as->status, COMPLETE, NULL, 0);
8117 - if ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_IN)
8118 + if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)
8119 snoop_urb_data(urb, urb->actual_length);
8120
8121 if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
8122 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
8123 index 45a915ccd71c..1c1385e3a824 100644
8124 --- a/drivers/usb/core/hcd.c
8125 +++ b/drivers/usb/core/hcd.c
8126 @@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
8127 dev_name(&usb_dev->dev), retval);
8128 return (retval < 0) ? retval : -EMSGSIZE;
8129 }
8130 - if (usb_dev->speed == USB_SPEED_SUPER) {
8131 +
8132 + if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
8133 retval = usb_get_bos_descriptor(usb_dev);
8134 - if (retval < 0) {
8135 + if (!retval) {
8136 + usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
8137 + } else if (usb_dev->speed == USB_SPEED_SUPER) {
8138 mutex_unlock(&usb_bus_list_lock);
8139 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
8140 dev_name(&usb_dev->dev), retval);
8141 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
8142 index 3b7151687776..1e9a8c9aa531 100644
8143 --- a/drivers/usb/core/hub.c
8144 +++ b/drivers/usb/core/hub.c
8145 @@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
8146 return usb_get_intfdata(hdev->actconfig->interface[0]);
8147 }
8148
8149 -static int usb_device_supports_lpm(struct usb_device *udev)
8150 +int usb_device_supports_lpm(struct usb_device *udev)
8151 {
8152 /* USB 2.1 (and greater) devices indicate LPM support through
8153 * their USB 2.0 Extended Capabilities BOS descriptor.
8154 @@ -2616,9 +2616,6 @@ static bool use_new_scheme(struct usb_device *udev, int retry)
8155 return USE_NEW_SCHEME(retry);
8156 }
8157
8158 -static int hub_port_reset(struct usb_hub *hub, int port1,
8159 - struct usb_device *udev, unsigned int delay, bool warm);
8160 -
8161 /* Is a USB 3.0 port in the Inactive or Compliance Mode state?
8162 * Port worm reset is required to recover
8163 */
8164 @@ -2706,44 +2703,6 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
8165 return 0;
8166 }
8167
8168 -static void hub_port_finish_reset(struct usb_hub *hub, int port1,
8169 - struct usb_device *udev, int *status)
8170 -{
8171 - switch (*status) {
8172 - case 0:
8173 - /* TRSTRCY = 10 ms; plus some extra */
8174 - msleep(10 + 40);
8175 - if (udev) {
8176 - struct usb_hcd *hcd = bus_to_hcd(udev->bus);
8177 -
8178 - update_devnum(udev, 0);
8179 - /* The xHC may think the device is already reset,
8180 - * so ignore the status.
8181 - */
8182 - if (hcd->driver->reset_device)
8183 - hcd->driver->reset_device(hcd, udev);
8184 - }
8185 - /* FALL THROUGH */
8186 - case -ENOTCONN:
8187 - case -ENODEV:
8188 - usb_clear_port_feature(hub->hdev,
8189 - port1, USB_PORT_FEAT_C_RESET);
8190 - if (hub_is_superspeed(hub->hdev)) {
8191 - usb_clear_port_feature(hub->hdev, port1,
8192 - USB_PORT_FEAT_C_BH_PORT_RESET);
8193 - usb_clear_port_feature(hub->hdev, port1,
8194 - USB_PORT_FEAT_C_PORT_LINK_STATE);
8195 - usb_clear_port_feature(hub->hdev, port1,
8196 - USB_PORT_FEAT_C_CONNECTION);
8197 - }
8198 - if (udev)
8199 - usb_set_device_state(udev, *status
8200 - ? USB_STATE_NOTATTACHED
8201 - : USB_STATE_DEFAULT);
8202 - break;
8203 - }
8204 -}
8205 -
8206 /* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
8207 static int hub_port_reset(struct usb_hub *hub, int port1,
8208 struct usb_device *udev, unsigned int delay, bool warm)
8209 @@ -2767,13 +2726,10 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
8210 * If the caller hasn't explicitly requested a warm reset,
8211 * double check and see if one is needed.
8212 */
8213 - status = hub_port_status(hub, port1,
8214 - &portstatus, &portchange);
8215 - if (status < 0)
8216 - goto done;
8217 -
8218 - if (hub_port_warm_reset_required(hub, port1, portstatus))
8219 - warm = true;
8220 + if (hub_port_status(hub, port1, &portstatus, &portchange) == 0)
8221 + if (hub_port_warm_reset_required(hub, port1,
8222 + portstatus))
8223 + warm = true;
8224 }
8225 clear_bit(port1, hub->warm_reset_bits);
8226
8227 @@ -2799,11 +2755,19 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
8228
8229 /* Check for disconnect or reset */
8230 if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
8231 - hub_port_finish_reset(hub, port1, udev, &status);
8232 + usb_clear_port_feature(hub->hdev, port1,
8233 + USB_PORT_FEAT_C_RESET);
8234
8235 if (!hub_is_superspeed(hub->hdev))
8236 goto done;
8237
8238 + usb_clear_port_feature(hub->hdev, port1,
8239 + USB_PORT_FEAT_C_BH_PORT_RESET);
8240 + usb_clear_port_feature(hub->hdev, port1,
8241 + USB_PORT_FEAT_C_PORT_LINK_STATE);
8242 + usb_clear_port_feature(hub->hdev, port1,
8243 + USB_PORT_FEAT_C_CONNECTION);
8244 +
8245 /*
8246 * If a USB 3.0 device migrates from reset to an error
8247 * state, re-issue the warm reset.
8248 @@ -2836,6 +2800,26 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
8249 dev_err(&port_dev->dev, "Cannot enable. Maybe the USB cable is bad?\n");
8250
8251 done:
8252 + if (status == 0) {
8253 + /* TRSTRCY = 10 ms; plus some extra */
8254 + msleep(10 + 40);
8255 + if (udev) {
8256 + struct usb_hcd *hcd = bus_to_hcd(udev->bus);
8257 +
8258 + update_devnum(udev, 0);
8259 + /* The xHC may think the device is already reset,
8260 + * so ignore the status.
8261 + */
8262 + if (hcd->driver->reset_device)
8263 + hcd->driver->reset_device(hcd, udev);
8264 +
8265 + usb_set_device_state(udev, USB_STATE_DEFAULT);
8266 + }
8267 + } else {
8268 + if (udev)
8269 + usb_set_device_state(udev, USB_STATE_NOTATTACHED);
8270 + }
8271 +
8272 if (!hub_is_superspeed(hub->hdev))
8273 up_read(&ehci_cf_port_reset_rwsem);
8274
8275 diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
8276 index 7eb1e26798e5..457255a3306a 100644
8277 --- a/drivers/usb/core/usb.h
8278 +++ b/drivers/usb/core/usb.h
8279 @@ -65,6 +65,7 @@ extern int usb_hub_init(void);
8280 extern void usb_hub_cleanup(void);
8281 extern int usb_major_init(void);
8282 extern void usb_major_cleanup(void);
8283 +extern int usb_device_supports_lpm(struct usb_device *udev);
8284
8285 #ifdef CONFIG_PM
8286
8287 diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
8288 index 2ef3c8d6a9db..69e769c35cf5 100644
8289 --- a/drivers/usb/dwc3/ep0.c
8290 +++ b/drivers/usb/dwc3/ep0.c
8291 @@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
8292 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
8293 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
8294 break;
8295 + case USB_REQ_SET_INTERFACE:
8296 + dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
8297 + dwc->start_config_issued = false;
8298 + /* Fall through */
8299 default:
8300 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
8301 ret = dwc3_ep0_delegate_req(dwc, ctrl);
8302 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
8303 index 8946c32047e9..333a7c0078fc 100644
8304 --- a/drivers/usb/dwc3/gadget.c
8305 +++ b/drivers/usb/dwc3/gadget.c
8306 @@ -291,6 +291,8 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
8307 dwc3_trace(trace_dwc3_gadget,
8308 "Command Complete --> %d",
8309 DWC3_DGCMD_STATUS(reg));
8310 + if (DWC3_DGCMD_STATUS(reg))
8311 + return -EINVAL;
8312 return 0;
8313 }
8314
8315 @@ -328,6 +330,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
8316 dwc3_trace(trace_dwc3_gadget,
8317 "Command Complete --> %d",
8318 DWC3_DEPCMD_STATUS(reg));
8319 + if (DWC3_DEPCMD_STATUS(reg))
8320 + return -EINVAL;
8321 return 0;
8322 }
8323
8324 @@ -1902,12 +1906,16 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
8325 {
8326 unsigned status = 0;
8327 int clean_busy;
8328 + u32 is_xfer_complete;
8329 +
8330 + is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
8331
8332 if (event->status & DEPEVT_STATUS_BUSERR)
8333 status = -ECONNRESET;
8334
8335 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
8336 - if (clean_busy)
8337 + if (clean_busy && (is_xfer_complete ||
8338 + usb_endpoint_xfer_isoc(dep->endpoint.desc)))
8339 dep->flags &= ~DWC3_EP_BUSY;
8340
8341 /*
8342 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
8343 index 4e3447bbd097..58b4657fc721 100644
8344 --- a/drivers/usb/gadget/composite.c
8345 +++ b/drivers/usb/gadget/composite.c
8346 @@ -1758,10 +1758,13 @@ unknown:
8347 * take such requests too, if that's ever needed: to work
8348 * in config 0, etc.
8349 */
8350 - list_for_each_entry(f, &cdev->config->functions, list)
8351 - if (f->req_match && f->req_match(f, ctrl))
8352 - goto try_fun_setup;
8353 - f = NULL;
8354 + if (cdev->config) {
8355 + list_for_each_entry(f, &cdev->config->functions, list)
8356 + if (f->req_match && f->req_match(f, ctrl))
8357 + goto try_fun_setup;
8358 + f = NULL;
8359 + }
8360 +
8361 switch (ctrl->bRequestType & USB_RECIP_MASK) {
8362 case USB_RECIP_INTERFACE:
8363 if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
8364 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
8365 index 45b8c8b338df..6e7be91e6097 100644
8366 --- a/drivers/usb/gadget/function/f_fs.c
8367 +++ b/drivers/usb/gadget/function/f_fs.c
8368 @@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
8369
8370 kiocb->private = p;
8371
8372 - kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
8373 + if (p->aio)
8374 + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
8375
8376 res = ffs_epfile_io(kiocb->ki_filp, p);
8377 if (res == -EIOCBQUEUED)
8378 @@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
8379
8380 kiocb->private = p;
8381
8382 - kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
8383 + if (p->aio)
8384 + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
8385
8386 res = ffs_epfile_io(kiocb->ki_filp, p);
8387 if (res == -EIOCBQUEUED)
8388 diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
8389 index 3cc109f3c9c8..15c307155037 100644
8390 --- a/drivers/usb/gadget/function/f_mass_storage.c
8391 +++ b/drivers/usb/gadget/function/f_mass_storage.c
8392 @@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
8393 return -EINVAL;
8394 }
8395
8396 - curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
8397 + curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
8398 if (unlikely(!curlun))
8399 return -ENOMEM;
8400
8401 @@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
8402 common->luns = curlun;
8403 common->nluns = nluns;
8404
8405 - pr_info("Number of LUNs=%d\n", common->nluns);
8406 -
8407 return 0;
8408 }
8409 EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
8410 @@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
8411 struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
8412 struct fsg_common *common = opts->common;
8413 struct fsg_dev *fsg;
8414 + unsigned nluns, i;
8415
8416 fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
8417 if (unlikely(!fsg))
8418 return ERR_PTR(-ENOMEM);
8419
8420 mutex_lock(&opts->lock);
8421 + if (!opts->refcnt) {
8422 + for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
8423 + if (common->luns[i])
8424 + nluns = i + 1;
8425 + if (!nluns)
8426 + pr_warn("No LUNS defined, continuing anyway\n");
8427 + else
8428 + common->nluns = nluns;
8429 + pr_info("Number of LUNs=%u\n", common->nluns);
8430 + }
8431 opts->refcnt++;
8432 mutex_unlock(&opts->lock);
8433 +
8434 fsg->function.name = FSG_DRIVER_DESC;
8435 fsg->function.bind = fsg_bind;
8436 fsg->function.unbind = fsg_unbind;
8437 diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
8438 index d32160d6463f..5da37c957b53 100644
8439 --- a/drivers/usb/gadget/udc/mv_udc_core.c
8440 +++ b/drivers/usb/gadget/udc/mv_udc_core.c
8441 @@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
8442 return -ENODEV;
8443 }
8444
8445 - udc->phy_regs = ioremap(r->start, resource_size(r));
8446 + udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
8447 if (udc->phy_regs == NULL) {
8448 dev_err(&pdev->dev, "failed to map phy I/O memory\n");
8449 return -EBUSY;
8450 diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
8451 index 1463c398d322..fe1d5fc7da2d 100644
8452 --- a/drivers/usb/host/ohci-q.c
8453 +++ b/drivers/usb/host/ohci-q.c
8454 @@ -980,10 +980,6 @@ rescan_all:
8455 int completed, modified;
8456 __hc32 *prev;
8457
8458 - /* Is this ED already invisible to the hardware? */
8459 - if (ed->state == ED_IDLE)
8460 - goto ed_idle;
8461 -
8462 /* only take off EDs that the HC isn't using, accounting for
8463 * frame counter wraps and EDs with partially retired TDs
8464 */
8465 @@ -1011,12 +1007,10 @@ skip_ed:
8466 }
8467
8468 /* ED's now officially unlinked, hc doesn't see */
8469 - ed->state = ED_IDLE;
8470 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
8471 ed->hwNextED = 0;
8472 wmb();
8473 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
8474 -ed_idle:
8475
8476 /* reentrancy: if we drop the schedule lock, someone might
8477 * have modified this list. normally it's just prepending
8478 @@ -1087,6 +1081,7 @@ rescan_this:
8479 if (list_empty(&ed->td_list)) {
8480 *last = ed->ed_next;
8481 ed->ed_next = NULL;
8482 + ed->state = ED_IDLE;
8483 list_del(&ed->in_use_list);
8484 } else if (ohci->rh_state == OHCI_RH_RUNNING) {
8485 *last = ed->ed_next;
8486 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
8487 index f8336408ef07..3e442f77a2b9 100644
8488 --- a/drivers/usb/host/xhci-mem.c
8489 +++ b/drivers/usb/host/xhci-mem.c
8490 @@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
8491 /* Attempt to use the ring cache */
8492 if (virt_dev->num_rings_cached == 0)
8493 return -ENOMEM;
8494 + virt_dev->num_rings_cached--;
8495 virt_dev->eps[ep_index].new_ring =
8496 virt_dev->ring_cache[virt_dev->num_rings_cached];
8497 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
8498 - virt_dev->num_rings_cached--;
8499 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
8500 1, type);
8501 }
8502 diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
8503 index 86c4b533e90b..4731baca377f 100644
8504 --- a/drivers/usb/musb/musb_virthub.c
8505 +++ b/drivers/usb/musb/musb_virthub.c
8506 @@ -273,9 +273,7 @@ static int musb_has_gadget(struct musb *musb)
8507 #ifdef CONFIG_USB_MUSB_HOST
8508 return 1;
8509 #else
8510 - if (musb->port_mode == MUSB_PORT_MODE_HOST)
8511 - return 1;
8512 - return musb->g.dev.driver != NULL;
8513 + return musb->port_mode == MUSB_PORT_MODE_HOST;
8514 #endif
8515 }
8516
8517 diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
8518 index 8f7cb068d29b..3fcc0483a081 100644
8519 --- a/drivers/usb/phy/phy-mxs-usb.c
8520 +++ b/drivers/usb/phy/phy-mxs-usb.c
8521 @@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
8522 {
8523 unsigned int vbus_value;
8524
8525 + if (!mxs_phy->regmap_anatop)
8526 + return false;
8527 +
8528 if (mxs_phy->port_id == 0)
8529 regmap_read(mxs_phy->regmap_anatop,
8530 ANADIG_USB1_VBUS_DET_STAT,
8531 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
8532 index ffd739e31bfc..eac7ccaa3c85 100644
8533 --- a/drivers/usb/serial/cp210x.c
8534 +++ b/drivers/usb/serial/cp210x.c
8535 @@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = {
8536 { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
8537 { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
8538 { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
8539 + { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
8540 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
8541 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
8542 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
8543 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
8544 index f0c0c53359ad..19b85ee98a72 100644
8545 --- a/drivers/usb/serial/option.c
8546 +++ b/drivers/usb/serial/option.c
8547 @@ -1765,6 +1765,7 @@ static const struct usb_device_id option_ids[] = {
8548 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
8549 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
8550 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
8551 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
8552 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
8553 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
8554 { } /* Terminating entry */
8555 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
8556 index 529066bbc7e8..46f1f13b41f1 100644
8557 --- a/drivers/usb/serial/usb-serial.c
8558 +++ b/drivers/usb/serial/usb-serial.c
8559 @@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void)
8560 tty_unregister_driver(usb_serial_tty_driver);
8561 put_tty_driver(usb_serial_tty_driver);
8562 bus_unregister(&usb_serial_bus_type);
8563 + idr_destroy(&serial_minors);
8564 }
8565
8566
8567 diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
8568 index 1f11a20a8ab9..55eb86c9e214 100644
8569 --- a/drivers/w1/slaves/w1_therm.c
8570 +++ b/drivers/w1/slaves/w1_therm.c
8571 @@ -59,16 +59,32 @@ MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
8572 static int w1_strong_pullup = 1;
8573 module_param_named(strong_pullup, w1_strong_pullup, int, 0);
8574
8575 +struct w1_therm_family_data {
8576 + uint8_t rom[9];
8577 + atomic_t refcnt;
8578 +};
8579 +
8580 +/* return the address of the refcnt in the family data */
8581 +#define THERM_REFCNT(family_data) \
8582 + (&((struct w1_therm_family_data*)family_data)->refcnt)
8583 +
8584 static int w1_therm_add_slave(struct w1_slave *sl)
8585 {
8586 - sl->family_data = kzalloc(9, GFP_KERNEL);
8587 + sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
8588 + GFP_KERNEL);
8589 if (!sl->family_data)
8590 return -ENOMEM;
8591 + atomic_set(THERM_REFCNT(sl->family_data), 1);
8592 return 0;
8593 }
8594
8595 static void w1_therm_remove_slave(struct w1_slave *sl)
8596 {
8597 + int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
8598 + while(refcnt) {
8599 + msleep(1000);
8600 + refcnt = atomic_read(THERM_REFCNT(sl->family_data));
8601 + }
8602 kfree(sl->family_data);
8603 sl->family_data = NULL;
8604 }
8605 @@ -194,13 +210,22 @@ static ssize_t w1_slave_show(struct device *device,
8606 struct w1_slave *sl = dev_to_w1_slave(device);
8607 struct w1_master *dev = sl->master;
8608 u8 rom[9], crc, verdict, external_power;
8609 - int i, max_trying = 10;
8610 + int i, ret, max_trying = 10;
8611 ssize_t c = PAGE_SIZE;
8612 + u8 *family_data = sl->family_data;
8613 +
8614 + ret = mutex_lock_interruptible(&dev->bus_mutex);
8615 + if (ret != 0)
8616 + goto post_unlock;
8617
8618 - i = mutex_lock_interruptible(&dev->bus_mutex);
8619 - if (i != 0)
8620 - return i;
8621 + if(!sl->family_data)
8622 + {
8623 + ret = -ENODEV;
8624 + goto pre_unlock;
8625 + }
8626
8627 + /* prevent the slave from going away in sleep */
8628 + atomic_inc(THERM_REFCNT(family_data));
8629 memset(rom, 0, sizeof(rom));
8630
8631 while (max_trying--) {
8632 @@ -230,17 +255,19 @@ static ssize_t w1_slave_show(struct device *device,
8633 mutex_unlock(&dev->bus_mutex);
8634
8635 sleep_rem = msleep_interruptible(tm);
8636 - if (sleep_rem != 0)
8637 - return -EINTR;
8638 + if (sleep_rem != 0) {
8639 + ret = -EINTR;
8640 + goto post_unlock;
8641 + }
8642
8643 - i = mutex_lock_interruptible(&dev->bus_mutex);
8644 - if (i != 0)
8645 - return i;
8646 + ret = mutex_lock_interruptible(&dev->bus_mutex);
8647 + if (ret != 0)
8648 + goto post_unlock;
8649 } else if (!w1_strong_pullup) {
8650 sleep_rem = msleep_interruptible(tm);
8651 if (sleep_rem != 0) {
8652 - mutex_unlock(&dev->bus_mutex);
8653 - return -EINTR;
8654 + ret = -EINTR;
8655 + goto pre_unlock;
8656 }
8657 }
8658
8659 @@ -269,19 +296,24 @@ static ssize_t w1_slave_show(struct device *device,
8660 c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
8661 crc, (verdict) ? "YES" : "NO");
8662 if (verdict)
8663 - memcpy(sl->family_data, rom, sizeof(rom));
8664 + memcpy(family_data, rom, sizeof(rom));
8665 else
8666 dev_warn(device, "Read failed CRC check\n");
8667
8668 for (i = 0; i < 9; ++i)
8669 c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
8670 - ((u8 *)sl->family_data)[i]);
8671 + ((u8 *)family_data)[i]);
8672
8673 c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
8674 w1_convert_temp(rom, sl->family->fid));
8675 + ret = PAGE_SIZE - c;
8676 +
8677 +pre_unlock:
8678 mutex_unlock(&dev->bus_mutex);
8679
8680 - return PAGE_SIZE - c;
8681 +post_unlock:
8682 + atomic_dec(THERM_REFCNT(family_data));
8683 + return ret;
8684 }
8685
8686 static int __init w1_therm_init(void)
8687 diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
8688 index 1e6be9e40577..c9c97dacf452 100644
8689 --- a/drivers/watchdog/omap_wdt.c
8690 +++ b/drivers/watchdog/omap_wdt.c
8691 @@ -132,6 +132,13 @@ static int omap_wdt_start(struct watchdog_device *wdog)
8692
8693 pm_runtime_get_sync(wdev->dev);
8694
8695 + /*
8696 + * Make sure the watchdog is disabled. This is unfortunately required
8697 + * because writing to various registers with the watchdog running has no
8698 + * effect.
8699 + */
8700 + omap_wdt_disable(wdev);
8701 +
8702 /* initialize prescaler */
8703 while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01)
8704 cpu_relax();
8705 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
8706 index 703342e309f5..53f1e8a21707 100644
8707 --- a/fs/9p/vfs_inode.c
8708 +++ b/fs/9p/vfs_inode.c
8709 @@ -540,8 +540,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
8710 unlock_new_inode(inode);
8711 return inode;
8712 error:
8713 - unlock_new_inode(inode);
8714 - iput(inode);
8715 + iget_failed(inode);
8716 return ERR_PTR(retval);
8717
8718 }
8719 diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
8720 index 9861c7c951a6..4d3ecfb55fcf 100644
8721 --- a/fs/9p/vfs_inode_dotl.c
8722 +++ b/fs/9p/vfs_inode_dotl.c
8723 @@ -149,8 +149,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
8724 unlock_new_inode(inode);
8725 return inode;
8726 error:
8727 - unlock_new_inode(inode);
8728 - iput(inode);
8729 + iget_failed(inode);
8730 return ERR_PTR(retval);
8731
8732 }
8733 diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
8734 index f6a596d5a637..d4a582ac3f73 100644
8735 --- a/fs/btrfs/inode-map.c
8736 +++ b/fs/btrfs/inode-map.c
8737 @@ -246,6 +246,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
8738 {
8739 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
8740 struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
8741 + spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
8742 struct btrfs_free_space *info;
8743 struct rb_node *n;
8744 u64 count;
8745 @@ -254,24 +255,30 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
8746 return;
8747
8748 while (1) {
8749 + bool add_to_ctl = true;
8750 +
8751 + spin_lock(rbroot_lock);
8752 n = rb_first(rbroot);
8753 - if (!n)
8754 + if (!n) {
8755 + spin_unlock(rbroot_lock);
8756 break;
8757 + }
8758
8759 info = rb_entry(n, struct btrfs_free_space, offset_index);
8760 BUG_ON(info->bitmap); /* Logic error */
8761
8762 if (info->offset > root->ino_cache_progress)
8763 - goto free;
8764 + add_to_ctl = false;
8765 else if (info->offset + info->bytes > root->ino_cache_progress)
8766 count = root->ino_cache_progress - info->offset + 1;
8767 else
8768 count = info->bytes;
8769
8770 - __btrfs_add_free_space(ctl, info->offset, count);
8771 -free:
8772 rb_erase(&info->offset_index, rbroot);
8773 - kfree(info);
8774 + spin_unlock(rbroot_lock);
8775 + if (add_to_ctl)
8776 + __btrfs_add_free_space(ctl, info->offset, count);
8777 + kmem_cache_free(btrfs_free_space_cachep, info);
8778 }
8779 }
8780
8781 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
8782 index 1c22c6518504..37d456a9a3b8 100644
8783 --- a/fs/btrfs/ioctl.c
8784 +++ b/fs/btrfs/ioctl.c
8785 @@ -2413,8 +2413,6 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
8786 goto out_unlock_inode;
8787 }
8788
8789 - d_invalidate(dentry);
8790 -
8791 down_write(&root->fs_info->subvol_sem);
8792
8793 err = may_destroy_subvol(dest);
8794 @@ -2508,7 +2506,7 @@ out_up_write:
8795 out_unlock_inode:
8796 mutex_unlock(&inode->i_mutex);
8797 if (!err) {
8798 - shrink_dcache_sb(root->fs_info->sb);
8799 + d_invalidate(dentry);
8800 btrfs_invalidate_inodes(dest);
8801 d_delete(dentry);
8802 ASSERT(dest->send_in_progress == 0);
8803 @@ -2940,7 +2938,7 @@ out_unlock:
8804 static long btrfs_ioctl_file_extent_same(struct file *file,
8805 struct btrfs_ioctl_same_args __user *argp)
8806 {
8807 - struct btrfs_ioctl_same_args *same;
8808 + struct btrfs_ioctl_same_args *same = NULL;
8809 struct btrfs_ioctl_same_extent_info *info;
8810 struct inode *src = file_inode(file);
8811 u64 off;
8812 @@ -2970,6 +2968,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
8813
8814 if (IS_ERR(same)) {
8815 ret = PTR_ERR(same);
8816 + same = NULL;
8817 goto out;
8818 }
8819
8820 @@ -3040,6 +3039,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
8821
8822 out:
8823 mnt_drop_write_file(file);
8824 + kfree(same);
8825 return ret;
8826 }
8827
8828 @@ -3434,6 +3434,20 @@ process_slot:
8829 u64 trim = 0;
8830 u64 aligned_end = 0;
8831
8832 + /*
8833 + * Don't copy an inline extent into an offset
8834 + * greater than zero. Having an inline extent
8835 + * at such an offset results in chaos as btrfs
8836 + * isn't prepared for such cases. Just skip
8837 + * this case for the same reasons as commented
8838 + * at btrfs_ioctl_clone().
8839 + */
8840 + if (last_dest_end > 0) {
8841 + ret = -EOPNOTSUPP;
8842 + btrfs_end_transaction(trans, root);
8843 + goto out;
8844 + }
8845 +
8846 if (off > key.offset) {
8847 skip = off - key.offset;
8848 new_key.offset += skip;
8849 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
8850 index 5628e25250c0..94e909c5a503 100644
8851 --- a/fs/btrfs/transaction.c
8852 +++ b/fs/btrfs/transaction.c
8853 @@ -758,7 +758,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
8854
8855 if (!list_empty(&trans->ordered)) {
8856 spin_lock(&info->trans_lock);
8857 - list_splice(&trans->ordered, &cur_trans->pending_ordered);
8858 + list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
8859 spin_unlock(&info->trans_lock);
8860 }
8861
8862 @@ -1848,7 +1848,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
8863 }
8864
8865 spin_lock(&root->fs_info->trans_lock);
8866 - list_splice(&trans->ordered, &cur_trans->pending_ordered);
8867 + list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
8868 if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
8869 spin_unlock(&root->fs_info->trans_lock);
8870 atomic_inc(&cur_trans->use_count);
8871 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
8872 index d04968374e9d..4920fceffacb 100644
8873 --- a/fs/btrfs/tree-log.c
8874 +++ b/fs/btrfs/tree-log.c
8875 @@ -4161,6 +4161,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
8876 u64 ino = btrfs_ino(inode);
8877 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
8878 u64 logged_isize = 0;
8879 + bool need_log_inode_item = true;
8880
8881 path = btrfs_alloc_path();
8882 if (!path)
8883 @@ -4269,11 +4270,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
8884 } else {
8885 if (inode_only == LOG_INODE_ALL)
8886 fast_search = true;
8887 - ret = log_inode_item(trans, log, dst_path, inode);
8888 - if (ret) {
8889 - err = ret;
8890 - goto out_unlock;
8891 - }
8892 goto log_extents;
8893 }
8894
8895 @@ -4296,6 +4292,9 @@ again:
8896 if (min_key.type > max_key.type)
8897 break;
8898
8899 + if (min_key.type == BTRFS_INODE_ITEM_KEY)
8900 + need_log_inode_item = false;
8901 +
8902 src = path->nodes[0];
8903 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
8904 ins_nr++;
8905 @@ -4366,6 +4365,11 @@ next_slot:
8906 log_extents:
8907 btrfs_release_path(path);
8908 btrfs_release_path(dst_path);
8909 + if (need_log_inode_item) {
8910 + err = log_inode_item(trans, log, dst_path, inode);
8911 + if (err)
8912 + goto out_unlock;
8913 + }
8914 if (fast_search) {
8915 /*
8916 * Some ordered extents started by fsync might have completed
8917 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
8918 index e003a1e81dc3..87ba10d1d3bc 100644
8919 --- a/fs/ext4/extents.c
8920 +++ b/fs/ext4/extents.c
8921 @@ -503,7 +503,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
8922 struct buffer_head *bh;
8923 int err;
8924
8925 - bh = sb_getblk(inode->i_sb, pblk);
8926 + bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
8927 if (unlikely(!bh))
8928 return ERR_PTR(-ENOMEM);
8929
8930 @@ -1088,7 +1088,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
8931 err = -EIO;
8932 goto cleanup;
8933 }
8934 - bh = sb_getblk(inode->i_sb, newblock);
8935 + bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
8936 if (unlikely(!bh)) {
8937 err = -ENOMEM;
8938 goto cleanup;
8939 @@ -1282,7 +1282,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
8940 if (newblock == 0)
8941 return err;
8942
8943 - bh = sb_getblk(inode->i_sb, newblock);
8944 + bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
8945 if (unlikely(!bh))
8946 return -ENOMEM;
8947 lock_buffer(bh);
8948 diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
8949 index 958824019509..94ae6874c2cb 100644
8950 --- a/fs/ext4/indirect.c
8951 +++ b/fs/ext4/indirect.c
8952 @@ -565,7 +565,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
8953 EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
8954 EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
8955 "non-extent mapped inodes with bigalloc");
8956 - return -ENOSPC;
8957 + return -EUCLEAN;
8958 }
8959
8960 /* Set up for the direct block allocation */
8961 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
8962 index 0554b0b5957b..966c614822cc 100644
8963 --- a/fs/ext4/inode.c
8964 +++ b/fs/ext4/inode.c
8965 @@ -1342,7 +1342,7 @@ static void ext4_da_page_release_reservation(struct page *page,
8966 unsigned int offset,
8967 unsigned int length)
8968 {
8969 - int to_release = 0;
8970 + int to_release = 0, contiguous_blks = 0;
8971 struct buffer_head *head, *bh;
8972 unsigned int curr_off = 0;
8973 struct inode *inode = page->mapping->host;
8974 @@ -1363,14 +1363,23 @@ static void ext4_da_page_release_reservation(struct page *page,
8975
8976 if ((offset <= curr_off) && (buffer_delay(bh))) {
8977 to_release++;
8978 + contiguous_blks++;
8979 clear_buffer_delay(bh);
8980 + } else if (contiguous_blks) {
8981 + lblk = page->index <<
8982 + (PAGE_CACHE_SHIFT - inode->i_blkbits);
8983 + lblk += (curr_off >> inode->i_blkbits) -
8984 + contiguous_blks;
8985 + ext4_es_remove_extent(inode, lblk, contiguous_blks);
8986 + contiguous_blks = 0;
8987 }
8988 curr_off = next_off;
8989 } while ((bh = bh->b_this_page) != head);
8990
8991 - if (to_release) {
8992 + if (contiguous_blks) {
8993 lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
8994 - ext4_es_remove_extent(inode, lblk, to_release);
8995 + lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
8996 + ext4_es_remove_extent(inode, lblk, contiguous_blks);
8997 }
8998
8999 /* If we have released all the blocks belonging to a cluster, then we
9000 @@ -1701,19 +1710,32 @@ static int __ext4_journalled_writepage(struct page *page,
9001 ext4_walk_page_buffers(handle, page_bufs, 0, len,
9002 NULL, bget_one);
9003 }
9004 - /* As soon as we unlock the page, it can go away, but we have
9005 - * references to buffers so we are safe */
9006 + /*
9007 + * We need to release the page lock before we start the
9008 + * journal, so grab a reference so the page won't disappear
9009 + * out from under us.
9010 + */
9011 + get_page(page);
9012 unlock_page(page);
9013
9014 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
9015 ext4_writepage_trans_blocks(inode));
9016 if (IS_ERR(handle)) {
9017 ret = PTR_ERR(handle);
9018 - goto out;
9019 + put_page(page);
9020 + goto out_no_pagelock;
9021 }
9022 -
9023 BUG_ON(!ext4_handle_valid(handle));
9024
9025 + lock_page(page);
9026 + put_page(page);
9027 + if (page->mapping != mapping) {
9028 + /* The page got truncated from under us */
9029 + ext4_journal_stop(handle);
9030 + ret = 0;
9031 + goto out;
9032 + }
9033 +
9034 if (inline_data) {
9035 BUFFER_TRACE(inode_bh, "get write access");
9036 ret = ext4_journal_get_write_access(handle, inode_bh);
9037 @@ -1739,6 +1761,8 @@ static int __ext4_journalled_writepage(struct page *page,
9038 NULL, bput_one);
9039 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
9040 out:
9041 + unlock_page(page);
9042 +out_no_pagelock:
9043 brelse(inode_bh);
9044 return ret;
9045 }
9046 @@ -4345,7 +4369,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
9047 int inode_size = EXT4_INODE_SIZE(sb);
9048
9049 oi.orig_ino = orig_ino;
9050 - ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
9051 + /*
9052 + * Calculate the first inode in the inode table block. Inode
9053 + * numbers are one-based. That is, the first inode in a block
9054 + * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
9055 + */
9056 + ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
9057 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
9058 if (ino == orig_ino)
9059 continue;
9060 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
9061 index 8d1e60214ef0..41260489d3bc 100644
9062 --- a/fs/ext4/mballoc.c
9063 +++ b/fs/ext4/mballoc.c
9064 @@ -4800,18 +4800,12 @@ do_more:
9065 /*
9066 * blocks being freed are metadata. these blocks shouldn't
9067 * be used until this transaction is committed
9068 + *
9069 + * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
9070 + * to fail.
9071 */
9072 - retry:
9073 - new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
9074 - if (!new_entry) {
9075 - /*
9076 - * We use a retry loop because
9077 - * ext4_free_blocks() is not allowed to fail.
9078 - */
9079 - cond_resched();
9080 - congestion_wait(BLK_RW_ASYNC, HZ/50);
9081 - goto retry;
9082 - }
9083 + new_entry = kmem_cache_alloc(ext4_free_data_cachep,
9084 + GFP_NOFS|__GFP_NOFAIL);
9085 new_entry->efd_start_cluster = bit;
9086 new_entry->efd_group = block_group;
9087 new_entry->efd_count = count_clusters;
9088 diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
9089 index b52374e42102..6163ad21cb0e 100644
9090 --- a/fs/ext4/migrate.c
9091 +++ b/fs/ext4/migrate.c
9092 @@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode)
9093 struct ext4_inode_info *ei = EXT4_I(inode);
9094 struct ext4_extent *ex;
9095 unsigned int i, len;
9096 + ext4_lblk_t start, end;
9097 ext4_fsblk_t blk;
9098 handle_t *handle;
9099 int ret;
9100 @@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode)
9101 EXT4_FEATURE_RO_COMPAT_BIGALLOC))
9102 return -EOPNOTSUPP;
9103
9104 + /*
9105 + * In order to get correct extent info, force all delayed allocation
9106 + * blocks to be allocated, otherwise delayed allocation blocks may not
9107 + * be reflected and bypass the checks on extent header.
9108 + */
9109 + if (test_opt(inode->i_sb, DELALLOC))
9110 + ext4_alloc_da_blocks(inode);
9111 +
9112 handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
9113 if (IS_ERR(handle))
9114 return PTR_ERR(handle);
9115 @@ -650,11 +659,13 @@ int ext4_ind_migrate(struct inode *inode)
9116 goto errout;
9117 }
9118 if (eh->eh_entries == 0)
9119 - blk = len = 0;
9120 + blk = len = start = end = 0;
9121 else {
9122 len = le16_to_cpu(ex->ee_len);
9123 blk = ext4_ext_pblock(ex);
9124 - if (len > EXT4_NDIR_BLOCKS) {
9125 + start = le32_to_cpu(ex->ee_block);
9126 + end = start + len - 1;
9127 + if (end >= EXT4_NDIR_BLOCKS) {
9128 ret = -EOPNOTSUPP;
9129 goto errout;
9130 }
9131 @@ -662,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode)
9132
9133 ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
9134 memset(ei->i_data, 0, sizeof(ei->i_data));
9135 - for (i=0; i < len; i++)
9136 + for (i = start; i <= end; i++)
9137 ei->i_data[i] = cpu_to_le32(blk++);
9138 ext4_mark_inode_dirty(handle, inode);
9139 errout:
9140 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
9141 index ca9d4a2fed41..ca12affdba96 100644
9142 --- a/fs/ext4/super.c
9143 +++ b/fs/ext4/super.c
9144 @@ -807,6 +807,7 @@ static void ext4_put_super(struct super_block *sb)
9145 dump_orphan_list(sb, sbi);
9146 J_ASSERT(list_empty(&sbi->s_orphan));
9147
9148 + sync_blockdev(sb->s_bdev);
9149 invalidate_bdev(sb->s_bdev);
9150 if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
9151 /*
9152 @@ -4943,6 +4944,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
9153 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
9154 }
9155
9156 + if (*flags & MS_LAZYTIME)
9157 + sb->s_flags |= MS_LAZYTIME;
9158 +
9159 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
9160 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
9161 err = -EROFS;
9162 diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
9163 index 18dacf9ed8ff..708d697113fc 100644
9164 --- a/fs/fuse/inode.c
9165 +++ b/fs/fuse/inode.c
9166 @@ -1026,6 +1026,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
9167 goto err_fput;
9168
9169 fuse_conn_init(fc);
9170 + fc->release = fuse_free_conn;
9171
9172 fc->dev = sb->s_dev;
9173 fc->sb = sb;
9174 @@ -1040,7 +1041,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
9175 fc->dont_mask = 1;
9176 sb->s_flags |= MS_POSIXACL;
9177
9178 - fc->release = fuse_free_conn;
9179 fc->flags = d.flags;
9180 fc->user_id = d.user_id;
9181 fc->group_id = d.group_id;
9182 diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
9183 index 7cd00d3a7c9b..8685c655737f 100644
9184 --- a/fs/hpfs/super.c
9185 +++ b/fs/hpfs/super.c
9186 @@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s)
9187 }
9188
9189 /* Filesystem error... */
9190 -static char err_buf[1024];
9191 -
9192 void hpfs_error(struct super_block *s, const char *fmt, ...)
9193 {
9194 + struct va_format vaf;
9195 va_list args;
9196
9197 va_start(args, fmt);
9198 - vsnprintf(err_buf, sizeof(err_buf), fmt, args);
9199 +
9200 + vaf.fmt = fmt;
9201 + vaf.va = &args;
9202 +
9203 + pr_err("filesystem error: %pV", &vaf);
9204 +
9205 va_end(args);
9206
9207 - pr_err("filesystem error: %s", err_buf);
9208 if (!hpfs_sb(s)->sb_was_error) {
9209 if (hpfs_sb(s)->sb_err == 2) {
9210 pr_cont("; crashing the system because you wanted it\n");
9211 @@ -424,11 +427,14 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
9212 int o;
9213 struct hpfs_sb_info *sbi = hpfs_sb(s);
9214 char *new_opts = kstrdup(data, GFP_KERNEL);
9215 -
9216 +
9217 + if (!new_opts)
9218 + return -ENOMEM;
9219 +
9220 sync_filesystem(s);
9221
9222 *flags |= MS_NOATIME;
9223 -
9224 +
9225 hpfs_lock(s);
9226 uid = sbi->sb_uid; gid = sbi->sb_gid;
9227 umask = 0777 & ~sbi->sb_mode;
9228 diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
9229 index 988b32ed4c87..4227dc4f7437 100644
9230 --- a/fs/jbd2/checkpoint.c
9231 +++ b/fs/jbd2/checkpoint.c
9232 @@ -390,7 +390,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
9233 unsigned long blocknr;
9234
9235 if (is_journal_aborted(journal))
9236 - return 1;
9237 + return -EIO;
9238
9239 if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
9240 return 1;
9241 @@ -405,10 +405,9 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
9242 * jbd2_cleanup_journal_tail() doesn't get called all that often.
9243 */
9244 if (journal->j_flags & JBD2_BARRIER)
9245 - blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
9246 + blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
9247
9248 - __jbd2_update_log_tail(journal, first_tid, blocknr);
9249 - return 0;
9250 + return __jbd2_update_log_tail(journal, first_tid, blocknr);
9251 }
9252
9253
9254 diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
9255 index b96bd8076b70..112fad9e1e20 100644
9256 --- a/fs/jbd2/journal.c
9257 +++ b/fs/jbd2/journal.c
9258 @@ -885,9 +885,10 @@ int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
9259 *
9260 * Requires j_checkpoint_mutex
9261 */
9262 -void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
9263 +int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
9264 {
9265 unsigned long freed;
9266 + int ret;
9267
9268 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
9269
9270 @@ -897,7 +898,10 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
9271 * space and if we lose sb update during power failure we'd replay
9272 * old transaction with possibly newly overwritten data.
9273 */
9274 - jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
9275 + ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
9276 + if (ret)
9277 + goto out;
9278 +
9279 write_lock(&journal->j_state_lock);
9280 freed = block - journal->j_tail;
9281 if (block < journal->j_tail)
9282 @@ -913,6 +917,9 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
9283 journal->j_tail_sequence = tid;
9284 journal->j_tail = block;
9285 write_unlock(&journal->j_state_lock);
9286 +
9287 +out:
9288 + return ret;
9289 }
9290
9291 /*
9292 @@ -1331,7 +1338,7 @@ static int journal_reset(journal_t *journal)
9293 return jbd2_journal_start_thread(journal);
9294 }
9295
9296 -static void jbd2_write_superblock(journal_t *journal, int write_op)
9297 +static int jbd2_write_superblock(journal_t *journal, int write_op)
9298 {
9299 struct buffer_head *bh = journal->j_sb_buffer;
9300 journal_superblock_t *sb = journal->j_superblock;
9301 @@ -1370,7 +1377,10 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
9302 printk(KERN_ERR "JBD2: Error %d detected when updating "
9303 "journal superblock for %s.\n", ret,
9304 journal->j_devname);
9305 + jbd2_journal_abort(journal, ret);
9306 }
9307 +
9308 + return ret;
9309 }
9310
9311 /**
9312 @@ -1383,10 +1393,11 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
9313 * Update a journal's superblock information about log tail and write it to
9314 * disk, waiting for the IO to complete.
9315 */
9316 -void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
9317 +int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
9318 unsigned long tail_block, int write_op)
9319 {
9320 journal_superblock_t *sb = journal->j_superblock;
9321 + int ret;
9322
9323 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
9324 jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
9325 @@ -1395,13 +1406,18 @@ void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
9326 sb->s_sequence = cpu_to_be32(tail_tid);
9327 sb->s_start = cpu_to_be32(tail_block);
9328
9329 - jbd2_write_superblock(journal, write_op);
9330 + ret = jbd2_write_superblock(journal, write_op);
9331 + if (ret)
9332 + goto out;
9333
9334 /* Log is no longer empty */
9335 write_lock(&journal->j_state_lock);
9336 WARN_ON(!sb->s_sequence);
9337 journal->j_flags &= ~JBD2_FLUSHED;
9338 write_unlock(&journal->j_state_lock);
9339 +
9340 +out:
9341 + return ret;
9342 }
9343
9344 /**
9345 @@ -1950,7 +1966,14 @@ int jbd2_journal_flush(journal_t *journal)
9346 return -EIO;
9347
9348 mutex_lock(&journal->j_checkpoint_mutex);
9349 - jbd2_cleanup_journal_tail(journal);
9350 + if (!err) {
9351 + err = jbd2_cleanup_journal_tail(journal);
9352 + if (err < 0) {
9353 + mutex_unlock(&journal->j_checkpoint_mutex);
9354 + goto out;
9355 + }
9356 + err = 0;
9357 + }
9358
9359 /* Finally, mark the journal as really needing no recovery.
9360 * This sets s_start==0 in the underlying superblock, which is
9361 @@ -1966,7 +1989,8 @@ int jbd2_journal_flush(journal_t *journal)
9362 J_ASSERT(journal->j_head == journal->j_tail);
9363 J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
9364 write_unlock(&journal->j_state_lock);
9365 - return 0;
9366 +out:
9367 + return err;
9368 }
9369
9370 /**
9371 diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
9372 index 7d05089e52d6..6f5f0f425e86 100644
9373 --- a/fs/nfs/flexfilelayout/flexfilelayout.c
9374 +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
9375 @@ -631,7 +631,7 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
9376 nfs_direct_set_resched_writes(hdr->dreq);
9377 /* fake unstable write to let common nfs resend pages */
9378 hdr->verf.committed = NFS_UNSTABLE;
9379 - hdr->good_bytes = 0;
9380 + hdr->good_bytes = hdr->args.count;
9381 }
9382 return;
9383 }
9384 diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
9385 index 77a2d026aa12..f13e1969eedd 100644
9386 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
9387 +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
9388 @@ -324,7 +324,8 @@ static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror,
9389 __func__, PTR_ERR(cred));
9390 return PTR_ERR(cred);
9391 } else {
9392 - mirror->cred = cred;
9393 + if (cmpxchg(&mirror->cred, NULL, cred))
9394 + put_rpccred(cred);
9395 }
9396 }
9397 return 0;
9398 @@ -386,7 +387,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
9399 /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
9400 smp_rmb();
9401 if (ds->ds_clp)
9402 - goto out;
9403 + goto out_update_creds;
9404
9405 flavor = nfs4_ff_layout_choose_authflavor(mirror);
9406
9407 @@ -430,7 +431,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
9408 }
9409 }
9410 }
9411 -
9412 +out_update_creds:
9413 if (ff_layout_update_mirror_cred(mirror, ds))
9414 ds = NULL;
9415 out:
9416 diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
9417 index 53852a4bd88b..9b04c2e6fffc 100644
9418 --- a/fs/nfs/nfs3xdr.c
9419 +++ b/fs/nfs/nfs3xdr.c
9420 @@ -1342,7 +1342,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
9421 if (args->npages != 0)
9422 xdr_write_pages(xdr, args->pages, 0, args->len);
9423 else
9424 - xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
9425 + xdr_reserve_space(xdr, args->len);
9426
9427 error = nfsacl_encode(xdr->buf, base, args->inode,
9428 (args->mask & NFS_ACL) ?
9429 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
9430 index 2782cfca2265..ddef1dc80cf7 100644
9431 --- a/fs/nfs/nfs4state.c
9432 +++ b/fs/nfs/nfs4state.c
9433 @@ -1482,6 +1482,8 @@ restart:
9434 spin_unlock(&state->state_lock);
9435 }
9436 nfs4_put_open_state(state);
9437 + clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
9438 + &state->flags);
9439 spin_lock(&sp->so_lock);
9440 goto restart;
9441 }
9442 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
9443 index 230606243be6..d47c188682b1 100644
9444 --- a/fs/nfs/pnfs.c
9445 +++ b/fs/nfs/pnfs.c
9446 @@ -1821,6 +1821,7 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
9447 /* Resend all requests through the MDS */
9448 nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
9449 hdr->completion_ops);
9450 + set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
9451 return nfs_pageio_resend(&pgio, hdr);
9452 }
9453 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
9454 @@ -1865,6 +1866,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
9455 mirror->pg_recoalesce = 1;
9456 }
9457 nfs_pgio_data_destroy(hdr);
9458 + hdr->release(hdr);
9459 }
9460
9461 static enum pnfs_try_status
9462 @@ -1979,6 +1981,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
9463 mirror->pg_recoalesce = 1;
9464 }
9465 nfs_pgio_data_destroy(hdr);
9466 + hdr->release(hdr);
9467 }
9468
9469 /*
9470 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
9471 index dfc19f1575a1..daf355642845 100644
9472 --- a/fs/nfs/write.c
9473 +++ b/fs/nfs/write.c
9474 @@ -1289,6 +1289,7 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
9475 static void nfs_redirty_request(struct nfs_page *req)
9476 {
9477 nfs_mark_request_dirty(req);
9478 + set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
9479 nfs_unlock_request(req);
9480 nfs_end_page_writeback(req);
9481 nfs_release_request(req);
9482 diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
9483 index 907870e81a72..70e9af551600 100644
9484 --- a/fs/overlayfs/readdir.c
9485 +++ b/fs/overlayfs/readdir.c
9486 @@ -23,6 +23,7 @@ struct ovl_cache_entry {
9487 u64 ino;
9488 struct list_head l_node;
9489 struct rb_node node;
9490 + struct ovl_cache_entry *next_maybe_whiteout;
9491 bool is_whiteout;
9492 char name[];
9493 };
9494 @@ -39,7 +40,7 @@ struct ovl_readdir_data {
9495 struct rb_root root;
9496 struct list_head *list;
9497 struct list_head middle;
9498 - struct dentry *dir;
9499 + struct ovl_cache_entry *first_maybe_whiteout;
9500 int count;
9501 int err;
9502 };
9503 @@ -79,7 +80,7 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
9504 return NULL;
9505 }
9506
9507 -static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
9508 +static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
9509 const char *name, int len,
9510 u64 ino, unsigned int d_type)
9511 {
9512 @@ -98,29 +99,8 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
9513 p->is_whiteout = false;
9514
9515 if (d_type == DT_CHR) {
9516 - struct dentry *dentry;
9517 - const struct cred *old_cred;
9518 - struct cred *override_cred;
9519 -
9520 - override_cred = prepare_creds();
9521 - if (!override_cred) {
9522 - kfree(p);
9523 - return NULL;
9524 - }
9525 -
9526 - /*
9527 - * CAP_DAC_OVERRIDE for lookup
9528 - */
9529 - cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
9530 - old_cred = override_creds(override_cred);
9531 -
9532 - dentry = lookup_one_len(name, dir, len);
9533 - if (!IS_ERR(dentry)) {
9534 - p->is_whiteout = ovl_is_whiteout(dentry);
9535 - dput(dentry);
9536 - }
9537 - revert_creds(old_cred);
9538 - put_cred(override_cred);
9539 + p->next_maybe_whiteout = rdd->first_maybe_whiteout;
9540 + rdd->first_maybe_whiteout = p;
9541 }
9542 return p;
9543 }
9544 @@ -148,7 +128,7 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
9545 return 0;
9546 }
9547
9548 - p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
9549 + p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
9550 if (p == NULL)
9551 return -ENOMEM;
9552
9553 @@ -169,7 +149,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd,
9554 if (p) {
9555 list_move_tail(&p->l_node, &rdd->middle);
9556 } else {
9557 - p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
9558 + p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
9559 if (p == NULL)
9560 rdd->err = -ENOMEM;
9561 else
9562 @@ -219,6 +199,43 @@ static int ovl_fill_merge(struct dir_context *ctx, const char *name,
9563 return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
9564 }
9565
9566 +static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
9567 +{
9568 + int err;
9569 + struct ovl_cache_entry *p;
9570 + struct dentry *dentry;
9571 + const struct cred *old_cred;
9572 + struct cred *override_cred;
9573 +
9574 + override_cred = prepare_creds();
9575 + if (!override_cred)
9576 + return -ENOMEM;
9577 +
9578 + /*
9579 + * CAP_DAC_OVERRIDE for lookup
9580 + */
9581 + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
9582 + old_cred = override_creds(override_cred);
9583 +
9584 + err = mutex_lock_killable(&dir->d_inode->i_mutex);
9585 + if (!err) {
9586 + while (rdd->first_maybe_whiteout) {
9587 + p = rdd->first_maybe_whiteout;
9588 + rdd->first_maybe_whiteout = p->next_maybe_whiteout;
9589 + dentry = lookup_one_len(p->name, dir, p->len);
9590 + if (!IS_ERR(dentry)) {
9591 + p->is_whiteout = ovl_is_whiteout(dentry);
9592 + dput(dentry);
9593 + }
9594 + }
9595 + mutex_unlock(&dir->d_inode->i_mutex);
9596 + }
9597 + revert_creds(old_cred);
9598 + put_cred(override_cred);
9599 +
9600 + return err;
9601 +}
9602 +
9603 static inline int ovl_dir_read(struct path *realpath,
9604 struct ovl_readdir_data *rdd)
9605 {
9606 @@ -229,7 +246,7 @@ static inline int ovl_dir_read(struct path *realpath,
9607 if (IS_ERR(realfile))
9608 return PTR_ERR(realfile);
9609
9610 - rdd->dir = realpath->dentry;
9611 + rdd->first_maybe_whiteout = NULL;
9612 rdd->ctx.pos = 0;
9613 do {
9614 rdd->count = 0;
9615 @@ -238,6 +255,10 @@ static inline int ovl_dir_read(struct path *realpath,
9616 if (err >= 0)
9617 err = rdd->err;
9618 } while (!err && rdd->count);
9619 +
9620 + if (!err && rdd->first_maybe_whiteout)
9621 + err = ovl_check_whiteouts(realpath->dentry, rdd);
9622 +
9623 fput(realfile);
9624
9625 return err;
9626 diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
9627 index 3fbf167cfb4c..73e75a87af50 100644
9628 --- a/fs/xfs/xfs_attr_inactive.c
9629 +++ b/fs/xfs/xfs_attr_inactive.c
9630 @@ -435,8 +435,14 @@ xfs_attr_inactive(
9631 */
9632 xfs_trans_ijoin(trans, dp, 0);
9633
9634 - /* invalidate and truncate the attribute fork extents */
9635 - if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
9636 + /*
9637 + * Invalidate and truncate the attribute fork extents. Make sure the
9638 + * fork actually has attributes as otherwise the invalidation has no
9639 + * blocks to read and returns an error. In this case, just do the fork
9640 + * removal below.
9641 + */
9642 + if (xfs_inode_hasattr(dp) &&
9643 + dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
9644 error = xfs_attr3_root_inactive(&trans, dp);
9645 if (error)
9646 goto out_cancel;
9647 diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
9648 index 3df411eadb86..40c076523cfa 100644
9649 --- a/fs/xfs/xfs_symlink.c
9650 +++ b/fs/xfs/xfs_symlink.c
9651 @@ -104,7 +104,7 @@ xfs_readlink_bmap(
9652 cur_chunk += sizeof(struct xfs_dsymlink_hdr);
9653 }
9654
9655 - memcpy(link + offset, bp->b_addr, byte_cnt);
9656 + memcpy(link + offset, cur_chunk, byte_cnt);
9657
9658 pathlen -= byte_cnt;
9659 offset += byte_cnt;
9660 diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
9661 index 08ef57bc8d63..f5ed1f17f061 100644
9662 --- a/include/acpi/acpixf.h
9663 +++ b/include/acpi/acpixf.h
9664 @@ -195,9 +195,18 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
9665 * address. Although ACPICA adheres to the ACPI specification which
9666 * requires the use of the corresponding 64-bit address if it is non-zero,
9667 * some machines have been found to have a corrupted non-zero 64-bit
9668 - * address. Default is TRUE, favor the 32-bit addresses.
9669 + * address. Default is FALSE, do not favor the 32-bit addresses.
9670 */
9671 -ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
9672 +ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
9673 +
9674 +/*
9675 + * Optionally use 32-bit FACS table addresses.
9676 + * It is reported that some platforms fail to resume from system suspending
9677 + * if 64-bit FACS table address is selected:
9678 + * https://bugzilla.kernel.org/show_bug.cgi?id=74021
9679 + * Default is TRUE, favor the 32-bit addresses.
9680 + */
9681 +ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_facs_addresses, TRUE);
9682
9683 /*
9684 * Optionally truncate I/O addresses to 16 bits. Provides compatibility
9685 diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
9686 index 1c3002e1db20..181427ef3549 100644
9687 --- a/include/acpi/actypes.h
9688 +++ b/include/acpi/actypes.h
9689 @@ -572,6 +572,7 @@ typedef u64 acpi_integer;
9690 #define ACPI_NO_ACPI_ENABLE 0x10
9691 #define ACPI_NO_DEVICE_INIT 0x20
9692 #define ACPI_NO_OBJECT_INIT 0x40
9693 +#define ACPI_NO_FACS_INIT 0x80
9694
9695 /*
9696 * Initialization state
9697 diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
9698 index c157103492b0..3f13b910f8d2 100644
9699 --- a/include/drm/drm_atomic.h
9700 +++ b/include/drm/drm_atomic.h
9701 @@ -77,26 +77,26 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
9702
9703 #define for_each_connector_in_state(state, connector, connector_state, __i) \
9704 for ((__i) = 0; \
9705 - (connector) = (state)->connectors[__i], \
9706 - (connector_state) = (state)->connector_states[__i], \
9707 - (__i) < (state)->num_connector; \
9708 + (__i) < (state)->num_connector && \
9709 + ((connector) = (state)->connectors[__i], \
9710 + (connector_state) = (state)->connector_states[__i], 1); \
9711 (__i)++) \
9712 if (connector)
9713
9714 #define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
9715 for ((__i) = 0; \
9716 - (crtc) = (state)->crtcs[__i], \
9717 - (crtc_state) = (state)->crtc_states[__i], \
9718 - (__i) < (state)->dev->mode_config.num_crtc; \
9719 + (__i) < (state)->dev->mode_config.num_crtc && \
9720 + ((crtc) = (state)->crtcs[__i], \
9721 + (crtc_state) = (state)->crtc_states[__i], 1); \
9722 (__i)++) \
9723 if (crtc_state)
9724
9725 -#define for_each_plane_in_state(state, plane, plane_state, __i) \
9726 - for ((__i) = 0; \
9727 - (plane) = (state)->planes[__i], \
9728 - (plane_state) = (state)->plane_states[__i], \
9729 - (__i) < (state)->dev->mode_config.num_total_plane; \
9730 - (__i)++) \
9731 +#define for_each_plane_in_state(state, plane, plane_state, __i) \
9732 + for ((__i) = 0; \
9733 + (__i) < (state)->dev->mode_config.num_total_plane && \
9734 + ((plane) = (state)->planes[__i], \
9735 + (plane_state) = (state)->plane_states[__i], 1); \
9736 + (__i)++) \
9737 if (plane_state)
9738
9739 #endif /* DRM_ATOMIC_H_ */
9740 diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
9741 index ca71c03143d1..54233583c6cb 100644
9742 --- a/include/drm/drm_crtc.h
9743 +++ b/include/drm/drm_crtc.h
9744 @@ -731,6 +731,8 @@ struct drm_connector {
9745 uint8_t num_h_tile, num_v_tile;
9746 uint8_t tile_h_loc, tile_v_loc;
9747 uint16_t tile_h_size, tile_v_size;
9748 +
9749 + struct list_head destroy_list;
9750 };
9751
9752 /**
9753 diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
9754 index a2507817be41..86d0b25ed054 100644
9755 --- a/include/drm/drm_dp_mst_helper.h
9756 +++ b/include/drm/drm_dp_mst_helper.h
9757 @@ -463,6 +463,10 @@ struct drm_dp_mst_topology_mgr {
9758 struct work_struct work;
9759
9760 struct work_struct tx_work;
9761 +
9762 + struct list_head destroy_connector_list;
9763 + struct mutex destroy_connector_lock;
9764 + struct work_struct destroy_connector_work;
9765 };
9766
9767 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
9768 diff --git a/include/linux/acpi.h b/include/linux/acpi.h
9769 index 5da2d2e9d38e..4550be3bb63b 100644
9770 --- a/include/linux/acpi.h
9771 +++ b/include/linux/acpi.h
9772 @@ -332,9 +332,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
9773
9774 int acpi_resources_are_enforced(void);
9775
9776 -int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
9777 - unsigned long flags, char *desc);
9778 -
9779 #ifdef CONFIG_HIBERNATION
9780 void __init acpi_no_s4_hw_signature(void);
9781 #endif
9782 @@ -530,13 +527,6 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
9783 return 0;
9784 }
9785
9786 -static inline int acpi_reserve_region(u64 start, unsigned int length,
9787 - u8 space_id, unsigned long flags,
9788 - char *desc)
9789 -{
9790 - return -ENXIO;
9791 -}
9792 -
9793 struct acpi_table_header;
9794 static inline int acpi_table_parse(char *id,
9795 int (*handler)(struct acpi_table_header *))
9796 diff --git a/include/linux/ata.h b/include/linux/ata.h
9797 index b666b773e111..533dbb6428f5 100644
9798 --- a/include/linux/ata.h
9799 +++ b/include/linux/ata.h
9800 @@ -45,6 +45,7 @@ enum {
9801 ATA_SECT_SIZE = 512,
9802 ATA_MAX_SECTORS_128 = 128,
9803 ATA_MAX_SECTORS = 256,
9804 + ATA_MAX_SECTORS_1024 = 1024,
9805 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */
9806 ATA_MAX_SECTORS_TAPE = 65535,
9807
9808 diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
9809 index 73b45225a7ca..e6797ded700e 100644
9810 --- a/include/linux/buffer_head.h
9811 +++ b/include/linux/buffer_head.h
9812 @@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
9813 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
9814 }
9815
9816 +
9817 +static inline struct buffer_head *
9818 +sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
9819 +{
9820 + return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
9821 +}
9822 +
9823 static inline struct buffer_head *
9824 sb_find_get_block(struct super_block *sb, sector_t block)
9825 {
9826 diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
9827 index 0c9a2f2c2802..d4c71132d07f 100644
9828 --- a/include/linux/compiler-intel.h
9829 +++ b/include/linux/compiler-intel.h
9830 @@ -13,10 +13,12 @@
9831 /* Intel ECC compiler doesn't support gcc specific asm stmts.
9832 * It uses intrinsics to do the equivalent things.
9833 */
9834 +#undef barrier
9835 #undef barrier_data
9836 #undef RELOC_HIDE
9837 #undef OPTIMIZER_HIDE_VAR
9838
9839 +#define barrier() __memory_barrier()
9840 #define barrier_data(ptr) barrier()
9841
9842 #define RELOC_HIDE(ptr, off) \
9843 diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
9844 index 3a7c9ffd5ab9..da042657dc31 100644
9845 --- a/include/linux/gpio/consumer.h
9846 +++ b/include/linux/gpio/consumer.h
9847 @@ -406,6 +406,21 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
9848 return -EINVAL;
9849 }
9850
9851 +/* Child properties interface */
9852 +struct fwnode_handle;
9853 +
9854 +static inline struct gpio_desc *fwnode_get_named_gpiod(
9855 + struct fwnode_handle *fwnode, const char *propname)
9856 +{
9857 + return ERR_PTR(-ENOSYS);
9858 +}
9859 +
9860 +static inline struct gpio_desc *devm_get_gpiod_from_child(
9861 + struct device *dev, const char *con_id, struct fwnode_handle *child)
9862 +{
9863 + return ERR_PTR(-ENOSYS);
9864 +}
9865 +
9866 #endif /* CONFIG_GPIOLIB */
9867
9868 /*
9869 diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
9870 index 0042bf330b99..c02b5ce6c5cd 100644
9871 --- a/include/linux/hid-sensor-hub.h
9872 +++ b/include/linux/hid-sensor-hub.h
9873 @@ -230,6 +230,7 @@ struct hid_sensor_common {
9874 struct platform_device *pdev;
9875 unsigned usage_id;
9876 atomic_t data_ready;
9877 + atomic_t user_requested_state;
9878 struct iio_trigger *trigger;
9879 struct hid_sensor_hub_attribute_info poll;
9880 struct hid_sensor_hub_attribute_info report_state;
9881 diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
9882 index 20e7f78041c8..edb640ae9a94 100644
9883 --- a/include/linux/jbd2.h
9884 +++ b/include/linux/jbd2.h
9885 @@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
9886 int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
9887 int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
9888 unsigned long *block);
9889 -void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
9890 +int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
9891 void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
9892
9893 /* Commit management */
9894 @@ -1157,7 +1157,7 @@ extern int jbd2_journal_recover (journal_t *journal);
9895 extern int jbd2_journal_wipe (journal_t *, int);
9896 extern int jbd2_journal_skip_recovery (journal_t *);
9897 extern void jbd2_journal_update_sb_errno(journal_t *);
9898 -extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
9899 +extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
9900 unsigned long, int);
9901 extern void __jbd2_journal_abort_hard (journal_t *);
9902 extern void jbd2_journal_abort (journal_t *, int);
9903 diff --git a/include/linux/libata.h b/include/linux/libata.h
9904 index 28aeae46f355..e0e33787c485 100644
9905 --- a/include/linux/libata.h
9906 +++ b/include/linux/libata.h
9907 @@ -431,6 +431,9 @@ enum {
9908 ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
9909 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
9910 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
9911 + ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
9912 + ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
9913 + ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
9914
9915 /* DMA mask for user DMA control: User visible values; DO NOT
9916 renumber */
9917 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
9918 index 93ab6071bbe9..e9e9a8dcfb47 100644
9919 --- a/include/linux/nfs_xdr.h
9920 +++ b/include/linux/nfs_xdr.h
9921 @@ -1142,7 +1142,7 @@ struct nfs41_state_protection {
9922 struct nfs4_op_map allow;
9923 };
9924
9925 -#define NFS4_EXCHANGE_ID_LEN (48)
9926 +#define NFS4_EXCHANGE_ID_LEN (127)
9927 struct nfs41_exchange_id_args {
9928 struct nfs_client *client;
9929 nfs4_verifier *verifier;
9930 diff --git a/include/linux/of.h b/include/linux/of.h
9931 index b871ff9d81d7..8135d507d089 100644
9932 --- a/include/linux/of.h
9933 +++ b/include/linux/of.h
9934 @@ -673,7 +673,10 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
9935 #if defined(CONFIG_OF) && defined(CONFIG_NUMA)
9936 extern int of_node_to_nid(struct device_node *np);
9937 #else
9938 -static inline int of_node_to_nid(struct device_node *device) { return 0; }
9939 +static inline int of_node_to_nid(struct device_node *device)
9940 +{
9941 + return NUMA_NO_NODE;
9942 +}
9943 #endif
9944
9945 static inline struct device_node *of_find_matching_node(
9946 diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
9947 index 551b6737f5df..a7e41fb6ed54 100644
9948 --- a/include/uapi/drm/i915_drm.h
9949 +++ b/include/uapi/drm/i915_drm.h
9950 @@ -1065,6 +1065,14 @@ struct drm_i915_reg_read {
9951 __u64 offset;
9952 __u64 val; /* Return value */
9953 };
9954 +/* Known registers:
9955 + *
9956 + * Render engine timestamp - 0x2358 + 64bit - gen7+
9957 + * - Note this register returns an invalid value if using the default
9958 + * single instruction 8byte read, in order to workaround that use
9959 + * offset (0x2538 | 1) instead.
9960 + *
9961 + */
9962
9963 struct drm_i915_reset_stats {
9964 __u32 ctx_id;
9965 diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
9966 index 7e01f78f0417..9e302315e33d 100644
9967 --- a/kernel/power/Kconfig
9968 +++ b/kernel/power/Kconfig
9969 @@ -187,7 +187,7 @@ config DPM_WATCHDOG
9970 config DPM_WATCHDOG_TIMEOUT
9971 int "Watchdog timeout in seconds"
9972 range 1 120
9973 - default 12
9974 + default 60
9975 depends on DPM_WATCHDOG
9976
9977 config PM_TRACE
9978 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
9979 index c099b082cd02..bff0169e1ad8 100644
9980 --- a/kernel/printk/printk.c
9981 +++ b/kernel/printk/printk.c
9982 @@ -484,11 +484,11 @@ int check_syslog_permissions(int type, bool from_file)
9983 * already done the capabilities checks at open time.
9984 */
9985 if (from_file && type != SYSLOG_ACTION_OPEN)
9986 - return 0;
9987 + goto ok;
9988
9989 if (syslog_action_restricted(type)) {
9990 if (capable(CAP_SYSLOG))
9991 - return 0;
9992 + goto ok;
9993 /*
9994 * For historical reasons, accept CAP_SYS_ADMIN too, with
9995 * a warning.
9996 @@ -498,10 +498,11 @@ int check_syslog_permissions(int type, bool from_file)
9997 "CAP_SYS_ADMIN but no CAP_SYSLOG "
9998 "(deprecated).\n",
9999 current->comm, task_pid_nr(current));
10000 - return 0;
10001 + goto ok;
10002 }
10003 return -EPERM;
10004 }
10005 +ok:
10006 return security_syslog(type);
10007 }
10008
10009 @@ -1263,10 +1264,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
10010 if (error)
10011 goto out;
10012
10013 - error = security_syslog(type);
10014 - if (error)
10015 - return error;
10016 -
10017 switch (type) {
10018 case SYSLOG_ACTION_CLOSE: /* Close log */
10019 break;
10020 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
10021 index d2612016de94..921691c5cb04 100644
10022 --- a/kernel/trace/trace.h
10023 +++ b/kernel/trace/trace.h
10024 @@ -444,6 +444,7 @@ enum {
10025
10026 TRACE_CONTROL_BIT,
10027
10028 + TRACE_BRANCH_BIT,
10029 /*
10030 * Abuse of the trace_recursion.
10031 * As we need a way to maintain state if we are tracing the function
10032 @@ -1312,7 +1313,7 @@ void trace_event_init(void);
10033 void trace_event_enum_update(struct trace_enum_map **map, int len);
10034 #else
10035 static inline void __init trace_event_init(void) { }
10036 -static inlin void trace_event_enum_update(struct trace_enum_map **map, int len) { }
10037 +static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
10038 #endif
10039
10040 extern struct trace_iterator *tracepoint_print_iter;
10041 diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
10042 index 57cbf1efdd44..1879980f06c2 100644
10043 --- a/kernel/trace/trace_branch.c
10044 +++ b/kernel/trace/trace_branch.c
10045 @@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
10046 struct trace_branch *entry;
10047 struct ring_buffer *buffer;
10048 unsigned long flags;
10049 - int cpu, pc;
10050 + int pc;
10051 const char *p;
10052
10053 + if (current->trace_recursion & TRACE_BRANCH_BIT)
10054 + return;
10055 +
10056 /*
10057 * I would love to save just the ftrace_likely_data pointer, but
10058 * this code can also be used by modules. Ugly things can happen
10059 @@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
10060 if (unlikely(!tr))
10061 return;
10062
10063 - local_irq_save(flags);
10064 - cpu = raw_smp_processor_id();
10065 - data = per_cpu_ptr(tr->trace_buffer.data, cpu);
10066 - if (atomic_inc_return(&data->disabled) != 1)
10067 + raw_local_irq_save(flags);
10068 + current->trace_recursion |= TRACE_BRANCH_BIT;
10069 + data = this_cpu_ptr(tr->trace_buffer.data);
10070 + if (atomic_read(&data->disabled))
10071 goto out;
10072
10073 pc = preempt_count();
10074 @@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
10075 __buffer_unlock_commit(buffer, event);
10076
10077 out:
10078 - atomic_dec(&data->disabled);
10079 - local_irq_restore(flags);
10080 + current->trace_recursion &= ~TRACE_BRANCH_BIT;
10081 + raw_local_irq_restore(flags);
10082 }
10083
10084 static inline
10085 diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
10086 index 7f2e97ce71a7..52adf02d7619 100644
10087 --- a/kernel/trace/trace_events_filter.c
10088 +++ b/kernel/trace/trace_events_filter.c
10089 @@ -1056,6 +1056,9 @@ static void parse_init(struct filter_parse_state *ps,
10090
10091 static char infix_next(struct filter_parse_state *ps)
10092 {
10093 + if (!ps->infix.cnt)
10094 + return 0;
10095 +
10096 ps->infix.cnt--;
10097
10098 return ps->infix.string[ps->infix.tail++];
10099 @@ -1071,6 +1074,9 @@ static char infix_peek(struct filter_parse_state *ps)
10100
10101 static void infix_advance(struct filter_parse_state *ps)
10102 {
10103 + if (!ps->infix.cnt)
10104 + return;
10105 +
10106 ps->infix.cnt--;
10107 ps->infix.tail++;
10108 }
10109 @@ -1385,7 +1391,9 @@ static int check_preds(struct filter_parse_state *ps)
10110 if (elt->op != OP_NOT)
10111 cnt--;
10112 n_normal_preds++;
10113 - WARN_ON_ONCE(cnt < 0);
10114 + /* all ops should have operands */
10115 + if (cnt < 0)
10116 + break;
10117 }
10118
10119 if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
10120 diff --git a/lib/bitmap.c b/lib/bitmap.c
10121 index 64c0926f5dd8..40162f87ea2d 100644
10122 --- a/lib/bitmap.c
10123 +++ b/lib/bitmap.c
10124 @@ -506,12 +506,12 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
10125 unsigned a, b;
10126 int c, old_c, totaldigits;
10127 const char __user __force *ubuf = (const char __user __force *)buf;
10128 - int exp_digit, in_range;
10129 + int at_start, in_range;
10130
10131 totaldigits = c = 0;
10132 bitmap_zero(maskp, nmaskbits);
10133 do {
10134 - exp_digit = 1;
10135 + at_start = 1;
10136 in_range = 0;
10137 a = b = 0;
10138
10139 @@ -540,11 +540,10 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
10140 break;
10141
10142 if (c == '-') {
10143 - if (exp_digit || in_range)
10144 + if (at_start || in_range)
10145 return -EINVAL;
10146 b = 0;
10147 in_range = 1;
10148 - exp_digit = 1;
10149 continue;
10150 }
10151
10152 @@ -554,16 +553,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
10153 b = b * 10 + (c - '0');
10154 if (!in_range)
10155 a = b;
10156 - exp_digit = 0;
10157 + at_start = 0;
10158 totaldigits++;
10159 }
10160 if (!(a <= b))
10161 return -EINVAL;
10162 if (b >= nmaskbits)
10163 return -ERANGE;
10164 - while (a <= b) {
10165 - set_bit(a, maskp);
10166 - a++;
10167 + if (!at_start) {
10168 + while (a <= b) {
10169 + set_bit(a, maskp);
10170 + a++;
10171 + }
10172 }
10173 } while (buflen && c == ',');
10174 return 0;
10175 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
10176 index 271e4432734c..8c4c1f9f9a9a 100644
10177 --- a/mm/hugetlb.c
10178 +++ b/mm/hugetlb.c
10179 @@ -40,6 +40,11 @@ int hugepages_treat_as_movable;
10180 int hugetlb_max_hstate __read_mostly;
10181 unsigned int default_hstate_idx;
10182 struct hstate hstates[HUGE_MAX_HSTATE];
10183 +/*
10184 + * Minimum page order among possible hugepage sizes, set to a proper value
10185 + * at boot time.
10186 + */
10187 +static unsigned int minimum_order __read_mostly = UINT_MAX;
10188
10189 __initdata LIST_HEAD(huge_boot_pages);
10190
10191 @@ -1188,19 +1193,13 @@ static void dissolve_free_huge_page(struct page *page)
10192 */
10193 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
10194 {
10195 - unsigned int order = 8 * sizeof(void *);
10196 unsigned long pfn;
10197 - struct hstate *h;
10198
10199 if (!hugepages_supported())
10200 return;
10201
10202 - /* Set scan step to minimum hugepage size */
10203 - for_each_hstate(h)
10204 - if (order > huge_page_order(h))
10205 - order = huge_page_order(h);
10206 - VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
10207 - for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
10208 + VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
10209 + for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
10210 dissolve_free_huge_page(pfn_to_page(pfn));
10211 }
10212
10213 @@ -1627,10 +1626,14 @@ static void __init hugetlb_init_hstates(void)
10214 struct hstate *h;
10215
10216 for_each_hstate(h) {
10217 + if (minimum_order > huge_page_order(h))
10218 + minimum_order = huge_page_order(h);
10219 +
10220 /* oversize hugepages were init'ed in early boot */
10221 if (!hstate_is_gigantic(h))
10222 hugetlb_hstate_alloc_pages(h);
10223 }
10224 + VM_BUG_ON(minimum_order == UINT_MAX);
10225 }
10226
10227 static char * __init memfmt(char *buf, unsigned long n)
10228 diff --git a/mm/memory.c b/mm/memory.c
10229 index 22e037e3364e..2a9e09870c20 100644
10230 --- a/mm/memory.c
10231 +++ b/mm/memory.c
10232 @@ -2669,6 +2669,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
10233
10234 pte_unmap(page_table);
10235
10236 + /* File mapping without ->vm_ops ? */
10237 + if (vma->vm_flags & VM_SHARED)
10238 + return VM_FAULT_SIGBUS;
10239 +
10240 /* Check if we need to add a guard page to the stack */
10241 if (check_stack_guard_page(vma, address) < 0)
10242 return VM_FAULT_SIGSEGV;
10243 @@ -3097,6 +3101,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
10244 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
10245
10246 pte_unmap(page_table);
10247 + /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
10248 + if (!vma->vm_ops->fault)
10249 + return VM_FAULT_SIGBUS;
10250 if (!(flags & FAULT_FLAG_WRITE))
10251 return do_read_fault(mm, vma, address, pmd, pgoff, flags,
10252 orig_pte);
10253 @@ -3242,13 +3249,12 @@ static int handle_pte_fault(struct mm_struct *mm,
10254 barrier();
10255 if (!pte_present(entry)) {
10256 if (pte_none(entry)) {
10257 - if (vma->vm_ops) {
10258 - if (likely(vma->vm_ops->fault))
10259 - return do_fault(mm, vma, address, pte,
10260 - pmd, flags, entry);
10261 - }
10262 - return do_anonymous_page(mm, vma, address,
10263 - pte, pmd, flags);
10264 + if (vma->vm_ops)
10265 + return do_fault(mm, vma, address, pte, pmd,
10266 + flags, entry);
10267 +
10268 + return do_anonymous_page(mm, vma, address, pte, pmd,
10269 + flags);
10270 }
10271 return do_swap_page(mm, vma, address,
10272 pte, pmd, flags, entry);
10273 diff --git a/net/9p/client.c b/net/9p/client.c
10274 index 6f4c4c88db84..81925b923318 100644
10275 --- a/net/9p/client.c
10276 +++ b/net/9p/client.c
10277 @@ -843,7 +843,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
10278 if (err < 0) {
10279 if (err == -EIO)
10280 c->status = Disconnected;
10281 - goto reterr;
10282 + if (err != -ERESTARTSYS)
10283 + goto reterr;
10284 }
10285 if (req->status == REQ_STATUS_ERROR) {
10286 p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
10287 @@ -1647,6 +1648,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
10288 if (*err) {
10289 trace_9p_protocol_dump(clnt, req->rc);
10290 p9_free_req(clnt, req);
10291 + break;
10292 }
10293
10294 p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
10295 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
10296 index 56f9edbf3d05..e11a5cfda4b1 100644
10297 --- a/net/bluetooth/hci_sock.c
10298 +++ b/net/bluetooth/hci_sock.c
10299 @@ -741,10 +741,11 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
10300 goto done;
10301 }
10302
10303 - if (test_bit(HCI_UP, &hdev->flags) ||
10304 - test_bit(HCI_INIT, &hdev->flags) ||
10305 + if (test_bit(HCI_INIT, &hdev->flags) ||
10306 hci_dev_test_flag(hdev, HCI_SETUP) ||
10307 - hci_dev_test_flag(hdev, HCI_CONFIG)) {
10308 + hci_dev_test_flag(hdev, HCI_CONFIG) ||
10309 + (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
10310 + test_bit(HCI_UP, &hdev->flags))) {
10311 err = -EBUSY;
10312 hci_dev_put(hdev);
10313 goto done;
10314 @@ -760,10 +761,21 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
10315
10316 err = hci_dev_open(hdev->id);
10317 if (err) {
10318 - hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
10319 - mgmt_index_added(hdev);
10320 - hci_dev_put(hdev);
10321 - goto done;
10322 + if (err == -EALREADY) {
10323 + /* In case the transport is already up and
10324 + * running, clear the error here.
10325 + *
10326 + * This can happen when opening an user
10327 + * channel and HCI_AUTO_OFF grace period
10328 + * is still active.
10329 + */
10330 + err = 0;
10331 + } else {
10332 + hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
10333 + mgmt_index_added(hdev);
10334 + hci_dev_put(hdev);
10335 + goto done;
10336 + }
10337 }
10338
10339 atomic_inc(&hdev->promisc);
10340 diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
10341 index 15796696d64e..4a3125836b64 100644
10342 --- a/net/ceph/osdmap.c
10343 +++ b/net/ceph/osdmap.c
10344 @@ -89,7 +89,7 @@ static int crush_decode_tree_bucket(void **p, void *end,
10345 {
10346 int j;
10347 dout("crush_decode_tree_bucket %p to %p\n", *p, end);
10348 - ceph_decode_32_safe(p, end, b->num_nodes, bad);
10349 + ceph_decode_8_safe(p, end, b->num_nodes, bad);
10350 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
10351 if (b->node_weights == NULL)
10352 return -ENOMEM;
10353 diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
10354 index b60c65f70346..627a2537634e 100644
10355 --- a/net/ieee802154/socket.c
10356 +++ b/net/ieee802154/socket.c
10357 @@ -739,6 +739,12 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
10358 sock_recv_ts_and_drops(msg, sk, skb);
10359
10360 if (saddr) {
10361 + /* Clear the implicit padding in struct sockaddr_ieee802154
10362 + * (16 bits between 'family' and 'addr') and in struct
10363 + * ieee802154_addr_sa (16 bits at the end of the structure).
10364 + */
10365 + memset(saddr, 0, sizeof(*saddr));
10366 +
10367 saddr->family = AF_IEEE802154;
10368 ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
10369 *addr_len = sizeof(*saddr);
10370 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
10371 index ff347a0eebd4..f06d42267306 100644
10372 --- a/net/mac80211/cfg.c
10373 +++ b/net/mac80211/cfg.c
10374 @@ -3356,6 +3356,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
10375 /* Update CSA counters */
10376 if (sdata->vif.csa_active &&
10377 (sdata->vif.type == NL80211_IFTYPE_AP ||
10378 + sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
10379 sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
10380 params->n_csa_offsets) {
10381 int i;
10382 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
10383 index bfef1b215050..a9c9d961f039 100644
10384 --- a/net/mac80211/ibss.c
10385 +++ b/net/mac80211/ibss.c
10386 @@ -146,6 +146,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
10387 csa_settings->chandef.chan->center_freq);
10388 presp->csa_counter_offsets[0] = (pos - presp->head);
10389 *pos++ = csa_settings->count;
10390 + presp->csa_current_counter = csa_settings->count;
10391 }
10392
10393 /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
10394 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
10395 index df3051d96aff..e86daed83c6f 100644
10396 --- a/net/mac80211/main.c
10397 +++ b/net/mac80211/main.c
10398 @@ -249,6 +249,7 @@ static void ieee80211_restart_work(struct work_struct *work)
10399 {
10400 struct ieee80211_local *local =
10401 container_of(work, struct ieee80211_local, restart_work);
10402 + struct ieee80211_sub_if_data *sdata;
10403
10404 /* wait for scan work complete */
10405 flush_workqueue(local->workqueue);
10406 @@ -257,6 +258,8 @@ static void ieee80211_restart_work(struct work_struct *work)
10407 "%s called with hardware scan in progress\n", __func__);
10408
10409 rtnl_lock();
10410 + list_for_each_entry(sdata, &local->interfaces, list)
10411 + flush_delayed_work(&sdata->dec_tailroom_needed_wk);
10412 ieee80211_scan_cancel(local);
10413 ieee80211_reconfig(local);
10414 rtnl_unlock();
10415 diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
10416 index d4684242e78b..817098add1d6 100644
10417 --- a/net/mac80211/mesh.c
10418 +++ b/net/mac80211/mesh.c
10419 @@ -680,6 +680,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
10420 *pos++ = 0x0;
10421 *pos++ = ieee80211_frequency_to_channel(
10422 csa->settings.chandef.chan->center_freq);
10423 + bcn->csa_current_counter = csa->settings.count;
10424 bcn->csa_counter_offsets[0] = hdr_len + 6;
10425 *pos++ = csa->settings.count;
10426 *pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
10427 diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
10428 index 9dd0ea8db463..28504dfd3dad 100644
10429 --- a/net/sunrpc/backchannel_rqst.c
10430 +++ b/net/sunrpc/backchannel_rqst.c
10431 @@ -60,7 +60,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
10432
10433 dprintk("RPC: free allocations for req= %p\n", req);
10434 WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
10435 - xbufp = &req->rq_private_buf;
10436 + xbufp = &req->rq_rcv_buf;
10437 free_page((unsigned long)xbufp->head[0].iov_base);
10438 xbufp = &req->rq_snd_buf;
10439 free_page((unsigned long)xbufp->head[0].iov_base);
10440 diff --git a/net/wireless/util.c b/net/wireless/util.c
10441 index 70051ab52f4f..7e4e3fffe7ce 100644
10442 --- a/net/wireless/util.c
10443 +++ b/net/wireless/util.c
10444 @@ -944,7 +944,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
10445 ntype == NL80211_IFTYPE_P2P_CLIENT))
10446 return -EBUSY;
10447
10448 - if (ntype != otype && netif_running(dev)) {
10449 + if (ntype != otype) {
10450 dev->ieee80211_ptr->use_4addr = false;
10451 dev->ieee80211_ptr->mesh_id_up_len = 0;
10452 wdev_lock(dev->ieee80211_ptr);
10453 diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
10454 index 8965d1bb8811..125d6402f64f 100644
10455 --- a/samples/trace_events/trace-events-sample.h
10456 +++ b/samples/trace_events/trace-events-sample.h
10457 @@ -168,7 +168,10 @@
10458 *
10459 * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
10460 * Use __get_dynamic_array_len(foo) to get the length of the array
10461 - * saved.
10462 + * saved. Note, __get_dynamic_array_len() returns the total allocated
10463 + * length of the dynamic array; __print_array() expects the second
10464 + * parameter to be the number of elements. To get that, the array length
10465 + * needs to be divided by the element size.
10466 *
10467 * For __string(foo, bar) use __get_str(foo)
10468 *
10469 @@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
10470 * This prints out the array that is defined by __array in a nice format.
10471 */
10472 __print_array(__get_dynamic_array(list),
10473 - __get_dynamic_array_len(list),
10474 + __get_dynamic_array_len(list) / sizeof(int),
10475 sizeof(int)),
10476 __get_str(str), __get_bitmask(cpus))
10477 );
10478 diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
10479 index 10f994307a04..582091498819 100644
10480 --- a/security/integrity/evm/evm_main.c
10481 +++ b/security/integrity/evm/evm_main.c
10482 @@ -296,6 +296,17 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
10483 iint = integrity_iint_find(d_backing_inode(dentry));
10484 if (iint && (iint->flags & IMA_NEW_FILE))
10485 return 0;
10486 +
10487 + /* exception for pseudo filesystems */
10488 + if (dentry->d_inode->i_sb->s_magic == TMPFS_MAGIC
10489 + || dentry->d_inode->i_sb->s_magic == SYSFS_MAGIC)
10490 + return 0;
10491 +
10492 + integrity_audit_msg(AUDIT_INTEGRITY_METADATA,
10493 + dentry->d_inode, dentry->d_name.name,
10494 + "update_metadata",
10495 + integrity_status_msg[evm_status],
10496 + -EPERM, 0);
10497 }
10498 out:
10499 if (evm_status != INTEGRITY_PASS)
10500 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
10501 index 8ee997dff139..fc56d4dfa954 100644
10502 --- a/security/integrity/ima/ima.h
10503 +++ b/security/integrity/ima/ima.h
10504 @@ -106,7 +106,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
10505 const char *op, const char *cause);
10506 int ima_init_crypto(void);
10507 void ima_putc(struct seq_file *m, void *data, int datalen);
10508 -void ima_print_digest(struct seq_file *m, u8 *digest, int size);
10509 +void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
10510 struct ima_template_desc *ima_template_desc_current(void);
10511 int ima_init_template(void);
10512
10513 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
10514 index 461215e5fd31..816d175da79a 100644
10515 --- a/security/integrity/ima/ima_fs.c
10516 +++ b/security/integrity/ima/ima_fs.c
10517 @@ -190,9 +190,9 @@ static const struct file_operations ima_measurements_ops = {
10518 .release = seq_release,
10519 };
10520
10521 -void ima_print_digest(struct seq_file *m, u8 *digest, int size)
10522 +void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
10523 {
10524 - int i;
10525 + u32 i;
10526
10527 for (i = 0; i < size; i++)
10528 seq_printf(m, "%02x", *(digest + i));
10529 diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
10530 index d1eefb9d65fb..3997e206f82d 100644
10531 --- a/security/integrity/ima/ima_policy.c
10532 +++ b/security/integrity/ima/ima_policy.c
10533 @@ -27,6 +27,8 @@
10534 #define IMA_UID 0x0008
10535 #define IMA_FOWNER 0x0010
10536 #define IMA_FSUUID 0x0020
10537 +#define IMA_INMASK 0x0040
10538 +#define IMA_EUID 0x0080
10539
10540 #define UNKNOWN 0
10541 #define MEASURE 0x0001 /* same as IMA_MEASURE */
10542 @@ -42,6 +44,8 @@ enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
10543 LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
10544 };
10545
10546 +enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
10547 +
10548 struct ima_rule_entry {
10549 struct list_head list;
10550 int action;
10551 @@ -70,7 +74,7 @@ struct ima_rule_entry {
10552 * normal users can easily run the machine out of memory simply building
10553 * and running executables.
10554 */
10555 -static struct ima_rule_entry default_rules[] = {
10556 +static struct ima_rule_entry dont_measure_rules[] = {
10557 {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
10558 {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
10559 {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
10560 @@ -79,12 +83,31 @@ static struct ima_rule_entry default_rules[] = {
10561 {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
10562 {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
10563 {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
10564 + {.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC,
10565 + .flags = IMA_FSMAGIC},
10566 + {.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC}
10567 +};
10568 +
10569 +static struct ima_rule_entry original_measurement_rules[] = {
10570 {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
10571 .flags = IMA_FUNC | IMA_MASK},
10572 {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
10573 .flags = IMA_FUNC | IMA_MASK},
10574 - {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, .uid = GLOBAL_ROOT_UID,
10575 - .flags = IMA_FUNC | IMA_MASK | IMA_UID},
10576 + {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
10577 + .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_MASK | IMA_UID},
10578 + {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
10579 + {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
10580 +};
10581 +
10582 +static struct ima_rule_entry default_measurement_rules[] = {
10583 + {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
10584 + .flags = IMA_FUNC | IMA_MASK},
10585 + {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
10586 + .flags = IMA_FUNC | IMA_MASK},
10587 + {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
10588 + .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_INMASK | IMA_EUID},
10589 + {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
10590 + .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_INMASK | IMA_UID},
10591 {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
10592 {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
10593 };
10594 @@ -99,6 +122,7 @@ static struct ima_rule_entry default_appraise_rules[] = {
10595 {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
10596 {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
10597 {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
10598 + {.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
10599 {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
10600 #ifndef CONFIG_IMA_APPRAISE_SIGNED_INIT
10601 {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .flags = IMA_FOWNER},
10602 @@ -115,14 +139,29 @@ static struct list_head *ima_rules;
10603
10604 static DEFINE_MUTEX(ima_rules_mutex);
10605
10606 -static bool ima_use_tcb __initdata;
10607 +static int ima_policy __initdata;
10608 static int __init default_measure_policy_setup(char *str)
10609 {
10610 - ima_use_tcb = 1;
10611 + if (ima_policy)
10612 + return 1;
10613 +
10614 + ima_policy = ORIGINAL_TCB;
10615 return 1;
10616 }
10617 __setup("ima_tcb", default_measure_policy_setup);
10618
10619 +static int __init policy_setup(char *str)
10620 +{
10621 + if (ima_policy)
10622 + return 1;
10623 +
10624 + if (strcmp(str, "tcb") == 0)
10625 + ima_policy = DEFAULT_TCB;
10626 +
10627 + return 1;
10628 +}
10629 +__setup("ima_policy=", policy_setup);
10630 +
10631 static bool ima_use_appraise_tcb __initdata;
10632 static int __init default_appraise_policy_setup(char *str)
10633 {
10634 @@ -182,6 +221,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
10635 if ((rule->flags & IMA_MASK) &&
10636 (rule->mask != mask && func != POST_SETATTR))
10637 return false;
10638 + if ((rule->flags & IMA_INMASK) &&
10639 + (!(rule->mask & mask) && func != POST_SETATTR))
10640 + return false;
10641 if ((rule->flags & IMA_FSMAGIC)
10642 && rule->fsmagic != inode->i_sb->s_magic)
10643 return false;
10644 @@ -190,6 +232,16 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
10645 return false;
10646 if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
10647 return false;
10648 + if (rule->flags & IMA_EUID) {
10649 + if (has_capability_noaudit(current, CAP_SETUID)) {
10650 + if (!uid_eq(rule->uid, cred->euid)
10651 + && !uid_eq(rule->uid, cred->suid)
10652 + && !uid_eq(rule->uid, cred->uid))
10653 + return false;
10654 + } else if (!uid_eq(rule->uid, cred->euid))
10655 + return false;
10656 + }
10657 +
10658 if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
10659 return false;
10660 for (i = 0; i < MAX_LSM_RULES; i++) {
10661 @@ -333,21 +385,31 @@ void __init ima_init_policy(void)
10662 {
10663 int i, measure_entries, appraise_entries;
10664
10665 - /* if !ima_use_tcb set entries = 0 so we load NO default rules */
10666 - measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0;
10667 + /* if !ima_policy set entries = 0 so we load NO default rules */
10668 + measure_entries = ima_policy ? ARRAY_SIZE(dont_measure_rules) : 0;
10669 appraise_entries = ima_use_appraise_tcb ?
10670 ARRAY_SIZE(default_appraise_rules) : 0;
10671
10672 - for (i = 0; i < measure_entries + appraise_entries; i++) {
10673 - if (i < measure_entries)
10674 - list_add_tail(&default_rules[i].list,
10675 - &ima_default_rules);
10676 - else {
10677 - int j = i - measure_entries;
10678 + for (i = 0; i < measure_entries; i++)
10679 + list_add_tail(&dont_measure_rules[i].list, &ima_default_rules);
10680
10681 - list_add_tail(&default_appraise_rules[j].list,
10682 + switch (ima_policy) {
10683 + case ORIGINAL_TCB:
10684 + for (i = 0; i < ARRAY_SIZE(original_measurement_rules); i++)
10685 + list_add_tail(&original_measurement_rules[i].list,
10686 &ima_default_rules);
10687 - }
10688 + break;
10689 + case DEFAULT_TCB:
10690 + for (i = 0; i < ARRAY_SIZE(default_measurement_rules); i++)
10691 + list_add_tail(&default_measurement_rules[i].list,
10692 + &ima_default_rules);
10693 + default:
10694 + break;
10695 + }
10696 +
10697 + for (i = 0; i < appraise_entries; i++) {
10698 + list_add_tail(&default_appraise_rules[i].list,
10699 + &ima_default_rules);
10700 }
10701
10702 ima_rules = &ima_default_rules;
10703 @@ -373,7 +435,8 @@ enum {
10704 Opt_audit,
10705 Opt_obj_user, Opt_obj_role, Opt_obj_type,
10706 Opt_subj_user, Opt_subj_role, Opt_subj_type,
10707 - Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
10708 + Opt_func, Opt_mask, Opt_fsmagic,
10709 + Opt_uid, Opt_euid, Opt_fowner,
10710 Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
10711 };
10712
10713 @@ -394,6 +457,7 @@ static match_table_t policy_tokens = {
10714 {Opt_fsmagic, "fsmagic=%s"},
10715 {Opt_fsuuid, "fsuuid=%s"},
10716 {Opt_uid, "uid=%s"},
10717 + {Opt_euid, "euid=%s"},
10718 {Opt_fowner, "fowner=%s"},
10719 {Opt_appraise_type, "appraise_type=%s"},
10720 {Opt_permit_directio, "permit_directio"},
10721 @@ -435,6 +499,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
10722 static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
10723 {
10724 struct audit_buffer *ab;
10725 + char *from;
10726 char *p;
10727 int result = 0;
10728
10729 @@ -525,18 +590,23 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
10730 if (entry->mask)
10731 result = -EINVAL;
10732
10733 - if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
10734 + from = args[0].from;
10735 + if (*from == '^')
10736 + from++;
10737 +
10738 + if ((strcmp(from, "MAY_EXEC")) == 0)
10739 entry->mask = MAY_EXEC;
10740 - else if (strcmp(args[0].from, "MAY_WRITE") == 0)
10741 + else if (strcmp(from, "MAY_WRITE") == 0)
10742 entry->mask = MAY_WRITE;
10743 - else if (strcmp(args[0].from, "MAY_READ") == 0)
10744 + else if (strcmp(from, "MAY_READ") == 0)
10745 entry->mask = MAY_READ;
10746 - else if (strcmp(args[0].from, "MAY_APPEND") == 0)
10747 + else if (strcmp(from, "MAY_APPEND") == 0)
10748 entry->mask = MAY_APPEND;
10749 else
10750 result = -EINVAL;
10751 if (!result)
10752 - entry->flags |= IMA_MASK;
10753 + entry->flags |= (*args[0].from == '^')
10754 + ? IMA_INMASK : IMA_MASK;
10755 break;
10756 case Opt_fsmagic:
10757 ima_log_string(ab, "fsmagic", args[0].from);
10758 @@ -566,6 +636,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
10759 break;
10760 case Opt_uid:
10761 ima_log_string(ab, "uid", args[0].from);
10762 + case Opt_euid:
10763 + if (token == Opt_euid)
10764 + ima_log_string(ab, "euid", args[0].from);
10765
10766 if (uid_valid(entry->uid)) {
10767 result = -EINVAL;
10768 @@ -574,11 +647,14 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
10769
10770 result = kstrtoul(args[0].from, 10, &lnum);
10771 if (!result) {
10772 - entry->uid = make_kuid(current_user_ns(), (uid_t)lnum);
10773 - if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum))
10774 + entry->uid = make_kuid(current_user_ns(),
10775 + (uid_t) lnum);
10776 + if (!uid_valid(entry->uid) ||
10777 + (uid_t)lnum != lnum)
10778 result = -EINVAL;
10779 else
10780 - entry->flags |= IMA_UID;
10781 + entry->flags |= (token == Opt_uid)
10782 + ? IMA_UID : IMA_EUID;
10783 }
10784 break;
10785 case Opt_fowner:
10786 diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
10787 index bcfc36cbde6a..61fbd0c0d95c 100644
10788 --- a/security/integrity/ima/ima_template_lib.c
10789 +++ b/security/integrity/ima/ima_template_lib.c
10790 @@ -70,7 +70,8 @@ static void ima_show_template_data_ascii(struct seq_file *m,
10791 enum data_formats datafmt,
10792 struct ima_field_data *field_data)
10793 {
10794 - u8 *buf_ptr = field_data->data, buflen = field_data->len;
10795 + u8 *buf_ptr = field_data->data;
10796 + u32 buflen = field_data->len;
10797
10798 switch (datafmt) {
10799 case DATA_FMT_DIGEST_WITH_ALGO:
10800 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
10801 index e72548b5897e..d33437007ad2 100644
10802 --- a/security/keys/keyring.c
10803 +++ b/security/keys/keyring.c
10804 @@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
10805 if (index_key->type == &key_type_keyring)
10806 up_write(&keyring_serialise_link_sem);
10807
10808 - if (edit && !edit->dead_leaf) {
10809 - key_payload_reserve(keyring,
10810 - keyring->datalen - KEYQUOTA_LINK_BYTES);
10811 + if (edit) {
10812 + if (!edit->dead_leaf) {
10813 + key_payload_reserve(keyring,
10814 + keyring->datalen - KEYQUOTA_LINK_BYTES);
10815 + }
10816 assoc_array_cancel_edit(edit);
10817 }
10818 up_write(&keyring->sem);
10819 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
10820 index 212070e1de1a..7f8d7f19e044 100644
10821 --- a/security/selinux/hooks.c
10822 +++ b/security/selinux/hooks.c
10823 @@ -3288,7 +3288,8 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
10824 int rc = 0;
10825
10826 if (default_noexec &&
10827 - (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
10828 + (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
10829 + (!shared && (prot & PROT_WRITE)))) {
10830 /*
10831 * We are making executable an anonymous mapping or a
10832 * private file mapping that will also be writable.
10833 diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
10834 index afe6a269ec17..57644b1dc42e 100644
10835 --- a/security/selinux/ss/ebitmap.c
10836 +++ b/security/selinux/ss/ebitmap.c
10837 @@ -153,6 +153,12 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap,
10838 if (offset == (u32)-1)
10839 return 0;
10840
10841 + /* don't waste ebitmap space if the netlabel bitmap is empty */
10842 + if (bitmap == 0) {
10843 + offset += EBITMAP_UNIT_SIZE;
10844 + continue;
10845 + }
10846 +
10847 if (e_iter == NULL ||
10848 offset >= e_iter->startbit + EBITMAP_SIZE) {
10849 e_prev = e_iter;
10850 diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
10851 index 9b5a17de4690..aad664225dc3 100644
10852 --- a/sound/soc/codecs/max98925.c
10853 +++ b/sound/soc/codecs/max98925.c
10854 @@ -346,7 +346,7 @@ static int max98925_dai_set_fmt(struct snd_soc_dai *codec_dai,
10855 }
10856
10857 regmap_update_bits(max98925->regmap, MAX98925_FORMAT,
10858 - M98925_DAI_BCI_MASK, invert);
10859 + M98925_DAI_BCI_MASK | M98925_DAI_WCI_MASK, invert);
10860 return 0;
10861 }
10862
10863 diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
10864 index be4d741c45ba..2ee44abd56a6 100644
10865 --- a/sound/soc/codecs/rt5645.c
10866 +++ b/sound/soc/codecs/rt5645.c
10867 @@ -2837,6 +2837,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
10868 }
10869 }
10870
10871 + INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
10872 +
10873 if (rt5645->i2c->irq) {
10874 ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
10875 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
10876 @@ -2855,8 +2857,6 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
10877 dev_err(&i2c->dev, "Fail gpio_direction hp_det_gpio\n");
10878 }
10879
10880 - INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
10881 -
10882 return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
10883 rt5645_dai, ARRAY_SIZE(rt5645_dai));
10884 }
10885 diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
10886 index dfb4ff5cc9ea..18558595ba72 100644
10887 --- a/sound/soc/codecs/tas2552.c
10888 +++ b/sound/soc/codecs/tas2552.c
10889 @@ -120,6 +120,9 @@ static void tas2552_sw_shutdown(struct tas2552_data *tas_data, int sw_shutdown)
10890 {
10891 u8 cfg1_reg;
10892
10893 + if (!tas_data->codec)
10894 + return;
10895 +
10896 if (sw_shutdown)
10897 cfg1_reg = 0;
10898 else
10899 @@ -335,7 +338,6 @@ static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 24);
10900 static const struct snd_kcontrol_new tas2552_snd_controls[] = {
10901 SOC_SINGLE_TLV("Speaker Driver Playback Volume",
10902 TAS2552_PGA_GAIN, 0, 0x1f, 1, dac_tlv),
10903 - SOC_DAPM_SINGLE("Playback AMP", SND_SOC_NOPM, 0, 1, 0),
10904 };
10905
10906 static const struct reg_default tas2552_init_regs[] = {
10907 diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
10908 index 0c6d1bc0526e..d476221dba51 100644
10909 --- a/sound/soc/codecs/wm5102.c
10910 +++ b/sound/soc/codecs/wm5102.c
10911 @@ -42,7 +42,7 @@ struct wm5102_priv {
10912 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
10913 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
10914 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
10915 -static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
10916 +static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
10917 static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
10918
10919 static const struct wm_adsp_region wm5102_dsp1_regions[] = {
10920 diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
10921 index fbaeddb3e903..3ee6cfd0578b 100644
10922 --- a/sound/soc/codecs/wm5110.c
10923 +++ b/sound/soc/codecs/wm5110.c
10924 @@ -167,7 +167,7 @@ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
10925 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
10926 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
10927 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
10928 -static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
10929 +static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
10930 static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
10931
10932 #define WM5110_NG_SRC(name, base) \
10933 diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
10934 index ada9ac1ba2c6..51171e457fa4 100644
10935 --- a/sound/soc/codecs/wm8737.c
10936 +++ b/sound/soc/codecs/wm8737.c
10937 @@ -483,7 +483,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
10938
10939 /* Fast VMID ramp at 2*2.5k */
10940 snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
10941 - WM8737_VMIDSEL_MASK, 0x4);
10942 + WM8737_VMIDSEL_MASK,
10943 + 2 << WM8737_VMIDSEL_SHIFT);
10944
10945 /* Bring VMID up */
10946 snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
10947 @@ -497,7 +498,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
10948
10949 /* VMID at 2*300k */
10950 snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
10951 - WM8737_VMIDSEL_MASK, 2);
10952 + WM8737_VMIDSEL_MASK,
10953 + 1 << WM8737_VMIDSEL_SHIFT);
10954
10955 break;
10956
10957 diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
10958 index db949311c0f2..0bb4a647755d 100644
10959 --- a/sound/soc/codecs/wm8903.h
10960 +++ b/sound/soc/codecs/wm8903.h
10961 @@ -172,7 +172,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
10962 #define WM8903_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */
10963
10964 #define WM8903_VMID_RES_50K 2
10965 -#define WM8903_VMID_RES_250K 3
10966 +#define WM8903_VMID_RES_250K 4
10967 #define WM8903_VMID_RES_5K 6
10968
10969 /*
10970 diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
10971 index 00bec915d652..03e04bf6c5ba 100644
10972 --- a/sound/soc/codecs/wm8955.c
10973 +++ b/sound/soc/codecs/wm8955.c
10974 @@ -298,7 +298,7 @@ static int wm8955_configure_clocking(struct snd_soc_codec *codec)
10975 snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
10976 WM8955_K_17_9_MASK,
10977 (pll.k >> 9) & WM8955_K_17_9_MASK);
10978 - snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
10979 + snd_soc_update_bits(codec, WM8955_PLL_CONTROL_3,
10980 WM8955_K_8_0_MASK,
10981 pll.k & WM8955_K_8_0_MASK);
10982 if (pll.k)
10983 diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
10984 index e97a7615df85..8d7f63253440 100644
10985 --- a/sound/soc/codecs/wm8960.c
10986 +++ b/sound/soc/codecs/wm8960.c
10987 @@ -245,7 +245,7 @@ SOC_SINGLE("PCM Playback -6dB Switch", WM8960_DACCTL1, 7, 1, 0),
10988 SOC_ENUM("ADC Polarity", wm8960_enum[0]),
10989 SOC_SINGLE("ADC High Pass Filter Switch", WM8960_DACCTL1, 0, 1, 0),
10990
10991 -SOC_ENUM("DAC Polarity", wm8960_enum[2]),
10992 +SOC_ENUM("DAC Polarity", wm8960_enum[1]),
10993 SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
10994 wm8960_get_deemph, wm8960_put_deemph),
10995
10996 diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
10997 index a4d11770630c..e7c81baefe66 100644
10998 --- a/sound/soc/codecs/wm8997.c
10999 +++ b/sound/soc/codecs/wm8997.c
11000 @@ -40,7 +40,7 @@ struct wm8997_priv {
11001 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
11002 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
11003 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
11004 -static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
11005 +static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
11006 static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
11007
11008 static const struct reg_default wm8997_sysclk_reva_patch[] = {
11009 diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
11010 index cd146d4fa805..b38b98cae855 100644
11011 --- a/sound/soc/fsl/imx-wm8962.c
11012 +++ b/sound/soc/fsl/imx-wm8962.c
11013 @@ -190,7 +190,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
11014 dev_err(&pdev->dev, "audmux internal port setup failed\n");
11015 return ret;
11016 }
11017 - imx_audmux_v2_configure_port(ext_port,
11018 + ret = imx_audmux_v2_configure_port(ext_port,
11019 IMX_AUDMUX_V2_PTCR_SYN,
11020 IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
11021 if (ret) {
11022 diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
11023 index 6768e4f7d7d0..30d0109703a9 100644
11024 --- a/sound/soc/omap/Kconfig
11025 +++ b/sound/soc/omap/Kconfig
11026 @@ -100,12 +100,13 @@ config SND_OMAP_SOC_OMAP_TWL4030
11027
11028 config SND_OMAP_SOC_OMAP_ABE_TWL6040
11029 tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
11030 - depends on TWL6040_CORE && SND_OMAP_SOC && (ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST)
11031 + depends on TWL6040_CORE && SND_OMAP_SOC
11032 + depends on ARCH_OMAP4 || (SOC_OMAP5 && MFD_PALMAS) || COMPILE_TEST
11033 select SND_OMAP_SOC_DMIC
11034 select SND_OMAP_SOC_MCPDM
11035 select SND_SOC_TWL6040
11036 select SND_SOC_DMIC
11037 - select COMMON_CLK_PALMAS if MFD_PALMAS
11038 + select COMMON_CLK_PALMAS if (SOC_OMAP5 && MFD_PALMAS)
11039 help
11040 Say Y if you want to add support for SoC audio on OMAP boards using
11041 ABE and twl6040 codec. This driver currently supports:
11042 diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
11043 index 5f58e4f1bca9..b07f183fc47f 100644
11044 --- a/sound/soc/qcom/Kconfig
11045 +++ b/sound/soc/qcom/Kconfig
11046 @@ -6,12 +6,10 @@ config SND_SOC_QCOM
11047
11048 config SND_SOC_LPASS_CPU
11049 tristate
11050 - depends on SND_SOC_QCOM
11051 select REGMAP_MMIO
11052
11053 config SND_SOC_LPASS_PLATFORM
11054 tristate
11055 - depends on SND_SOC_QCOM
11056 select REGMAP_MMIO
11057
11058 config SND_SOC_STORM
11059 diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
11060 index 85b523885f9d..2babddaa2481 100644
11061 --- a/tools/perf/util/cloexec.c
11062 +++ b/tools/perf/util/cloexec.c
11063 @@ -7,11 +7,15 @@
11064
11065 static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
11066
11067 +#ifdef __GLIBC_PREREQ
11068 +#if !__GLIBC_PREREQ(2, 6)
11069 int __weak sched_getcpu(void)
11070 {
11071 errno = ENOSYS;
11072 return -1;
11073 }
11074 +#endif
11075 +#endif
11076
11077 static int perf_flag_probe(void)
11078 {